diff --git a/.github/workflows/ci-casper-event-sidecar-rs.yml b/.github/workflows/ci-casper-event-sidecar-rs.yml index e6deff05..2b1d1271 100644 --- a/.github/workflows/ci-casper-event-sidecar-rs.yml +++ b/.github/workflows/ci-casper-event-sidecar-rs.yml @@ -1,5 +1,5 @@ --- -name: ci-casper-event-sidecar +name: ci-casper-sidecar on: push: @@ -44,9 +44,9 @@ jobs: - name: audit # Hope to get to here: # run: cargo audit --deny warnings - # RUSTSEC-2022-0093 - that is an issue that comes form casper-types, need to update that depenency as soon as a new release is made # RUSTSEC-2023-0071 - there is a transitive audit issue via sqlx. There is no fix for that yet, we should update dependencies once a fix is presented - run: cargo audit --ignore RUSTSEC-2022-0093 --ignore RUSTSEC-2023-0071 + # RUSTSEC-2024-0363 - issue in sqlx 0.8.0, no fix available yet + run: cargo audit --ignore RUSTSEC-2023-0071 --ignore RUSTSEC-2024-0363 - name: test run: cargo test @@ -56,4 +56,4 @@ jobs: cargo install cargo-deb - name: deb - run: cargo deb --package casper-event-sidecar + run: cargo deb --package casper-sidecar diff --git a/.github/workflows/publish-casper-event-sidecar-deb.yml b/.github/workflows/publish-casper-event-sidecar-deb.yml index 95e7f44d..1b0e21af 100644 --- a/.github/workflows/publish-casper-event-sidecar-deb.yml +++ b/.github/workflows/publish-casper-event-sidecar-deb.yml @@ -1,10 +1,13 @@ --- -name: publish-casper-event-sidecar-deb +name: publish-casper-sidecar-deb +permissions: + contents: read + id-token: write on: push: tags: - - "v*" + - "v*.*.*" jobs: publish_deb: @@ -13,23 +16,31 @@ jobs: include: - os: ubuntu-20.04 code_name: focal - - os: ubuntu-22.04 - code_name: jammy +# - os: ubuntu-22.04 +# code_name: jammy +# - os: ubuntu-24.04 +# code_name: noble runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b #v3.0.2 - - uses: Swatinem/rust-cache@cb2cf0cc7c5198d3364b9630e2c3d457f160790c #v1.4.0 with: key: ${{ 
matrix.code_name }} + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_ACCESS_ROLE_REPO }} + role-session-name: GitHub_to_AWS_via_FederatedOIDC + aws-region: ${{ secrets.AWS_ACCESS_REGION_REPO }} + - name: Install deps run: | echo "deb http://repo.aptly.info/ squeeze main" | sudo tee -a /etc/apt/sources.list.d/aptly.list wget -qO - https://www.aptly.info/pubkey.txt | sudo apt-key add - sudo apt-get update - sudo apt-get install -y awscli aptly=1.2.0 + sudo apt-get install -y aptly=1.4.0 aptly config show - name: update toolchain @@ -45,35 +56,20 @@ jobs: run: cargo install cargo-deb - name: Cargo deb - run: cargo deb --no-build --variant ${{ matrix.code_name }} + run: cargo deb --package casper-sidecar --variant ${{ matrix.code_name }} - name: Upload binaries to repo env: - AWS_SECRET_ACCESS_KEY: ${{ secrets.APTLY_SECRET_KEY }} - AWS_ACCESS_KEY_ID: ${{ secrets.APTLY_ACCESS_KEY }} - PLUGIN_REPO_NAME: ${{ secrets.APTLY_REPO }} - PLUGIN_REGION: ${{ secrets.APTLY_REGION }} + PLUGIN_REPO_NAME: ${{ secrets.AWS_BUCKET_REPO }} + PLUGIN_REGION: ${{ secrets.AWS_ACCESS_REGION_REPO }} PLUGIN_GPG_KEY: ${{ secrets.APTLY_GPG_KEY }} PLUGIN_GPG_PASS: ${{ secrets.APTLY_GPG_PASS }} - PLUGIN_ACL: 'public-read' + PLUGIN_ACL: 'private' PLUGIN_PREFIX: 'releases' PLUGIN_DEB_PATH: './target/debian' PLUGIN_OS_CODENAME: ${{ matrix.code_name }} run: ./ci/publish_deb_to_repo.sh - - name: Invalidate cloudfront - uses: chetan/invalidate-cloudfront-action@c384d5f09592318a77b1e5c0c8d4772317e48b25 #v2.4 - env: - DISTRIBUTION: ${{ secrets.APTLY_DIST_ID }} - PATHS: "/*" - AWS_REGION: ${{ secrets.APTLY_REGION }} - AWS_ACCESS_KEY_ID: ${{ secrets.APTLY_ACCESS_KEY }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.APTLY_SECRET_KEY }} - - - name: Upload binaries to release - uses: svenstaro/upload-release-action@133984371c30d34e38222a64855679a414cb7575 #v2.3.0 - with: - repo_token: ${{ secrets.TOKEN_FOR_GITHUB }} - file: 
target/debian/casper-event-sidecar/* - tag: ${{ github.ref }} - file_glob: true + - name: Invalidate CloudFront cache + run: | + aws cloudfront create-invalidation --distribution-id ${{ secrets.AWS_CLOUDFRONT_REPO }} --paths "/*" diff --git a/.gitignore b/.gitignore index cf3e9187..1f15c918 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,6 @@ /target -/test_output \ No newline at end of file +/test_output +# direnv-related files +.direnv/ +.envrc + diff --git a/Cargo.lock b/Cargo.lock index 063222d8..4d305c65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,27 +1,27 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "addr2line" -version = "0.21.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", "cipher", @@ -30,23 +30,12 @@ dependencies = [ [[package]] name = "ahash" -version = "0.7.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" -dependencies = [ - "getrandom", - "once_cell", - "version_check", -] - -[[package]] -name = "ahash" -version = "0.8.6" 
+version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom", + "getrandom 0.2.15", "once_cell", "serde", "version_check", @@ -55,9 +44,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -79,9 +68,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "ansi-str" @@ -98,63 +87,71 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "220044e6a1bb31ddee4e3db724d29767f352de47445a6cd75e1a173142136c83" dependencies = [ - "nom", + "nom 7.1.3", "vte", ] [[package]] name = "anstream" -version = "0.6.5" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d664a92ecae85fd0a7392615844904654d1d5f5514837f471ddef4a057aba1b6" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.4" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" +checksum = 
"55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "once_cell", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" + +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "archiver-rs" @@ -164,11 +161,17 @@ dependencies = [ "bzip2", "flate2", "tar", - "thiserror", + "thiserror 1.0.69", "xz2", "zip", ] +[[package]] +name = "array-init" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d62b7694a562cdf5a74227903507c56ab2cc8bdd1f781ed5cb4cf9c9f810bfc" + [[package]] 
name = "arrayvec" version = "0.5.2" @@ -187,9 +190,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.3.15" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942c7cd7ae39e91bde4820d74132e9862e62c2f386c3aa90ccf55949f5bad63a" +checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" dependencies = [ "brotli", "flate2", @@ -201,9 +204,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -212,33 +215,24 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "async-trait" -version = "0.1.74" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" -dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", -] - -[[package]] -name = "atoi" -version = "1.0.0" +version = "0.1.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c57d12312ff59c811c0643f4d80830505833c9ffaebd193d819392b265be8e" +checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" dependencies = [ - "num-traits", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -251,34 +245,30 @@ dependencies = [ ] [[package]] 
-name = "atomic-write-file" -version = "0.1.2" +name = "atomic-waker" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edcdbedc2236483ab103a53415653d6b4442ea6141baf1ffa85df29635e88436" -dependencies = [ - "nix", - "rand 0.8.5", -] +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.1.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -287,6 +277,12 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d27c3610c36aee21ce8ac510e6224498de4228ad772a171ed65643a24693a5a8" +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.13.1" @@ -295,9 +291,15 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" @@ -305,13 +307,37 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "bimap" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bit-set" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" dependencies = [ - "bit-vec", + "bit-vec 0.6.3", +] + +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec 0.8.0", ] [[package]] @@ -320,6 +346,12 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + [[package]] name = "bitflags" version = "1.3.2" @@ -328,58 +360,44 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" dependencies = [ "serde", ] -[[package]] -name = "bitvec" -version = "0.18.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98fcd36dda4e17b7d7abc64cb549bf0201f4ab71e00700c798ca7e62ed3761fa" -dependencies = [ - "funty", - "radium", - "wyz", -] - [[package]] name = "blake2" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" dependencies = [ - "crypto-mac 0.8.0", + "crypto-mac", "digest 0.9.0", "opaque-debug", ] [[package]] name = "block-buffer" -version = "0.9.0" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] -name = "block-buffer" -version = "0.10.4" +name = "borrow-or-share" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] +checksum = "3eeab4423108c5d7c744f4d234de88d18d636100093ae04caf4825134b9c3a32" [[package]] name = "brotli" -version = "3.4.0" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "516074a47ef4bce09577a3b379392300159ce5b1ba2e501ff1c819950066100f" +checksum = "cc97b8f16f944bba54f0433f07e30be199b6dc2bd25937444bbad560bcea29bd" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -388,25 +406,56 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.5.1" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" 
+checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", ] +[[package]] +name = "bstr" +version = "1.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "531a9155a481e2ee699d4f98f43c0ca4ff8ee1bfd55c31e9e98fb29d2b176fe0" +dependencies = [ + "memchr", + "regex-automata 0.4.9", + "serde", +] + [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "bytecount" -version = "0.6.7" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" + +[[package]] +name = "bytemuck" +version = "1.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", +] [[package]] name = "byteorder" @@ -416,9 +465,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" [[package]] name = "bzip2" @@ -441,6 +490,24 @@ dependencies = [ 
"pkg-config", ] +[[package]] +name = "casper-binary-port" +version = "1.0.0" +source = "git+https://github.com/casper-network/casper-node.git?branch=dev#324929ed851ba7a1cda8c768c3292eae8868b625" +dependencies = [ + "bincode", + "bytes", + "casper-types", + "once_cell", + "rand", + "serde", + "strum 0.26.3", + "strum_macros 0.26.4", + "thiserror 1.0.69", + "tokio-util 0.6.10", + "tracing", +] + [[package]] name = "casper-event-listener" version = "1.0.0" @@ -454,16 +521,17 @@ dependencies = [ "eventsource-stream", "futures", "futures-util", + "metrics", "mockito", "once_cell", "portpicker", - "reqwest", + "reqwest 0.12.12", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", - "tokio-util", + "tokio-util 0.7.13", "tracing", "url", "warp", @@ -480,40 +548,39 @@ dependencies = [ "casper-event-listener", "casper-event-types", "casper-types", - "clap", "colored", - "derive-new", + "derive-new 0.5.9", "eventsource-stream", "futures", "futures-util", "hex", "hex_fmt", - "http", - "hyper", - "indexmap 2.1.0", - "itertools 0.10.5", - "jsonschema", + "http 0.2.12", + "hyper 0.14.32", + "indexmap 2.7.1", + "itertools", + "jsonschema 0.17.1", + "metrics", "once_cell", "pg-embed", + "pin-project", "portpicker", "pretty_assertions", - "rand 0.8.5", + "rand", "regex", - "reqwest", + "reqwest 0.12.12", "schemars", "sea-query", "serde", "serde_json", - "sqlx 0.7.3", + "sqlx", "tabled", "tempfile", - "thiserror", - "tikv-jemallocator", + "thiserror 1.0.69", "tokio", "tokio-stream", - "tokio-util", - "toml", - "tower", + "tokio-util 0.7.13", + "tower 0.4.13", "tracing", "tracing-subscriber", "utoipa", @@ -531,32 +598,125 @@ dependencies = [ "casper-types", "hex-buffer-serde", "hex_fmt", + "itertools", + "mockall", "once_cell", - "prometheus", - "rand 0.8.5", + "pretty_assertions", + "rand", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "utoipa", ] +[[package]] +name = "casper-json-rpc" +version = "1.1.0" +dependencies = [ + "bytes", + 
"env_logger", + "futures", + "http 0.2.12", + "hyper 0.14.32", + "itertools", + "metrics", + "serde", + "serde_json", + "tokio", + "tracing", + "warp", +] + +[[package]] +name = "casper-rpc-sidecar" +version = "1.0.0" +dependencies = [ + "anyhow", + "assert-json-diff", + "async-trait", + "backtrace", + "base16", + "bincode", + "bytes", + "casper-binary-port", + "casper-json-rpc", + "casper-types", + "datasize", + "derive-new 0.6.0", + "futures", + "http 0.2.12", + "hyper 0.14.32", + "jsonschema 0.26.2", + "juliet", + "metrics", + "num-traits", + "num_cpus", + "once_cell", + "portpicker", + "pretty_assertions", + "rand", + "regex", + "schemars", + "serde", + "serde-map-to-array", + "serde_json", + "tempfile", + "thiserror 1.0.69", + "tokio", + "tokio-util 0.6.10", + "toml", + "tower 0.4.13", + "tracing", + "tracing-subscriber", + "vergen", + "warp", +] + +[[package]] +name = "casper-sidecar" +version = "1.0.1" +dependencies = [ + "anyhow", + "async-trait", + "backtrace", + "casper-event-sidecar", + "casper-event-types", + "casper-rpc-sidecar", + "clap", + "datasize", + "derive-new 0.6.0", + "futures", + "num_cpus", + "serde", + "thiserror 1.0.69", + "tikv-jemallocator", + "tokio", + "toml", + "tracing", + "tracing-subscriber", +] + [[package]] name = "casper-types" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d65faf6ea346ce733206a51822cb4da2a76cee29308b0ee4c1f3cba756bdee5" +version = "5.0.0" +source = "git+https://github.com/casper-network/casper-node.git?branch=dev#324929ed851ba7a1cda8c768c3292eae8868b625" dependencies = [ "base16", "base64 0.13.1", + "bincode", "bitflags 1.3.2", "blake2", + "datasize", + "derive_more", "derp", "ed25519-dalek", - "getrandom", + "getrandom 0.2.15", "hex", "hex_fmt", "humantime", + "itertools", "k256", + "libc", "num", "num-derive", "num-integer", @@ -566,26 +726,29 @@ dependencies = [ "pem", "proptest", "proptest-derive", - "rand 0.8.5", + "rand", "rand_pcg", "schemars", "serde", 
+ "serde-map-to-array", "serde_bytes", "serde_json", - "strum", - "thiserror", + "strum 0.24.1", + "thiserror 1.0.69", + "tracing", "uint", "untrusted 0.7.1", ] [[package]] name = "cc" -version = "1.0.83" +version = "1.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "e4730490333d58093109dc02c23174c3f4d490998c3fed3cc8e82d57afedb9cf" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -606,9 +769,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.11" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" +checksum = "769b0145982b4b48713e01ec42d61614425f27b7058bda7180a3a41f30104796" dependencies = [ "clap_builder", "clap_derive", @@ -616,9 +779,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.11" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" +checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" dependencies = [ "anstream", "anstyle", @@ -628,36 +791,42 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.7" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ - "heck", - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "heck 0.5.0", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" + +[[package]] +name = "clru" 
+version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "cbd0f76e066e64fdc5631e3bb46381254deab9ef1158292f27c8c57e3bf3fe59" [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "colored" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ "lazy_static", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -672,6 +841,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "core-foundation" version = "0.9.4" @@ -684,24 +859,24 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.11" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" 
dependencies = [ "libc", ] [[package]] name = "crc" -version = "3.0.1" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" dependencies = [ "crc-catalog", ] @@ -714,63 +889,61 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-queue" -version = "0.3.9" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9bcf5bdbfdd6030fb4a1c497b5d5fc5921aa2f60d359a17e249c0e6df3de153" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.17" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" -dependencies = [ - "cfg-if", -] +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" [[package]] -name = "crypto-common" -version = "0.1.6" +name = "crypto-bigint" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = 
"0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "typenum", + "rand_core", + "subtle", + "zeroize", ] [[package]] -name = "crypto-mac" -version = "0.8.0" +name = "crypto-common" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "subtle", + "typenum", ] [[package]] name = "crypto-mac" -version = "0.10.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ "generic-array", "subtle", @@ -778,57 +951,151 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.0" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "rustc_version", "subtle", "zeroize", ] [[package]] -name = "data-encoding" -version = "2.5.0" +name = "curve25519-dalek-derive" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", +] [[package]] -name = "der" -version = "0.7.8" +name = "darling" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "const-oid", - "pem-rfc7468", - "zeroize", + "darling_core", + "darling_macro", ] [[package]] -name = "deranged" -version = "0.3.10" +name = "darling_core" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ - "powerfmt", + "fnv", + "ident_case", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] -name = "derive-new" -version = "0.5.9" +name = "darling_macro" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 1.0.109", + "darling_core", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] -name = "derp" -version = "0.0.14" +name = "data-encoding" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" + +[[package]] +name = "datasize" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e65c07d59e45d77a8bda53458c24a828893a99ac6cdd9c84111e09176ab739a2" +dependencies = [ + "datasize_derive", + "fake_instant", + "serde", +] + +[[package]] +name = "datasize_derive" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613e4ee15899913285b7612004bbd490abd605be7b11d35afada5902fb6b91d5" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 1.0.109", +] + +[[package]] +name = "der" +version = "0.7.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "derive-new" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 1.0.109", +] + +[[package]] +name = "derive-new" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", +] + +[[package]] +name = "derive_more" +version = "0.99.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3da29a38df43d6f156149c9b43ded5e018ddff2a855cf2cfd62e8cd7d079c69f" +dependencies = [ + "convert_case", + "proc-macro2 1.0.93", + "quote 1.0.38", + "rustc_version", + "syn 2.0.98", +] + +[[package]] +name = "derp" +version = "0.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9b84cfd9b6fa437e498215e5625e9e3ae3bf9bb54d623028a181c40820db169" dependencies = [ @@ -856,51 +1123,42 @@ version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "const-oid", "crypto-common", "subtle", ] -[[package]] -name = "dirs" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" -dependencies = [ - "dirs-sys 0.3.7", -] - [[package]] name = "dirs" version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" dependencies = [ - "dirs-sys 0.4.1", + "dirs-sys", ] [[package]] name = "dirs-sys" -version = "0.3.7" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" dependencies = [ "libc", + "option-ext", "redox_users", - "winapi", + "windows-sys 0.48.0", ] [[package]] -name = "dirs-sys" -version = "0.4.1" +name = "displaydoc" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ - "libc", - "option-ext", - "redox_users", - "windows-sys 0.48.0", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -909,80 +1167,129 @@ version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "dyn-clone" -version = "1.0.16" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" 
+checksum = "feeef44e73baff3a26d371801df019877a9866a8c493d315ab00177843314f35" [[package]] name = "ecdsa" -version = "0.10.2" +version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fbdb4ff710acb4db8ca29f93b897529ea6d6a45626d5183b47e012aa6ae7e4" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ + "der", + "digest 0.10.7", "elliptic-curve", - "hmac 0.10.1", - "signature 1.2.2", + "rfc6979", + "signature", ] [[package]] name = "ed25519" -version = "1.2.0" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "signature 1.2.2", + "pkcs8", + "signature", ] [[package]] name = "ed25519-dalek" -version = "1.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand 0.7.3", - "sha2 0.9.9", + "serde", + "sha2", + "subtle", "zeroize", ] [[package]] name = "either" -version = "1.9.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" dependencies = [ "serde", ] [[package]] name = "elliptic-curve" -version = "0.8.5" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2db227e61a43a34915680bdda462ec0e212095518020a88a1f91acd16092c39" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "bitvec", - "digest 0.9.0", + "base16ct", + "crypto-bigint", + "digest 0.10.7", "ff", 
- "funty", "generic-array", "group", - "rand_core 0.5.1", + "rand_core", + "sec1", "subtle", "zeroize", ] +[[package]] +name = "email_address" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e079f19b08ca6239f47f8ba8509c11cf3ea30095831f7fed61441475edd8c449" +dependencies = [ + "serde", +] + [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if", ] +[[package]] +name = "env_filter" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0" +dependencies = [ + "log", + "regex", +] + +[[package]] +name = "env_logger" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "humantime", + "log", +] + [[package]] name = "equivalent" version = "1.0.1" @@ -991,12 +1298,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1023,74 +1330,107 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74fef4569247a5f429d9156b9d0a2599914385dd189c539334c625d8099d90ab" dependencies = [ "futures-core", - "nom", + "nom 7.1.3", "pin-project-lite", ] +[[package]] +name = 
"fake_instant" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3006df2e7bf21592b4983931164020b02f54eefdc1e35b2f70147858cc1e20ad" + [[package]] name = "fancy-regex" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b95f7c0680e4142284cf8b22c14a476e87d61b004a3a0861872b32ef7ead40a2" dependencies = [ - "bit-set", + "bit-set 0.5.3", "regex", ] +[[package]] +name = "fancy-regex" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e24cb5a94bcae1e5408b0effca5cd7172ea3c5755049c5f3af4cd283a165298" +dependencies = [ + "bit-set 0.8.0", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "faster-hex" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2a2b11eda1d40935b26cf18f6833c526845ae8c41e58d09af6adeb6f0269183" + [[package]] name = "fastrand" -version = "2.0.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "ff" -version = "0.8.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01646e077d4ebda82b73f1bca002ea1e91561a77df2431a9e79729bcc31950ef" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "bitvec", - "rand_core 0.5.1", + "rand_core", "subtle", ] +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + [[package]] name = "filetime" -version = "0.2.23" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" 
+checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", - "windows-sys 0.52.0", + "libredox", + "windows-sys 0.59.0", ] -[[package]] -name = "finl_unicode" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" - [[package]] name = "flate2" -version = "1.0.28" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", "miniz_oxide", ] +[[package]] +name = "fluent-uri" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1918b65d96df47d3591bed19c5cca17e3fa5d0707318e4b5ef2eae01764df7e5" +dependencies = [ + "borrow-or-share", + "ref-cast", + "serde", +] + [[package]] name = "flume" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" dependencies = [ "futures-core", "futures-sink", - "spin 0.9.8", + "spin", ] [[package]] @@ -1134,16 +1474,26 @@ dependencies = [ ] [[package]] -name = "funty" -version = "1.1.0" +name = "fraction" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f158e3ff0a1b334408dc9fb811cd99b446986f4d8b741bb08f9df1604085ae7" +dependencies = [ + "lazy_static", + "num", +] + +[[package]] +name = "fragile" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] 
name = "futures" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1156,9 +1506,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1166,32 +1516,21 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", "futures-util", ] -[[package]] -name = "futures-intrusive" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5" -dependencies = [ - "futures-core", - "lock_api", - "parking_lot 0.11.2", -] - [[package]] name = "futures-intrusive" version = "0.5.0" @@ -1200,43 +1539,43 @@ checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" dependencies = [ "futures-core", "lock_api", - "parking_lot 0.12.1", + "parking_lot", ] [[package]] name = "futures-io" -version = 
"0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -1258,134 +1597,685 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] name = "getrandom" -version = "0.2.11" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] [[package]] -name = "gimli" -version = "0.28.1" +name = "getrandom" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", +] [[package]] -name = "group" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc11f9f5fbf1943b48ae7c2bf6846e7d827a512d1be4f23af708f5ca5d01dde1" -dependencies = [ - "ff", - "rand_core 0.5.1", - "subtle", +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "gix" +version = "0.63.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "984c5018adfa7a4536ade67990b3ebc6e11ab57b3d6cd9968de0947ca99b4b06" +dependencies = [ + "gix-actor", + "gix-commitgraph", + "gix-config", + "gix-date", + "gix-diff", + "gix-discover", + "gix-features", + "gix-fs", + "gix-glob", + "gix-hash", + "gix-hashtable", + "gix-index", + "gix-lock", + "gix-macros", + "gix-object", + "gix-odb", + "gix-pack", + "gix-path", + "gix-ref", + "gix-refspec", + "gix-revision", + "gix-revwalk", + "gix-sec", + "gix-tempfile", + "gix-trace", + "gix-traverse", + "gix-url", + "gix-utils", + "gix-validate", + "once_cell", + "parking_lot", + "signal-hook", + "smallvec", + "thiserror 1.0.69", ] [[package]] -name = "h2" -version = "0.3.22" +name = "gix-actor" +version = "0.31.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" +checksum = "a0e454357e34b833cc3a00b6efbbd3dd4d18b24b9fb0c023876ec2645e8aa3f2" dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap 2.1.0", - "slab", - "tokio", - "tokio-util", - "tracing", + "bstr", + "gix-date", + "gix-utils", + "itoa", + "thiserror 1.0.69", + "winnow", ] [[package]] -name = "hashbrown" -version = "0.12.3" +name = "gix-bitmap" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +checksum = "b1db9765c69502650da68f0804e3dc2b5f8ccc6a2d104ca6c85bc40700d37540" +dependencies = [ + "thiserror 2.0.11", +] [[package]] -name = "hashbrown" -version = "0.14.3" +name = "gix-chunk" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "0b1f1d8764958699dc764e3f727cef280ff4d1bd92c107bbf8acd85b30c1bd6f" dependencies = [ - "ahash 0.8.6", - "allocator-api2", + "thiserror 2.0.11", ] [[package]] -name = "hashlink" -version = "0.8.4" +name = "gix-commitgraph" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "133b06f67f565836ec0c473e2116a60fb74f80b6435e21d88013ac0e3c60fc78" dependencies = [ - "hashbrown 0.14.3", + "bstr", + "gix-chunk", + "gix-features", + "gix-hash", + "memmap2", + "thiserror 1.0.69", ] [[package]] -name = "headers" -version = "0.3.9" +name = "gix-config" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" +checksum = "53fafe42957e11d98e354a66b6bd70aeea00faf2f62dd11164188224a507c840" dependencies 
= [ - "base64 0.21.5", - "bytes", - "headers-core", - "http", - "httpdate", - "mime", - "sha1", + "bstr", + "gix-config-value", + "gix-features", + "gix-glob", + "gix-path", + "gix-ref", + "gix-sec", + "memchr", + "once_cell", + "smallvec", + "thiserror 1.0.69", + "unicode-bom", + "winnow", ] [[package]] -name = "headers-core" -version = "0.2.0" +name = "gix-config-value" +version = "0.14.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +checksum = "11365144ef93082f3403471dbaa94cfe4b5e72743bdb9560719a251d439f4cee" dependencies = [ - "http", + "bitflags 2.8.0", + "bstr", + "gix-path", + "libc", + "thiserror 2.0.11", ] [[package]] -name = "heck" -version = "0.4.1" +name = "gix-date" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "9eed6931f21491ee0aeb922751bd7ec97b4b2fe8fbfedcb678e2a2dce5f3b8c0" dependencies = [ - "unicode-segmentation", + "bstr", + "itoa", + "thiserror 1.0.69", + "time", ] [[package]] -name = "hermit-abi" -version = "0.3.3" +name = "gix-diff" +version = "0.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +checksum = "1996d5c8a305b59709467d80617c9fde48d9d75fd1f4179ea970912630886c9d" +dependencies = [ + "bstr", + "gix-hash", + "gix-object", + "thiserror 1.0.69", +] [[package]] -name = "hex" -version = "0.4.3" +name = "gix-discover" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +checksum = "fc27c699b63da66b50d50c00668bc0b7e90c3a382ef302865e891559935f3dbf" +dependencies = [ + "bstr", + "dunce", + "gix-fs", + "gix-hash", + "gix-path", + "gix-ref", + "gix-sec", + "thiserror 1.0.69", +] [[package]] -name = 
"hex-buffer-serde" -version = "0.3.0" +name = "gix-features" +version = "0.38.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f52012c160668b4494727f3588045aa00429849fcae51de70d68fa98228039" +checksum = "ac7045ac9fe5f9c727f38799d002a7ed3583cd777e3322a7c4b43e3cf437dc69" dependencies = [ - "hex", + "crc32fast", + "flate2", + "gix-hash", + "gix-trace", + "gix-utils", + "libc", + "once_cell", + "prodash", + "sha1_smol", + "thiserror 1.0.69", + "walkdir", +] + +[[package]] +name = "gix-fs" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2bfe6249cfea6d0c0e0990d5226a4cb36f030444ba9e35e0639275db8f98575" +dependencies = [ + "fastrand", + "gix-features", + "gix-utils", +] + +[[package]] +name = "gix-glob" +version = "0.16.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74908b4bbc0a0a40852737e5d7889f676f081e340d5451a16e5b4c50d592f111" +dependencies = [ + "bitflags 2.8.0", + "bstr", + "gix-features", + "gix-path", +] + +[[package]] +name = "gix-hash" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93d7df7366121b5018f947a04d37f034717e113dcf9ccd85c34b58e57a74d5e" +dependencies = [ + "faster-hex", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-hashtable" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ddf80e16f3c19ac06ce415a38b8591993d3f73aede049cb561becb5b3a8e242" +dependencies = [ + "gix-hash", + "hashbrown 0.14.5", + "parking_lot", +] + +[[package]] +name = "gix-index" +version = "0.33.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a9a44eb55bd84bb48f8a44980e951968ced21e171b22d115d1cdcef82a7d73f" +dependencies = [ + "bitflags 2.8.0", + "bstr", + "filetime", + "fnv", + "gix-bitmap", + "gix-features", + "gix-fs", + "gix-hash", + "gix-lock", + "gix-object", + "gix-traverse", + "gix-utils", + "gix-validate", + "hashbrown 
0.14.5", + "itoa", + "libc", + "memmap2", + "rustix", + "smallvec", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-lock" +version = "14.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bc7fe297f1f4614774989c00ec8b1add59571dc9b024b4c00acb7dedd4e19d" +dependencies = [ + "gix-tempfile", + "gix-utils", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-macros" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "999ce923619f88194171a67fb3e6d613653b8d4d6078b529b15a765da0edcc17" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", +] + +[[package]] +name = "gix-object" +version = "0.42.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25da2f46b4e7c2fa7b413ce4dffb87f69eaf89c2057e386491f4c55cadbfe386" +dependencies = [ + "bstr", + "gix-actor", + "gix-date", + "gix-features", + "gix-hash", + "gix-utils", + "gix-validate", + "itoa", + "smallvec", + "thiserror 1.0.69", + "winnow", +] + +[[package]] +name = "gix-odb" +version = "0.61.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20d384fe541d93d8a3bb7d5d5ef210780d6df4f50c4e684ccba32665a5e3bc9b" +dependencies = [ + "arc-swap", + "gix-date", + "gix-features", + "gix-fs", + "gix-hash", + "gix-object", + "gix-pack", + "gix-path", + "gix-quote", + "parking_lot", + "tempfile", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-pack" +version = "0.51.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e0594491fffe55df94ba1c111a6566b7f56b3f8d2e1efc750e77d572f5f5229" +dependencies = [ + "clru", + "gix-chunk", + "gix-features", + "gix-hash", + "gix-hashtable", + "gix-object", + "gix-path", + "memmap2", + "smallvec", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-path" +version = "0.10.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c40f12bb65a8299be0cfb90fe718e3be236b7a94b434877012980863a883a99f" +dependencies = [ + "bstr", + "gix-trace", + "home", + "once_cell", + "thiserror 2.0.11", +] + +[[package]] +name = "gix-quote" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e49357fccdb0c85c0d3a3292a9f6db32d9b3535959b5471bb9624908f4a066c6" +dependencies = [ + "bstr", + "gix-utils", + "thiserror 2.0.11", +] + +[[package]] +name = "gix-ref" +version = "0.44.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3394a2997e5bc6b22ebc1e1a87b41eeefbcfcff3dbfa7c4bd73cb0ac8f1f3e2e" +dependencies = [ + "gix-actor", + "gix-date", + "gix-features", + "gix-fs", + "gix-hash", + "gix-lock", + "gix-object", + "gix-path", + "gix-tempfile", + "gix-utils", + "gix-validate", + "memmap2", + "thiserror 1.0.69", + "winnow", +] + +[[package]] +name = "gix-refspec" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6868f8cd2e62555d1f7c78b784bece43ace40dd2a462daf3b588d5416e603f37" +dependencies = [ + "bstr", + "gix-hash", + "gix-revision", + "gix-validate", + "smallvec", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-revision" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01b13e43c2118c4b0537ddac7d0821ae0dfa90b7b8dbf20c711e153fb749adce" +dependencies = [ + "bstr", + "gix-date", + "gix-hash", + "gix-hashtable", + "gix-object", + "gix-revwalk", + "gix-trace", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-revwalk" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b030ccaab71af141f537e0225f19b9e74f25fefdba0372246b844491cab43e0" +dependencies = [ + "gix-commitgraph", + "gix-date", + "gix-hash", + "gix-hashtable", + "gix-object", + "smallvec", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-sec" +version = "0.10.11" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "d84dae13271f4313f8d60a166bf27e54c968c7c33e2ffd31c48cafe5da649875" +dependencies = [ + "bitflags 2.8.0", + "gix-path", + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "gix-tempfile" +version = "14.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "046b4927969fa816a150a0cda2e62c80016fe11fb3c3184e4dddf4e542f108aa" +dependencies = [ + "gix-fs", + "libc", + "once_cell", + "parking_lot", + "signal-hook", + "signal-hook-registry", + "tempfile", +] + +[[package]] +name = "gix-trace" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c396a2036920c69695f760a65e7f2677267ccf483f25046977d87e4cb2665f7" + +[[package]] +name = "gix-traverse" +version = "0.39.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e499a18c511e71cf4a20413b743b9f5bcf64b3d9e81e9c3c6cd399eae55a8840" +dependencies = [ + "bitflags 2.8.0", + "gix-commitgraph", + "gix-date", + "gix-hash", + "gix-hashtable", + "gix-object", + "gix-revwalk", + "smallvec", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-url" +version = "0.27.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd280c5e84fb22e128ed2a053a0daeacb6379469be6a85e3d518a0636e160c89" +dependencies = [ + "bstr", + "gix-features", + "gix-path", + "home", + "thiserror 1.0.69", + "url", +] + +[[package]] +name = "gix-utils" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff08f24e03ac8916c478c8419d7d3c33393da9bb41fa4c24455d5406aeefd35f" +dependencies = [ + "fastrand", + "unicode-normalization", +] + +[[package]] +name = "gix-validate" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82c27dd34a49b1addf193c92070bcbf3beaf6e10f16a78544de6372e146a0acf" +dependencies = [ + "bstr", + "thiserror 1.0.69", +] + +[[package]] +name = "group" +version = "0.13.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core", + "subtle", +] + +[[package]] +name = "h2" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap 2.7.1", + "slab", + "tokio", + "tokio-util 0.7.13", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.2.0", + "indexmap 2.7.1", + "slab", + "tokio", + "tokio-util 0.7.13", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "allocator-api2", +] + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" + +[[package]] +name = "hashlink" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "headers" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" +dependencies = [ + "base64 0.21.7", + "bytes", + "headers-core", + "http 0.2.12", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +dependencies = [ + "http 0.2.12", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-buffer-serde" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f52012c160668b4494727f3588045aa00429849fcae51de70d68fa98228039" +dependencies = [ + "hex", "serde", ] @@ -1399,19 +2289,9 @@ checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" name = "hkdf" version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" -dependencies = [ - "hmac 0.12.1", -] - -[[package]] -name = "hmac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ - "crypto-mac 0.10.1", - "digest 0.9.0", + "hmac", ] [[package]] @@ -1425,18 +2305,29 @@ dependencies = [ [[package]] name = "home" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -1450,15 +2341,38 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", + "http 0.2.12", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http 1.2.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http 1.2.0", + "http-body 1.0.1", "pin-project-lite", ] 
[[package]] name = "httparse" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" [[package]] name = "httpdate" @@ -1474,17 +2388,17 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.28" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", "httparse", "httpdate", "itoa", @@ -1496,6 +2410,44 @@ dependencies = [ "want", ] +[[package]] +name = "hyper" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.7", + "http 1.2.0", + "http-body 1.0.1", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +dependencies = [ + "futures-util", + "http 1.2.0", + "hyper 1.6.0", + "hyper-util", + "rustls 0.23.22", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -1503,20 +2455,190 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - 
"hyper", + "hyper 0.14.32", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.6.0", + "hyper-util", "native-tls", "tokio", "tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.2.0", + "http-body 1.0.1", + "hyper 1.6.0", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = 
"1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" 
-version = "0.5.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", ] [[package]] @@ -1532,24 +2654,24 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "serde", ] [[package]] name = "inherent" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce243b1bfa62ffc028f1cc3b6034ec63d649f3031bc8a4fbbb004e1ac17d1f68" +checksum = "0122b7114117e64a63ac49f752a5ca4624d534c7b1c7de796ac196381cd2d947" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -1562,38 +2684,24 @@ dependencies = [ ] [[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "io-lifetimes" -version = "1.0.11" +name = "ipnet" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi", - "libc", - "windows-sys 0.48.0", -] +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] -name = "ipnet" -version = "2.9.0" +name = "is_terminal_polyfill" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "iso8601" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "924e5d73ea28f59011fec52a0d12185d496a9b075d360657aed2a5707f701153" +checksum = "c5c177cff824ab21a6f41079a4c401241c4e8be14f316c4c6b07d5fca351c98d" dependencies = [ - "nom", + "nom 8.0.0", ] [[package]] @@ -1605,36 +2713,28 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" -dependencies = [ - "either", -] - [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jobserver" -version = "0.1.27" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.66" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" 
+checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -1644,23 +2744,23 @@ version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a071f4f7efc9a9118dfb627a0a94ef247986e1ab8606a4c806ae2b3aa3b6978" dependencies = [ - "ahash 0.8.6", + "ahash", "anyhow", - "base64 0.21.5", + "base64 0.21.7", "bytecount", "clap", - "fancy-regex", - "fraction", - "getrandom", + "fancy-regex 0.11.0", + "fraction 0.13.1", + "getrandom 0.2.15", "iso8601", "itoa", "memchr", "num-cmp", "once_cell", - "parking_lot 0.12.1", + "parking_lot", "percent-encoding", "regex", - "reqwest", + "reqwest 0.11.27", "serde", "serde_json", "time", @@ -1668,48 +2768,91 @@ dependencies = [ "uuid", ] +[[package]] +name = "jsonschema" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26a960f0c34d5423581d858ce94815cc11f0171b09939409097969ed269ede1b" +dependencies = [ + "ahash", + "base64 0.22.1", + "bytecount", + "email_address", + "fancy-regex 0.14.0", + "fraction 0.15.3", + "idna", + "itoa", + "num-cmp", + "once_cell", + "percent-encoding", + "referencing", + "regex-syntax 0.8.5", + "reqwest 0.12.12", + "serde", + "serde_json", + "uuid-simd", +] + +[[package]] +name = "juliet" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4336a0d5e38193caafe774bd2be027cf5aa3c3e45b3f1bda1791fcacc9e9951d" +dependencies = [ + "array-init", + "bimap", + "bytemuck", + "bytes", + "futures", + "once_cell", + "strum 0.25.0", + "thiserror 1.0.69", + "tokio", + "tracing", +] + [[package]] name = "k256" -version = "0.7.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4476a0808212a9e81ce802eb1a0cfc60e73aea296553bacc0fac7e1268bc572a" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa", "elliptic-curve", - "sha2 
0.9.9", + "sha2", ] [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin 0.5.2", + "spin", ] [[package]] name = "libc" -version = "0.2.151" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libredox" -version = "0.0.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.8.0", "libc", - "redox_syscall 0.4.1", + "redox_syscall", ] [[package]] @@ -1725,21 +2868,21 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.1.4" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] -name = "linux-raw-sys" -version = "0.4.12" +name = "litemap" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = 
"4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -1747,9 +2890,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" [[package]] name = "lzma-sys" @@ -1762,6 +2905,15 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "md-5" version = "0.10.6" @@ -1774,9 +2926,26 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "memmap2" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" +dependencies = [ + "libc", +] + +[[package]] +name = "metrics" +version = "1.0.0" +dependencies = [ + "once_cell", + "prometheus", +] [[package]] name = "mime" @@ -1786,9 +2955,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.4" +version = "2.0.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", @@ -1802,36 +2971,68 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +dependencies = [ + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.52.0", +] + +[[package]] +name = "mockall" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48" dependencies = [ - "adler", + "cfg-if", + "downcast", + "fragile", + "lazy_static", + "mockall_derive", + "predicates", + "predicates-tree", ] [[package]] -name = "mio" -version = "0.8.10" +name = "mockall_derive" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" dependencies = [ - "libc", - "wasi", - "windows-sys 0.48.0", + "cfg-if", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "mockito" -version = "1.2.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8d3038e23466858569c2d30a537f691fa0d53b51626630ae08262943e3bbb8b" +checksum = 
"652cd6d169a36eaf9d1e6bce1a221130439a966d7f27858af66a33a66e9c4ee2" dependencies = [ "assert-json-diff", + "bytes", "colored", - "futures", - "hyper", + "futures-util", + "http 1.2.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.6.0", + "hyper-util", "log", - "rand 0.8.5", + "rand", "regex", "serde_json", "serde_urlencoded", @@ -1848,22 +3049,21 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http", + "http 0.2.12", "httparse", "log", "memchr", "mime", - "spin 0.9.8", + "spin", "version_check", ] [[package]] name = "native-tls" -version = "0.2.11" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = "0dab59f8e050d5df8e4dd87d9206fb6f65a483e20ac9fda365ade4fab353196c" dependencies = [ - "lazy_static", "libc", "log", "openssl", @@ -1876,24 +3076,22 @@ dependencies = [ ] [[package]] -name = "nix" -version = "0.27.1" +name = "nom" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ - "bitflags 2.4.1", - "cfg-if", - "libc", + "memchr", + "minimal-lexical", ] [[package]] name = "nom" -version = "7.1.3" +version = "8.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +checksum = "df9761775871bdef83bee530e60050f7e54b1105350d6884eb0fb4f46c2f9405" dependencies = [ "memchr", - "minimal-lexical", ] [[package]] @@ -1908,9 +3106,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" 
dependencies = [ "num-bigint", "num-complex", @@ -1922,11 +3120,10 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "autocfg", "num-integer", "num-traits", ] @@ -1943,7 +3140,7 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "rand 0.8.5", + "rand", "smallvec", "zeroize", ] @@ -1956,39 +3153,44 @@ checksum = "63335b2e2c34fae2fb0aa2cecfd9f0832a1e24b3b32ecec612c3426d46dc8aaa" [[package]] name = "num-complex" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-derive" -version = "0.3.3" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 1.0.109", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-iter" 
-version = "0.1.43" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -1997,21 +3199,21 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "autocfg", "num-bigint", "num-integer", "num-traits", + "serde", ] [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -2027,34 +3229,43 @@ dependencies = [ "libc", ] +[[package]] +name = "num_threads" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" +dependencies = [ + "libc", +] + [[package]] name = "object" -version = "0.32.1" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "opaque-debug" -version 
= "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" -version = "0.10.61" +version = "0.10.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b8419dc8cc6d866deb801274bba2e6f8f6108c1bb7fcc10ee5ab864931dbb45" +checksum = "61cfb4e166a8bb8c9b55c500bc2308550148ece889be90f609377e58140f42c6" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.8.0", "cfg-if", "foreign-types", "libc", @@ -2069,22 +3280,22 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.97" +version = "0.9.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3eaad34cdd97d81de97964fc7f29e2d104f483840d906ef56daa1912338460b" +checksum = "8b22d5b84be05a8d6947c7cb71f7c849aa0f112acd4bf51c2a7c1c988ac0a9dc" dependencies = [ "cc", "libc", @@ -2098,6 +3309,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "outref" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" + [[package]] name = "overload" version 
= "0.1.1" @@ -2119,50 +3336,25 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - -[[package]] -name = "parking_lot" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", - "parking_lot_core 0.9.9", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", + "redox_syscall", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -2172,15 +3364,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle", ] [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = 
"57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pbkdf2" @@ -2189,9 +3381,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ "digest 0.10.7", - "hmac 0.12.1", + "hmac", "password-hash", - "sha2 0.10.8", + "sha2", ] [[package]] @@ -2223,47 +3415,47 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pg-embed" version = "0.7.2" -source = "git+https://github.com/faokunega/pg-embed?tag=v0.8.0#72db5e053f0afac6eee51d3baa2fd5c90803e02d" +source = "git+https://github.com/zajko/pg-embed?branch=bump_dependencies#66b94eccf91e8b198b8ebb054693c1f732809d17" dependencies = [ "archiver-rs", "async-trait", "bytes", - "dirs 5.0.1", + "dirs", "futures", "lazy_static", "log", - "reqwest", - "sqlx 0.6.3", - "thiserror", + "reqwest 0.11.27", + "sqlx", + "thiserror 1.0.69", "tokio", "zip", ] [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = 
"3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -2294,9 +3486,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "portpicker" @@ -2304,7 +3496,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be97d76faf1bfab666e1375477b23fde79eccf0276e9b63b92a39d676a889ba9" dependencies = [ - "rand 0.8.5", + "rand", ] [[package]] @@ -2315,15 +3507,44 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "predicates" +version = "3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +dependencies = [ + "anstyle", + "predicates-core", +] + +[[package]] +name = "predicates-core" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" + +[[package]] +name = "predicates-tree" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +dependencies = [ + "predicates-core", + "termtree", +] [[package]] name = "pretty_assertions" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" dependencies = [ "diff", "yansi", @@ -2336,8 +3557,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", "version_check", ] @@ -2348,8 +3569,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.93", + "quote 1.0.38", "version_check", ] @@ -2364,58 +3585,74 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.70" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] [[package]] name = "procfs" -version = "0.14.2" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de8dacb0873f77e6aefc6d71e044761fcc68060290f5b1089fcdf84626bb69" +checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" dependencies = [ - "bitflags 1.3.2", - "byteorder", + "bitflags 2.8.0", "hex", "lazy_static", - "rustix 0.36.17", + "procfs-core", + "rustix", +] + +[[package]] +name = "procfs-core" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" +dependencies = [ + "bitflags 2.8.0", + "hex", ] +[[package]] +name = "prodash" +version = "28.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "744a264d26b88a6a7e37cbad97953fa233b94d585236310bcbc88474b4092d79" + [[package]] name = "prometheus" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" +checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" dependencies = [ "cfg-if", "fnv", "lazy_static", "libc", "memchr", - "parking_lot 0.12.1", + "parking_lot", "procfs", "protobuf", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "proptest" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" +checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" dependencies = [ - "bit-set", - "bit-vec", - "bitflags 2.4.1", + "bit-set 0.8.0", + "bit-vec 0.8.0", + "bitflags 2.8.0", "lazy_static", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "rand_xorshift", - "regex-syntax", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -2455,28 +3692,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" -dependencies = [ - "proc-macro2 1.0.70", -] - -[[package]] -name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - -[[package]] -name = "rand" -version = "0.7.3" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", + 
"proc-macro2 1.0.93", ] [[package]] @@ -2486,18 +3706,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -2507,31 +3717,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", + "rand_core", ] -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" - [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "getrandom 0.2.15", ] [[package]] @@ -2540,7 +3735,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -2549,83 +3744,122 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.6.4", + "rand_core", 
] [[package]] name = "redox_syscall" -version = "0.2.16" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.8.0", ] [[package]] -name = "redox_syscall" -version = "0.4.1" +name = "redox_users" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "bitflags 1.3.2", + "getrandom 0.2.15", + "libredox", + "thiserror 1.0.69", ] [[package]] -name = "redox_users" -version = "0.4.4" +name = "ref-cast" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = "ccf0a6f84d5f1d581da8b41b47ec8600871962f2a528115b542b362d4b744931" dependencies = [ - "getrandom", - "libredox", - "thiserror", + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", +] + +[[package]] +name = "referencing" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb8e15af8558cb157432dd3d88c1d1e982d0a5755cf80ce593b6499260aebc49" +dependencies = [ + "ahash", + "fluent-uri", + "once_cell", + "percent-encoding", + "serde_json", ] [[package]] name = "regex" -version = "1.10.2" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = 
"b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata", - "regex-syntax", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", ] [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.8.5", ] [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.11.23" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-tls", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper-tls 0.5.0", "ipnet", "js-sys", "log", @@ -2634,56 +3868,99 @@ dependencies = [ "once_cell", "percent-encoding", 
"pin-project-lite", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", - "system-configuration", + "sync_wrapper 0.1.2", + "system-configuration 0.5.1", "tokio", "tokio-native-tls", - "tokio-util", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-streams", "web-sys", "winreg", ] [[package]] -name = "ring" -version = "0.16.20" +name = "reqwest" +version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" dependencies = [ - "cc", - "libc", + "base64 0.22.1", + "bytes", + "encoding_rs", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.4.7", + "http 1.2.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.6.0", + "hyper-rustls", + "hyper-tls 0.6.0", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile 2.2.0", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.2", + "system-configuration 0.6.1", + "tokio", + "tokio-native-tls", + "tokio-util 0.7.13", + "tower 0.5.2", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", "web-sys", - "winapi", + "windows-registry", +] + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", ] [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", - "getrandom", + "cfg-if", + "getrandom 
0.2.15", "libc", - "spin 0.9.8", + "spin", "untrusted 0.9.0", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "rsa" -version = "0.9.6" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" +checksum = "47c75d7c5c6b673e58bf54d8544a9f432e3a925b0e80f7cd3602ab5c50c55519" dependencies = [ "const-oid", "digest 0.10.7", @@ -2692,8 +3969,8 @@ dependencies = [ "num-traits", "pkcs1", "pkcs8", - "rand_core 0.6.4", - "signature 2.2.0", + "rand_core", + "signature", "spki", "subtle", "zeroize", @@ -2701,9 +3978,9 @@ dependencies = [ [[package]] name = "rust-embed" -version = "6.8.1" +version = "8.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a36224c3276f8c4ebc8c20f158eca7ca4359c8db89991c4925132aaaf6702661" +checksum = "fa66af4a4fdd5e7ebc276f115e895611a34739a9c1c01028383d612d550953c0" dependencies = [ "rust-embed-impl", "rust-embed-utils", @@ -2712,87 +3989,129 @@ dependencies = [ [[package]] name = "rust-embed-impl" -version = "6.8.1" +version = "8.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b94b81e5b2c284684141a2fb9e2a31be90638caf040bf9afbc5a0416afe1ac" +checksum = "6125dbc8867951125eec87294137f4e9c2c96566e61bf72c45095a7c77761478" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.93", + "quote 1.0.38", "rust-embed-utils", - "shellexpand", - "syn 2.0.41", + "syn 2.0.98", "walkdir", ] [[package]] name = "rust-embed-utils" -version = "7.8.1" +version = "8.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d38ff6bf570dc3bb7100fce9f7b60c33fa71d80e88da3f2580df4ff2bdded74" +checksum = "2e5347777e9aacb56039b0e1f28785929a8a3b709e87482e7442c72e7c12529d" dependencies = [ - "sha2 0.10.8", + "sha2", "walkdir", ] [[package]] -name = "rustc-demangle" -version = "0.1.23" +name = "rustc-demangle" +version = "0.1.24" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags 2.8.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "ring", + "rustls-webpki 0.101.7", + "sct", +] + +[[package]] +name = "rustls" +version = "0.23.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" +dependencies = [ + "once_cell", + "rustls-pki-types", + "rustls-webpki 0.102.8", + "subtle", + "zeroize", +] [[package]] -name = "rustix" -version = "0.36.17" +name = "rustls-pemfile" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "305efbd14fde4139eb501df5f136994bb520b033fa9fbdce287507dc23b8c7ed" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.1.4", - "windows-sys 0.45.0", + "base64 0.21.7", ] [[package]] -name = "rustix" -version = "0.38.28" +name = "rustls-pemfile" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "bitflags 2.4.1", - "errno", - "libc", - "linux-raw-sys 0.4.12", - "windows-sys 0.52.0", + "rustls-pki-types", ] [[package]] -name = "rustls" -version = "0.20.9" +name = "rustls-pki-types" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" + +[[package]] +name = "rustls-webpki" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "log", - "ring 0.16.20", - "sct", - "webpki", + "ring", + "untrusted 0.9.0", ] [[package]] -name = "rustls-pemfile" -version = "1.0.4" +name = "rustls-webpki" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ - "base64 0.21.5", + "ring", + "rustls-pki-types", + "untrusted 0.9.0", ] [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rusty-fork" @@ -2808,9 +4127,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" [[package]] name = 
"same-file" @@ -2823,18 +4142,18 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] name = "schemars" -version = "0.8.5" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b82485a532ef0af18878ad4281f73e58161cdba1db7918176e9294f0ca5498a5" +checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" dependencies = [ "dyn-clone", "indexmap 1.9.3", @@ -2845,14 +4164,14 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.5" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791c2c848cff1abaeae34fef7e70da5f93171d9eea81ce0fe969a1df627a61a8" +checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.93", + "quote 1.0.38", "serde_derive_internals", - "syn 1.0.109", + "syn 2.0.98", ] [[package]] @@ -2873,15 +4192,15 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.7", + "ring", "untrusted 0.9.0", ] [[package]] name = "sea-query" -version = "0.30.5" +version = "0.30.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e40446e3c048cec0802375f52462a05cc774b9ea6af1dffba6c646b7825e4cf9" +checksum = "4166a1e072292d46dc91f31617c2a1cdaf55a8be4b5c9f4bf2ba248e3ac4999b" dependencies = [ "inherent", "sea-query-derive", @@ -2889,24 +4208,38 @@ dependencies = [ [[package]] name = "sea-query-derive" -version = "0.4.1" +version = "0.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "25a82fcb49253abcb45cdcb2adf92956060ec0928635eb21b4f7a6d8f25ab0bc" +checksum = "9834af2c4bd8c5162f00c89f1701fb6886119a88062cf76fe842ea9e232b9839" dependencies = [ - "heck", - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", - "thiserror", + "darling", + "heck 0.4.1", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", + "thiserror 1.0.69", +] + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "subtle", + "zeroize", ] [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.8.0", "core-foundation", "core-foundation-sys", "libc", @@ -2915,62 +4248,79 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", ] +[[package]] +name = "semver" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" + [[package]] name = "serde" -version = "1.0.193" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = 
"02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] +[[package]] +name = "serde-map-to-array" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c14b52efc56c711e0dbae3f26e0cc233f5dac336c1bf0b07e1b7dc2dca3b2cc7" +dependencies = [ + "schemars", + "serde", +] + [[package]] name = "serde_bytes" -version = "0.11.12" +version = "0.11.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "serde_derive_internals" -version = "0.25.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dbab34ca63057a1f15280bdf3c39f2b1eb1b54c17e98360e511637aef7418c6" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 1.0.109", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.7.1", "itoa", + "memchr", "ryu", "serde", ] @@ -2999,17 +4349,10 @@ dependencies = [ ] 
[[package]] -name = "sha2" -version = "0.9.9" +name = "sha1_smol" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" [[package]] name = "sha2" @@ -3032,31 +4375,28 @@ dependencies = [ ] [[package]] -name = "shellexpand" -version = "2.1.2" +name = "shlex" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ccc8076840c4da029af4f87e4e8daeb0fca6b87bbb02e10cb60b791450e11e4" -dependencies = [ - "dirs 4.0.0", -] +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] -name = "signal-hook-registry" -version = "1.4.1" +name = "signal-hook" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" dependencies = [ "libc", + "signal-hook-registry", ] [[package]] -name = "signature" -version = "1.2.2" +name = "signal-hook-registry" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f060a7d147e33490ec10da418795238fd7545bba241504d6b31a409f2e6210" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ - "digest 0.9.0", - "rand_core 0.5.1", + "libc", ] [[package]] @@ -3066,14 +4406,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", ] [[package]] name = "similar" -version = "2.3.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597" +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" [[package]] name = "slab" @@ -3086,26 +4426,20 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.2" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" @@ -3127,33 +4461,22 @@ dependencies = [ [[package]] name = "sqlformat" -version = "0.2.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c" +checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790" dependencies = [ - "itertools 0.12.0", - "nom", + "nom 7.1.3", "unicode_categories", ] [[package]] name = "sqlx" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8de3b03a925878ed54a954f621e64bf55a3c1bd29652d0d1a17830405350188" -dependencies = [ - "sqlx-core 0.6.3", - "sqlx-macros 0.6.3", -] - -[[package]] -name = "sqlx" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dba03c279da73694ef99763320dea58b51095dfe87d001b1d4b5fe78ba8763cf" 
+checksum = "c9a2ccff1a000a5a59cd33da541d9f2fdcd9e6e8229cc200565942bff36d0aaa" dependencies = [ - "sqlx-core 0.7.3", - "sqlx-macros 0.7.3", + "sqlx-core", + "sqlx-macros", "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", @@ -3161,148 +4484,77 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa8241483a83a3f33aa5fff7e7d9def398ff9990b2752b6c6112b83c6d246029" -dependencies = [ - "ahash 0.7.7", - "atoi 1.0.0", - "base64 0.13.1", - "bitflags 1.3.2", - "byteorder", - "bytes", - "crc", - "crossbeam-queue", - "dirs 4.0.0", - "dotenvy", - "either", - "event-listener", - "futures-channel", - "futures-core", - "futures-intrusive 0.4.2", - "futures-util", - "hashlink", - "hex", - "hkdf", - "hmac 0.12.1", - "indexmap 1.9.3", - "itoa", - "libc", - "log", - "md-5", - "memchr", - "once_cell", - "paste", - "percent-encoding", - "rand 0.8.5", - "rustls", - "rustls-pemfile", - "serde", - "serde_json", - "sha1", - "sha2 0.10.8", - "smallvec", - "sqlformat", - "sqlx-rt", - "stringprep", - "thiserror", - "tokio-stream", - "url", - "webpki-roots", - "whoami", -] - -[[package]] -name = "sqlx-core" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d84b0a3c3739e220d94b3239fd69fb1f74bc36e16643423bd99de3b43c21bfbd" +checksum = "24ba59a9342a3d9bab6c56c118be528b27c9b60e490080e9711a04dccac83ef6" dependencies = [ - "ahash 0.8.6", - "atoi 2.0.0", + "ahash", + "atoi", "byteorder", "bytes", "crc", "crossbeam-queue", - "dotenvy", "either", "event-listener", "futures-channel", "futures-core", - "futures-intrusive 0.5.0", + "futures-intrusive", "futures-io", "futures-util", "hashlink", "hex", - "indexmap 2.1.0", + "indexmap 2.7.1", "log", "memchr", "native-tls", "once_cell", "paste", "percent-encoding", + "rustls 0.21.12", + "rustls-pemfile 1.0.4", "serde", "serde_json", - "sha2 0.10.8", + "sha2", "smallvec", "sqlformat", - "thiserror", + 
"thiserror 1.0.69", "tokio", "tokio-stream", "tracing", "url", + "webpki-roots", ] [[package]] name = "sqlx-macros" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9966e64ae989e7e575b19d7265cb79d7fc3cbbdf179835cb0d716f294c2049c9" -dependencies = [ - "dotenvy", - "either", - "heck", - "once_cell", - "proc-macro2 1.0.70", - "quote 1.0.33", - "sha2 0.10.8", - "sqlx-core 0.6.3", - "sqlx-rt", - "syn 1.0.109", - "url", -] - -[[package]] -name = "sqlx-macros" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89961c00dc4d7dffb7aee214964b065072bff69e36ddb9e2c107541f75e4f2a5" +checksum = "4ea40e2345eb2faa9e1e5e326db8c34711317d2b5e08d0d5741619048a803127" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "sqlx-core 0.7.3", + "proc-macro2 1.0.93", + "quote 1.0.38", + "sqlx-core", "sqlx-macros-core", "syn 1.0.109", ] [[package]] name = "sqlx-macros-core" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0bd4519486723648186a08785143599760f7cc81c52334a55d6a83ea1e20841" +checksum = "5833ef53aaa16d860e92123292f1f6a3d53c34ba8b1969f152ef1a7bb803f3c8" dependencies = [ - "atomic-write-file", "dotenvy", "either", - "heck", + "heck 0.4.1", "hex", "once_cell", - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.93", + "quote 1.0.38", "serde", "serde_json", - "sha2 0.10.8", - "sqlx-core 0.7.3", + "sha2", + "sqlx-core", "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", @@ -3314,13 +4566,13 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e37195395df71fd068f6e2082247891bc11e3289624bbc776a0cdfa1ca7f1ea4" +checksum = "1ed31390216d20e538e447a7a9b959e06ed9fc51c37b514b46eb758016ecd418" dependencies = [ - "atoi 2.0.0", - "base64 0.21.5", - "bitflags 2.4.1", + "atoi", + "base64 0.21.7", 
+ "bitflags 2.8.0", "byteorder", "bytes", "crc", @@ -3334,35 +4586,35 @@ dependencies = [ "generic-array", "hex", "hkdf", - "hmac 0.12.1", + "hmac", "itoa", "log", "md-5", "memchr", "once_cell", "percent-encoding", - "rand 0.8.5", + "rand", "rsa", "serde", "sha1", - "sha2 0.10.8", + "sha2", "smallvec", - "sqlx-core 0.7.3", + "sqlx-core", "stringprep", - "thiserror", + "thiserror 1.0.69", "tracing", "whoami", ] [[package]] name = "sqlx-postgres" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ac0ac3b7ccd10cc96c7ab29791a7dd236bd94021f31eec7ba3d46a74aa1c24" +checksum = "7c824eb80b894f926f89a0b9da0c7f435d27cdd35b8c655b114e58223918577e" dependencies = [ - "atoi 2.0.0", - "base64 0.21.5", - "bitflags 2.4.1", + "atoi", + "base64 0.21.7", + "bitflags 2.8.0", "byteorder", "crc", "dotenvy", @@ -3373,60 +4625,54 @@ dependencies = [ "futures-util", "hex", "hkdf", - "hmac 0.12.1", + "hmac", "home", "itoa", "log", "md-5", "memchr", "once_cell", - "rand 0.8.5", + "rand", "serde", "serde_json", - "sha1", - "sha2 0.10.8", + "sha2", "smallvec", - "sqlx-core 0.7.3", + "sqlx-core", "stringprep", - "thiserror", + "thiserror 1.0.69", "tracing", "whoami", ] -[[package]] -name = "sqlx-rt" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804d3f245f894e61b1e6263c84b23ca675d96753b5abfd5cc8597d86806e8024" -dependencies = [ - "once_cell", - "tokio", - "tokio-rustls", -] - [[package]] name = "sqlx-sqlite" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "210976b7d948c7ba9fced8ca835b11cbb2d677c59c79de41ac0d397e14547490" +checksum = "b244ef0a8414da0bed4bb1910426e890b19e5e9bccc27ada6b797d05c55ae0aa" dependencies = [ - "atoi 2.0.0", + "atoi", "flume", "futures-channel", "futures-core", "futures-executor", - "futures-intrusive 0.5.0", + "futures-intrusive", "futures-util", "libsqlite3-sys", "log", "percent-encoding", 
"serde", - "sqlx-core 0.7.3", + "sqlx-core", "tracing", "url", "urlencoding", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -3435,20 +4681,20 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "stringprep" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" dependencies = [ - "finl_unicode", "unicode-bidi", "unicode-normalization", + "unicode-properties", ] [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" @@ -3456,27 +4702,68 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ - "strum_macros", + "strum_macros 0.24.3", +] + +[[package]] +name = "strum" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +dependencies = [ + "strum_macros 0.25.3", ] +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" + [[package]] name = "strum_macros" version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "heck", - "proc-macro2 1.0.70", - "quote 1.0.33", + "heck 0.4.1", + "proc-macro2 1.0.93", + "quote 1.0.38", "rustversion", "syn 1.0.109", ] +[[package]] +name = "strum_macros" +version = "0.25.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" +dependencies = [ + "heck 0.4.1", + "proc-macro2 1.0.93", + "quote 1.0.38", + "rustversion", + "syn 2.0.98", +] + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck 0.5.0", + "proc-macro2 1.0.93", + "quote 1.0.38", + "rustversion", + "syn 2.0.98", +] + [[package]] name = "subtle" -version = "2.4.1" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" @@ -3495,22 +4782,48 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.93", + "quote 1.0.38", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.41" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c8b28c477cc3bf0e7966561e3460130e1255f7a1cf71931075f1c5e7a7e269" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.93", + "quote 1.0.38", "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -3519,7 +4832,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.8.0", + "core-foundation", + "system-configuration-sys 0.6.0", ] [[package]] @@ -3532,6 +4856,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tabled" version = "0.10.0" @@ -3550,18 +4884,18 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "beca1b4eaceb4f2755df858b88d9b9315b7ccfd1ffd0d7a48a52602301f01a57" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro-error", - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] [[package]] name = 
"tar" -version = "0.4.40" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16afcea1f22891c49a00c751c7b63b2233284064f11a200fc624137c51e2ddb" +checksum = "c65998313f8e17d0d553d28f91a0df93e4dbbbf770279c7bc21ca0f09ea1a1f6" dependencies = [ "filetime", "libc", @@ -3570,42 +4904,69 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.1" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" dependencies = [ "cfg-if", "fastrand", - "redox_syscall 0.4.1", - "rustix 0.38.28", - "windows-sys 0.48.0", + "getrandom 0.3.1", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "termtree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", ] [[package]] name = "thiserror" -version = "1.0.51" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +dependencies = [ + "thiserror-impl 2.0.11", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ - "thiserror-impl", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "thiserror-impl" -version = "1.0.51" +version = "2.0.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -3633,11 +4994,15 @@ dependencies = [ [[package]] name = "time" -version = "0.3.31" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", + "itoa", + "libc", + "num-conv", + "num_threads", "powerfmt", "serde", "time-core", @@ -3652,18 +5017,29 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.16" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ + "num-conv", "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -3676,32 +5052,31 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.35.1" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", "libc", "mio", - "num_cpus", - "parking_lot 0.12.1", + "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -3716,32 +5091,31 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.4" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ - "rustls", + "rustls 0.23.22", "tokio", - "webpki", ] [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", 
"pin-project-lite", "tokio", - "tokio-util", + "tokio-util 0.7.13", ] [[package]] name = "tokio-tungstenite" -version = "0.20.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" +checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" dependencies = [ "futures-util", "log", @@ -3751,16 +5125,29 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] @@ -3782,29 +5169,44 @@ dependencies = [ "futures-util", "pin-project-lite", "tokio", - "tokio-util", + "tokio-util 0.7.13", "tower-layer", "tower-service", "tracing", ] +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 1.0.2", + "tokio", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name 
= "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -3814,20 +5216,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -3844,18 +5246,35 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = 
"e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ + "matchers", "nu-ansi-term", + "once_cell", + "regex", + "serde", + "serde_json", "sharded-slab", "smallvec", "thread_local", + "tracing", "tracing-core", "tracing-log", + "tracing-serde", ] [[package]] @@ -3866,19 +5285,19 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "tungstenite" -version = "0.20.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" +checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 1.2.0", "httparse", "log", - "rand 0.8.5", + "rand", "sha1", - "thiserror", + "thiserror 1.0.69", "url", "utf-8", ] @@ -3909,45 +5328,54 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicase" -version = "2.7.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-bidi" -version = "0.3.14" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" + +[[package]] +name = "unicode-bom" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" +checksum = "7eec5d1121208364f6793f7d2e222bf75a915c19557537745b195b253dd64217" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-properties" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" + [[package]] name = "unicode-segmentation" -version = "1.10.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-xid" @@ -3975,9 +5403,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.0" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna", @@ -3996,19 +5424,31 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf16_iter" +version = "1.0.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "utoipa" -version = "3.5.0" +version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d82b1bc5417102a73e8464c686eef947bdfb99fcdfc0a4f228e81afa9526470a" +checksum = "c5afb1a60e207dca502682537fefcfd9921e71d0b83e9576060f09abc6efab23" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.7.1", "serde", "serde_json", "utoipa-gen", @@ -4016,21 +5456,21 @@ dependencies = [ [[package]] name = "utoipa-gen" -version = "3.5.0" +version = "4.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d96dcd6fc96f3df9b3280ef480770af1b7c5d14bc55192baa9b067976d920c" +checksum = "20c24e8ab68ff9ee746aad22d39b5535601e6416d1b0feeabf78be986a5c4392" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "utoipa-swagger-ui" -version = "3.1.5" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84614caa239fb25b2bb373a52859ffd94605ceb256eeb1d63436325cf81e3653" +checksum = "0b39868d43c011961e04b41623e050aedf2cc93652562ff7935ce0f819aaf2da" dependencies = [ "mime_guess", "regex", @@ -4043,15 +5483,26 @@ dependencies = [ [[package]] name = "uuid" -version = "1.6.1" +version = "1.12.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" + +[[package]] +name = "uuid-simd" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +checksum = "23b082222b4f6619906941c17eb2297fff4c2fb96cb60164170522942a200bd8" +dependencies = [ + "outref", + "uuid", + "vsimd", +] [[package]] name = "valuable" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "vcpkg" @@ -4059,11 +5510,30 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "vergen" +version = "8.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" +dependencies = [ + "anyhow", + "cfg-if", + "gix", + "rustversion", + "time", +] + [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "vsimd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" [[package]] name = "vte" @@ -4078,28 +5548,28 @@ dependencies = [ [[package]] name = "vte_generate_state_changes" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" +checksum = "2e369bee1b05d510a7b4ed645f5faa90619e05437111783ea5848f28d97d3c2e" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", + "proc-macro2 1.0.93", + "quote 1.0.38", ] [[package]] name = "wait-timeout" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" dependencies = [ "libc", ] [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -4116,32 +5586,30 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e92e22e03ff1230c03a1a8ee37d2f89cd489e2e541b7550d6afad96faed169" +checksum = "4378d202ff965b011c64817db11d5829506d3404edeadb61f190d111da3f231c" dependencies = [ "async-compression", "bytes", "futures-channel", "futures-util", "headers", - "http", - "hyper", + "http 0.2.12", + "hyper 0.14.32", "log", "mime", "mime_guess", "multer", "percent-encoding", "pin-project", - "rustls-pemfile", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-stream", "tokio-tungstenite", - "tokio-util", + "tokio-util 0.7.13", "tower-service", "tracing", ] @@ -4152,77 +5620,97 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" -version = "0.2.89" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if", + "once_cell", + "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.89" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", - "once_cell", - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.39" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.89" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ - "quote 1.0.33", + "quote 1.0.38", "wasm-bindgen-macro-support", ] [[package]] name = 
"wasm-bindgen-macro-support" -version = "0.2.89" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.89" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] [[package]] name = "wasm-streams" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4609d447824375f43e1ffbc051b50ad8f4b3ae8219680c94452ea05eb240ac7" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -4233,32 +5721,19 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.66" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", ] -[[package]] -name = "webpki" -version = "0.22.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" -dependencies = [ - "ring 0.17.7", - "untrusted 0.9.0", -] - [[package]] name = "webpki-roots" -version = "0.22.6" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" -dependencies = [ - "webpki", -] +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "wheelbuf" @@ -4268,12 +5743,12 @@ checksum = "62945bc99a6a121cb2759c7bfa7b779ddf0e69b68bb35a9b23ab72276cfdcd3c" [[package]] name = "whoami" -version = "1.4.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "wasm-bindgen", - "web-sys", + "redox_syscall", + "wasite", ] [[package]] @@ -4294,11 +5769,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "winapi", + "windows-sys 0.59.0", ] [[package]] @@ -4308,12 +5783,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows-sys" -version = "0.45.0" +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ - "windows-targets 0.42.2", + "windows-result", + "windows-targets 0.52.6", ] [[package]] @@ -4331,22 +5827,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.6", ] [[package]] -name = "windows-targets" -version = "0.42.2" +name = "windows-sys" +version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets 0.52.6", ] [[package]] @@ -4366,25 +5856,20 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + 
"windows_x86_64_msvc 0.52.6", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -4393,15 +5878,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -4411,15 +5890,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -4429,15 +5902,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = 
"8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] -name = "windows_i686_msvc" -version = "0.42.2" +name = "windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -4447,15 +5920,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -4465,15 +5932,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -4483,27 +5944,30 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] -name = "windows_x86_64_msvc" -version = "0.52.0" +name = "winnow" +version = "0.6.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "1e90edd2ac1aa278a5c4599b1d89cf03074b610800f866d4026dc199d7929a28" +dependencies = [ + "memchr", +] [[package]] name = "winreg" @@ -4516,20 +5980,35 @@ dependencies = [ ] [[package]] -name = "wyz" -version = "0.2.0" +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.8.0", +] + +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" 
[[package]] name = "xattr" -version = "1.1.3" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7dae5072fe1f8db8f8d29059189ac175196e410e40ba42d5d4684ae2f750995" +checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909" dependencies = [ "libc", - "linux-raw-sys 0.4.12", - "rustix 0.38.28", + "linux-raw-sys", + "rustix", ] [[package]] @@ -4543,48 +6022,102 @@ dependencies = [ [[package]] name = "yansi" -version = "0.5.1" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", + "synstructure", +] [[package]] name = "zerocopy" -version = "0.7.31" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.31" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", +] + +[[package]] +name = "zerofrom" 
+version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", + "synstructure", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + +[[package]] +name = "zerovec" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" dependencies = [ - "zeroize_derive", + "yoke", + "zerofrom", + "zerovec-derive", ] [[package]] -name = "zeroize_derive" -version = "1.4.2" +name = "zerovec-derive" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ - "proc-macro2 1.0.70", - "quote 1.0.33", - "syn 2.0.41", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -4600,7 +6133,7 @@ dependencies = [ "crc32fast", "crossbeam-utils", "flate2", - "hmac 0.12.1", + "hmac", "pbkdf2", "sha1", "time", @@ -4628,9 +6161,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.13+zstd.1.5.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index d6ecb6e6..c4bcbd53 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,12 +1,34 @@ [workspace] resolver = "1" members = [ - "sidecar", + "event_sidecar", + "json_rpc", "listener", + "metrics", + "rpc_sidecar", + "sidecar", "types", ] [workspace.dependencies] -once_cell = "1.18.0" +anyhow = "1" async-stream = "0.3.4" +async-trait = "0.1.77" +casper-types = { git = "https://github.com/casper-network/casper-node.git", branch = "dev", features = ["json-schema"]} +casper-binary-port = { git = "https://github.com/casper-network/casper-node.git", branch = "dev" } +casper-event-sidecar = { path = "./event_sidecar", version = "1.0.0" } +casper-event-types = { path = "./types", version = "1.0.0" } +casper-rpc-sidecar = { path = "./rpc_sidecar", version = "1.0.0" } +datasize = "0.2.11" +futures = "0" futures-util = "0.3.28" +itertools = "0.10.3" +metrics = { path = "./metrics", version = "1.0.0" } +once_cell = "1.18.0" +thiserror = "1" +tokio = "1.23.1" +toml = "0.5.8" +tracing = { version = "0", default-features = false } +tracing-subscriber = "0" +serde = { version = "1", default-features = false } +jsonschema = "0.26.2" diff --git a/EXAMPLE_NCTL_CONFIG.toml b/EXAMPLE_NCTL_CONFIG.toml deleted file mode 100644 index 7dbee9e9..00000000 --- a/EXAMPLE_NCTL_CONFIG.toml +++ /dev/null @@ -1,46 +0,0 @@ -[[connections]] -ip_address = "127.0.0.1" -sse_port = 18101 -rest_port = 14101 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = true - -[[connections]] -ip_address = "127.0.0.1" -sse_port = 18102 -rest_port = 14102 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = false - 
-[[connections]] -ip_address = "127.0.0.1" -sse_port = 18103 -rest_port = 14103 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = false -connection_timeout_in_seconds = 3 - -[storage] -storage_path = "./target/storage" - -[storage.sqlite_config] -file_name = "sqlite_database.db3" -max_connections_in_pool = 100 -# https://www.sqlite.org/compile.html#default_wal_autocheckpoint -wal_autocheckpointing_interval = 1000 - -[rest_server] -port = 18888 -max_concurrent_requests = 50 -max_requests_per_second = 50 - -[event_stream_server] -port = 19999 -max_concurrent_subscribers = 100 -event_stream_buffer_length = 5000 diff --git a/EXAMPLE_NCTL_POSTGRES_CONFIG.toml b/EXAMPLE_NCTL_POSTGRES_CONFIG.toml deleted file mode 100644 index b380eb9e..00000000 --- a/EXAMPLE_NCTL_POSTGRES_CONFIG.toml +++ /dev/null @@ -1,47 +0,0 @@ -[[connections]] -ip_address = "127.0.0.1" -sse_port = 18101 -rest_port = 14101 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = true - -[[connections]] -ip_address = "127.0.0.1" -sse_port = 18102 -rest_port = 14102 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = false - -[[connections]] -ip_address = "127.0.0.1" -sse_port = 18103 -rest_port = 14103 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = false -connection_timeout_in_seconds = 3 - -[storage] -storage_path = "./target/storage" - -[storage.postgresql_config] -database_name = "event_sidecar" -host = "localhost" -database_password = "p@$$w0rd" -database_username = "postgres" -max_connections_in_pool = 30 - -[rest_server] -port = 18888 -max_concurrent_requests = 50 -max_requests_per_second = 50 - -[event_stream_server] -port = 19999 -max_concurrent_subscribers = 100 -event_stream_buffer_length = 5000 diff --git a/EXAMPLE_NODE_CONFIG.toml b/EXAMPLE_NODE_CONFIG.toml deleted 
file mode 100644 index 212db146..00000000 --- a/EXAMPLE_NODE_CONFIG.toml +++ /dev/null @@ -1,50 +0,0 @@ -[[connections]] -ip_address = "127.0.0.1" -sse_port = 9999 -rest_port = 8888 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = true - -[[connections]] -ip_address = "168.254.51.2" -sse_port = 9999 -rest_port = 8888 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = true - -[[connections]] -ip_address = "168.254.51.3" -sse_port = 9999 -rest_port = 8888 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = true - -[storage] -storage_path = "/var/lib/casper-event-sidecar" - -[storage.sqlite_config] -file_name = "sqlite_database.db3" -max_connections_in_pool = 100 -# https://www.sqlite.org/compile.html#default_wal_autocheckpoint -wal_autocheckpointing_interval = 1000 - -[rest_server] -port = 18888 -max_concurrent_requests = 50 -max_requests_per_second = 50 - -[event_stream_server] -port = 19999 -max_concurrent_subscribers = 100 -event_stream_buffer_length = 5000 - -[admin_server] -port = 18887 -max_concurrent_requests = 1 -max_requests_per_second = 1 \ No newline at end of file diff --git a/LEGACY_SSE_EMULATION.md b/LEGACY_SSE_EMULATION.md new file mode 100644 index 00000000..9cd94933 --- /dev/null +++ b/LEGACY_SSE_EMULATION.md @@ -0,0 +1,593 @@ +# The Legacy SSE Emulation + +Casper node versions 2.0 or greater (2.x) produce different SSE events than 1.x versions. Also, 1.x Casper nodes used 3 SSE endpoints (`/events/sigs`, `/events/deploys`, `/events/main`), while 2.x nodes expose all the SSE events on one endpoint (`/events`). + +Generally, the changes in 2.x regarding SSE are somewhat backward-incompatible. To collect all the data, clients should adopt the new SSE API. However, if some clients are not ready or do not need to adopt the new SSE API, they can use the legacy SSE emulation. 
+
+SSE emulation is off by default. To enable it, follow the steps below and read the main [README.md](./README.md#sse-server-configuration) file describing how to configure the SSE server.
+
+**LIMITATIONS:**
+
+Before enabling the legacy SSE emulation, consider its limitations:
+
+- The legacy SSE emulation is a temporary solution and may be removed in a future major release of the node software.
+- The legacy SSE emulation does not map 2.x events to 1.x events in a 1-to-1 fashion. Some events are omitted, some are transformed, and some are passed through. Below are more details on the emulation's limitations.
+- The legacy SSE emulation places an extra burden on resources; it will consume more resources than the "native" 2.x SSE API.
+
+> **Note**: 2.x node versions label new block events with `Version2`. In the rare case that a 2.x node sees a legacy block, it will label events coming from this block with `Version1`. The notion of Version1 and Version2 is new to 2.x, and wasn't present in 1.x node versions. So, for the legacy SSE emulation, both Version1 and Version2 BlockAdded events will be transformed to the old BlockAdded event format from 1.x.
+
+## Configuration
+
+To enable the legacy SSE emulation, set the `emulate_legacy_sse_apis` setting to `["V1"]`. Currently, this is the only possible value:
+
+```toml
+[sse_server]
+(...)
+emulate_legacy_sse_apis = ["V1"]
+(...)
+```
+
+This setting will expose three legacy SSE endpoints with the following events streamed on each endpoint:
+
+- `/events/main` - `ApiVersion`, `BlockAdded`, `DeployProcessed`, `DeployExpired`, `Fault` and `Shutdown`
+- `/events/deploys` - `ApiVersion`, `DeployAccepted` and `Shutdown`
+- `/events/sigs` - `ApiVersion`, `FinalitySignature` and `Shutdown`
+
+Those endpoints will emit events in the same format as the legacy SSE API of the Casper node.
+ +## Event Mapping + +There are limitations to what the Casper Sidecar can and will do. Below, you will find a list of mapping assumptions between 2.x events and 1.x events. + +- [`ApiVersion` events](#the-apiversion-event) +- [`BlockAdded` events](#the-blockadded-event) +- [`TransactionAccepted` events](#the-transactionaccepted-event) +- [`TransactionExpired` events](#the-transactionexpired-event) +- [`TransactionProcessed` events](#the-transactionprocessed-event) + +### `ApiVersion` events + +The legacy SSE ApiVersion event is the same as the current version. + +### `BlockAdded` events + +The Sidecar can emit a legacy `BlockAdded` event by unwrapping the 2.x event structure and creating a 1.x emulated event structure. + +A Version1 `BlockAdded` event will be unwrapped and passed as a legacy `BlockAdded` event as shown below. + +**Version1 BlockAdded in 2.x:** + +```json +{ + "BlockAdded": { + "block_hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", + "block": { + "Version1": { + "hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", + "header": { + "parent_hash": "90ca56a697f8b1b19cba08c642fd7f04669b8cd49bb9d652fca989f8a9f8bcea", + "state_root_hash": "9cce223fdbeab41dbbcf0b62f3fd857373131378d51776de26bb9f4fefe1e849", + "body_hash": "5f37be399c15b2394af48243ce10a62a7d12769dc5f7740b18ad3bf55bde5271", + "random_bit": true, + "accumulated_seed": "b3e1930565a80a874a443eaadefa1a340927fb8b347729bbd93e93935a47a9e4", + "era_end": { + "era_report": { + "equivocators": [ + "0203c9da857cfeccf001ce00720ae2e0d083629858b60ac05dd285ce0edae55f0c8e", + "02026fb7b629a2ec0132505cdf036f6ffb946d03a1c9b5da57245af522b842f145be" + ], + "rewards": [ + { + "validator": "01235b932586ae5cc3135f7a0dc723185b87e5bd3ae0ac126a92c14468e976ff25", + "amount": 129457537 + } + ], + "inactive_validators": [] + }, + "next_era_validator_weights": [ + { + "validator": "0198957673ad060503e2ec7d98dc71af6f90ad1f854fe18025e3e7d0d1bbe5e32b", + "weight": "1" + }, + 
{ + "validator": "02022d6bc4e3012cc4ae467b5525111cf7ed65883b05a1d924f1e654c64fad3a027c", + "weight": "2" + } + ] + }, + "timestamp": "2024-04-25T20:00:35.640Z", + "era_id": 601701, + "height": 6017012, + "protocol_version": "1.0.0" + }, + "body": { + "proposer": "0203426736da2554ebf1f8ee1d2ce4ab11b1e33419d7dfc1ce2fe1945faf00bacc9e", + "deploy_hashes": [ + "06950e4374dc88685634ec30bcddd68e6b46c109ccf6d29e2dfcf5367df75571", + "27a89dd58e6297a5244342b68b117afe2555131b896ad6ed4321edcd4130ae7b" + ], + "transfer_hashes": [ + "3e30b6c1c5dbca9277425846b42dc832cd3d8ce889c38d6bfc8bd95b3e1c403e", + "c990ba47146270655eaacc53d4115cbd980697f3d4e9c76bccfdfce82af6ce08" + ] + } + } + } + } +} +``` + +**Emulated 1.x BlockAdded (from Version1):** + +```json +{ + "BlockAdded": { + "block_hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", + "block": { + "hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", + "header": { + "parent_hash": "90ca56a697f8b1b19cba08c642fd7f04669b8cd49bb9d652fca989f8a9f8bcea", + "state_root_hash": "9cce223fdbeab41dbbcf0b62f3fd857373131378d51776de26bb9f4fefe1e849", + "body_hash": "5f37be399c15b2394af48243ce10a62a7d12769dc5f7740b18ad3bf55bde5271", + "random_bit": true, + "accumulated_seed": "b3e1930565a80a874a443eaadefa1a340927fb8b347729bbd93e93935a47a9e4", + "era_end": { + "era_report": { + "equivocators": [ + "0203c9da857cfeccf001ce00720ae2e0d083629858b60ac05dd285ce0edae55f0c8e", + "02026fb7b629a2ec0132505cdf036f6ffb946d03a1c9b5da57245af522b842f145be" + ], + "rewards": [ + { + "validator": "01235b932586ae5cc3135f7a0dc723185b87e5bd3ae0ac126a92c14468e976ff25", + "amount": 129457537 + } + ], + "inactive_validators": [] + }, + "next_era_validator_weights": [ + { + "validator": "0198957673ad060503e2ec7d98dc71af6f90ad1f854fe18025e3e7d0d1bbe5e32b", + "weight": "1" + }, + { + "validator": "02022d6bc4e3012cc4ae467b5525111cf7ed65883b05a1d924f1e654c64fad3a027c", + "weight": "2" + } + ] + }, + "timestamp": 
"2024-04-25T20:00:35.640Z", + "era_id": 601701, + "height": 6017012, + "protocol_version": "1.0.0" + }, + "body": { + "proposer": "0203426736da2554ebf1f8ee1d2ce4ab11b1e33419d7dfc1ce2fe1945faf00bacc9e", + "deploy_hashes": [ + "06950e4374dc88685634ec30bcddd68e6b46c109ccf6d29e2dfcf5367df75571", + "27a89dd58e6297a5244342b68b117afe2555131b896ad6ed4321edcd4130ae7b" + ], + "transfer_hashes": [ + "3e30b6c1c5dbca9277425846b42dc832cd3d8ce889c38d6bfc8bd95b3e1c403e", + "c990ba47146270655eaacc53d4115cbd980697f3d4e9c76bccfdfce82af6ce08" + ] + } + } + } +} +``` + +When the 2.x event stream emits a legacy `BlockAdded` event, the following mapping rules apply: + +- `block_hash` will be copied from Version2 to Version1. +- `block.block_hash` will be copied from Version2 to Version1. +- `block.header.era_end`: + - If the `era_end` is a Version1 variety - it will be copied. + - If the `era_end` is a Version2 variety: + - Version2 `next_era_validator_weights` will be copied from Version2 `next_era_validator_weights`. + - Version1 `era_report` will be assembled from the Version2 `era_end.equivocators`, `era_end.rewards` and `era_end.inactive_validators` fields. + - If one of the `rewards` contains a reward that doesn't fit in a u64 (because Version2 has U512 type in rewards values) - the whole `era_end` **WILL BE OMITTED** from the legacy Version1 block (value None). + - Version2 field `next_era_gas_price` has no equivalent in Version1 and will be omitted. +- `block.header.current_gas_price` this field only exists in Version2 and will be omitted from the Version1 block header. +- `block.header.proposer` will be copied from Version2 to Version1 `block.body.proposer`. +- other `block.header.*` fields will be copied from Version2 to Version1. +- `block.body.deploy_hashes` will be based on Version2 `block.body.standard` transactions. Bear in mind, that only values of transactions of type `Deploy` will be copied to Version1 `block.body.deploy_hashes` array. 
+- `block.body.transfer_hashes` will be based on Version2 `block.body.mint` transactions. Bear in mind, that only values of transactions of type `Deploy` will be copied to Version1 `block.body.transfer_hashes` array. + +Here is an example mapping demonstrating the rules above: + +**Version2 BlockAdded in 2.x:** + +```json +{ + "BlockAdded": { + "block_hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", + "block": { + "Version2": { + "hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", + "header": { + "proposer": "01d3eec0445635f136ae560b43e9d8f656a6ba925f01293eaf2610b39ebe0fc28d", + "parent_hash": "b8f5e9afd2e54856aa1656f962d07158f0fdf9cfac0f9992875f31f6bf2623a2", + "state_root_hash": "cbf02d08bb263aa8915507c172b5f590bbddcd68693fb1c71758b5684b011730", + "body_hash": "6041ab862a1e14a43a8e8a9a42dad27091915a337d18060c22bd3fe7b4f39607", + "random_bit": false, + "accumulated_seed": "a0e424710f4fba036ba450b40f2bd7a842b176cf136f3af1952a2a13eb02616c", + "era_end": { + "equivocators": [ + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc54", + "0203e4532e401326892aa8ebc16b6986bd35a6c96a1f16c28db67fd7e87cb6913817", + "020318a52d5b2d545def8bf0ee5ea7ddea52f1fbf106c8b69848e40c5460e20c9f62" + ], + "inactive_validators": [ + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc55", + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc56" + ], + "next_era_validator_weights": [ + { + "validator": "02038b238d774c3c4228a0430e3a078e1a2533f9c87cccbcf695637502d8d6057a63", + "weight": "1" + }, + { + "validator": "0102ffd4d2812d68c928712edd012fbcad54367bc6c5c254db22cf696772856566", + "weight": "2" + } + ], + "rewards": { + "02028b18c949d849b377988ea5191b39340975db25f8b80f37cc829c9f79dbfb19fc": "749546792", + "02028002c063228ff4e9d22d69154c499b86a4f7fdbf1d1e20f168b62da537af64c2": "788342677", + "02038efa405f648c72f36b0e5f37db69ab213d44404591b24de21383d8cc161101ec": "86241635", + 
"01f6bbd4a6fd10534290c58edb6090723d481cea444a8e8f70458e5136ea8c733c": "941794198" + }, + "next_era_gas_price": 1 + }, + "timestamp": "2024-04-25T20:31:39.895Z", + "era_id": 419571, + "height": 4195710, + "protocol_version": "2.0.0", + "current_gas_price": 1 + }, + "body": { + "transactions": { + "0": [{ + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e80" + }, + { + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e81" + }, + { + "Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e82" + }], + "1": [{ + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e83" + }, + { + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e84" + }, + { + "Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e85" + }], + "2": [{ + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e86" + }, + { + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e87" + }, + { + "Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e88" + }], + "3": [{ + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e89" + }, + { + "Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e90" + }, + { + "Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e91" + }] + } + "rewarded_signatures": [[240], [0], [0]] + } + } + } + } +} +``` + +**Emulated 1.x BlockAdded (from Version2 BlockAdded):** + +```json +{ + "BlockAdded": { + "block_hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", + "block": { + "hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", + "header": { + "parent_hash": "b8f5e9afd2e54856aa1656f962d07158f0fdf9cfac0f9992875f31f6bf2623a2", + "state_root_hash": "cbf02d08bb263aa8915507c172b5f590bbddcd68693fb1c71758b5684b011730", + "body_hash": "6041ab862a1e14a43a8e8a9a42dad27091915a337d18060c22bd3fe7b4f39607", + 
"random_bit": false, + "accumulated_seed": "a0e424710f4fba036ba450b40f2bd7a842b176cf136f3af1952a2a13eb02616c", + "era_end": { + "era_report": { + "equivocators": [ + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc54", + "0203e4532e401326892aa8ebc16b6986bd35a6c96a1f16c28db67fd7e87cb6913817", + "020318a52d5b2d545def8bf0ee5ea7ddea52f1fbf106c8b69848e40c5460e20c9f62" + ], + "rewards": [ + { + "validator": "01f6bbd4a6fd10534290c58edb6090723d481cea444a8e8f70458e5136ea8c733c", + "amount": 941794198 + }, + { + "validator": "02028002c063228ff4e9d22d69154c499b86a4f7fdbf1d1e20f168b62da537af64c2", + "amount": 788342677 + }, + { + "validator": "02028b18c949d849b377988ea5191b39340975db25f8b80f37cc829c9f79dbfb19fc", + "amount": 749546792 + }, + { + "validator": "02038efa405f648c72f36b0e5f37db69ab213d44404591b24de21383d8cc161101ec", + "amount": 86241635 + } + ], + "inactive_validators": [ + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc55", + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc56" + ] + }, + "next_era_validator_weights": [ + { + "validator": "0102ffd4d2812d68c928712edd012fbcad54367bc6c5c254db22cf696772856566", + "weight": "2" + }, + { + "validator": "02038b238d774c3c4228a0430e3a078e1a2533f9c87cccbcf695637502d8d6057a63", + "weight": "1" + } + ] + }, + "timestamp": "2024-04-25T20:31:39.895Z", + "era_id": 419571, + "height": 4195710, + "protocol_version": "1.0.0" + }, + "body": { + "proposer": "01d3eec0445635f136ae560b43e9d8f656a6ba925f01293eaf2610b39ebe0fc28d", + "deploy_hashes": [ + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e89", + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e90" + ], + "transfer_hashes": [ + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e80", + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e81" + ] + } + } + } +} +``` + +### `TransactionAccepted` events + +Version1 `TransactionAccepted` events will be unwrapped and translated to 
legacy `DeployAccepted` events on the legacy SSE stream. + +**Version1 TransactionAccepted in 2.x:** + +```json +{ + "TransactionAccepted": { + "Deploy": { + "hash": "5a7709969c210db93d3c21bf49f8bf705d7c75a01609f606d04b0211af171d43", + "header": { + "account": "02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", + "timestamp": "2020-08-07T01:28:27.360Z", + "ttl": "4m 22s", + "gas_price": 72, + "body_hash": "aa2a111c086628a161001160756c5884e32fde0356bb85f484a3e55682ad089f", + "dependencies": [], + "chain_name": "casper-example" + }, + "payment": { + "StoredContractByName": { + "name": "casper-example", + "entry_point": "example-entry-point", + "args": [ + [ + "amount", + { + "cl_type": "U512", + "bytes": "0400f90295", + "parsed": "2500000000" + } + ] + ] + } + }, + "session": { + "StoredContractByHash": { + "hash": "dfb621e7012df48fe1d40fd8015b5e2396c477c9587e996678551148a06d3a89", + "entry_point": "8sY9fUUCwoiFZmxKo8kj", + "args": [ + [ + "YbZWtEuL4D6oMTJmUWvj", + { + "cl_type": { + "List": "U8" + }, + "bytes": "5a000000909ffe7807b03a5db0c3c183648710db16d408d8425a4e373fc0422a4efed1ab0040bc08786553fcac4521528c9fafca0b0fb86f4c6e9fb9db7a1454dda8ed612c4ea4c9a6378b230ae1e3c236e37d6ebee94339a56cb4be582a", + "parsed": [144, 159, 254, 120, 7] + } + ] + ] + } + }, + "approvals": [ + { + "signer": "02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", + "signature": "025d0a7ba37bebe6774681ca5adecb70fa4eef56821eb344bf0f6867e171a899a87edb2b8bf70f2cb47a1670a6baf2cded1fad535ee53a2f65da91c82ebf30945b" + } + ] + } + } +} +``` + +**Emulated 1.x DeployAccepted (from Version1):** + +```json +{ + "DeployAccepted": { + "hash": "5a7709969c210db93d3c21bf49f8bf705d7c75a01609f606d04b0211af171d43", + "header": { + "account": "02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", + "timestamp": "2020-08-07T01:28:27.360Z", + "ttl": "4m 22s", + "gas_price": 72, + "body_hash": 
"aa2a111c086628a161001160756c5884e32fde0356bb85f484a3e55682ad089f", + "dependencies": [], + "chain_name": "casper-example" + }, + "payment": { + "StoredContractByName": { + "name": "casper-example", + "entry_point": "example-entry-point", + "args": [ + [ + "amount", + { + "cl_type": "U512", + "bytes": "0400f90295", + "parsed": "2500000000" + } + ] + ] + } + }, + "session": { + "StoredContractByHash": { + "hash": "dfb621e7012df48fe1d40fd8015b5e2396c477c9587e996678551148a06d3a89", + "entry_point": "8sY9fUUCwoiFZmxKo8kj", + "args": [ + [ + "YbZWtEuL4D6oMTJmUWvj", + { + "cl_type": { + "List": "U8" + }, + "bytes": "5a000000909ffe7807b03a5db0c3c183648710db16d408d8425a4e373fc0422a4efed1ab0040bc08786553fcac4521528c9fafca0b0fb86f4c6e9fb9db7a1454dda8ed612c4ea4c9a6378b230ae1e3c236e37d6ebee94339a56cb4be582a", + "parsed": [144, 159, 254, 120, 7] + } + ] + ] + } + }, + "approvals": [ + { + "signer": "02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", + "signature": "025d0a7ba37bebe6774681ca5adecb70fa4eef56821eb344bf0f6867e171a899a87edb2b8bf70f2cb47a1670a6baf2cded1fad535ee53a2f65da91c82ebf30945b" + } + ] + } +} +``` + +All Version1 variants will be omitted from legacy SSE streams. For example, the following Version1 `TransactionAccepted` event will not be streamed: + +```json +"TransactionAccepted": { + "Version1": { + ... +``` + +### `TransactionExpired` events + +Other transaction types will be unwrapped and sent as legacy deploy types. + +A 2.x `TransactionExpired` event will be mapped to a `DeployExpired` event. + +**TransactionExpired mapped to DeployExpired:** + +```json +{ + "TransactionExpired": { + "transaction_hash": { + "Deploy": "565d7147e28be402c34208a133fd59fde7ac785ae5f0298cb5fb7adfb1b054a8" + } + } +} +``` + +```json +{ + "DeployExpired": { + "deploy_hash": "565d7147e28be402c34208a133fd59fde7ac785ae5f0298cb5fb7adfb1b054a8" + } +} +``` + +All Version1 variants will be omitted from legacy SSE streams. 
For example, the following Version1 `TransactionExpired` event will not be streamed: + +```json +{ + "TransactionExpired": { + "Version1": { + "hash": "565d7147e28be402c34208a133fd59fde7ac785ae5f0298cb5fb7adfb1b054a8" + } + } +} +``` + +### `TransactionProcessed` events + +When translating a `TransactionProcessed` event to a legacy `DeployProcessed` event, the following rules apply: + +- If the `transaction_hash` field contains `Version1`, the event will be ignored. +- If the `transaction_hash` field is a `Deploy`, its value will be used as `DeployProcessed.deploy_hash`. + - If the `initiator_addr` field is not a `PublicKey` type, the event will be omitted. + - If the `initiator_addr` field is a `PublicKey` type, its value will be used as `DeployProcessed.account`. + - `timestamp`, `ttl`, `block_hash` will be filled from analogous fields in the `TransactionProcessed` event. + - If the `execution_result` contains `Version1`, its value will be copied as-is to the `DeployProcessed.execution_result` field. + - If the `execution_result` contains `Version2`, see [this paragraph](#translating-executionresultv2). + +#### Translating `ExecutionResultV2` + +When translating the `ExecutionResultV2` (`ex_v2`) to a legacy `ExecutionResult` (`ex_v1`), the following rules apply: + +- If the `ex_v2.error_message` is not empty, the `ExecutionResult` will be of type `Failure`, and the `ex_v1.error_message` will be set to that value. Otherwise, `ex_v1` will be of type `Success`. +- The `ex_v1.cost` will be set to the `ex_v2.cost`. +- The `ex_v1.transfers` list will always be empty since the 2.x node no longer uses a `TransferAddr` notion. +- The `ex_v1.effect` will be populated based on the `ex_v2.effects` field, applying the rules from [Translating Effects from Version2](#translating-effects-from-v2). 
+ +#### Translating `Effects` from Version2 + +When translating the `Effects` from Version2 to Version1, the following rules apply: + +- The output `operations` field will always be an empty list since the 2.x node no longer uses this concept for execution results. +- For `transforms`, the objects will be constructed based on the `ex_v2.effects` with the following exceptions: + - The Version2 `AddKeys` transform will be translated to the Version1 `NamedKeys` transform. + - The Version2 `Write` transform will be translated by applying the rules from paragraph [Translating Write transforms from Version2](#translating-write-transform-from-v2). If at least one `Write` transform is not translatable (yielding a `None` value), the transform will be an empty array. + +#### Translating `Write` transforms from Version2 + +When translating `Write` transforms from Version2 to Version1, the following rules apply: + +- `CLValue`: will be copied to the `WriteCLValue` transform. +- `Account`: will be copied to the `WriteAccount` transform, assigning the Version2 `account_hash` as the value for `WriteAccount`. +- `ContractWasm`: a `WriteContractWasm` transform will be created. Please note that the `WriteContractWasm` will not contain data, so the Version2 details will be omitted. +- `Contract`: a `WriteContract` transform will be created. Please note that the `WriteContract` will not contain data, so the Version2 details will be omitted. +- `ContractPackage`: a `WriteContractPackage` transform will be created. Please note that the `WriteContractPackage` will not contain data, so the Version2 details will be omitted. +- `LegacyTransfer`: a `WriteTransfer` transform will be created. Data will be copied. +- `DeployInfo`: a `WriteDeployInfo` transform will be created. Data will be copied. +- `EraInfo`: an `EraInfo` transform will be created. Data will be copied. +- `Bid`: a `WriteBid` transform will be created. Data will be copied. 
+- `Withdraw`: a `WriteWithdraw` transform will be created. Data will be copied. +- `NamedKey`: will be translated into an `AddKeys` transform. Data will be copied. +- `AddressableEntity`: the mapping will yield value `None`, meaning no value will be created. +- `BidKind`: the mapping will yield value `None`, meaning no value will be created. +- `Package`: the mapping will yield value `None`, meaning no value will be created. +- `ByteCode`: the mapping will yield value `None`, meaning no value will be created. +- `MessageTopic`: the mapping will yield value `None`, meaning no value will be created. +- `Message`: the mapping will yield value `None`, meaning no value will be created. diff --git a/README.md b/README.md index 1ade2677..e5ab8efe 100644 --- a/README.md +++ b/README.md @@ -1,51 +1,284 @@ -# Casper Event Sidecar README +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) + +[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE) + +# The Casper Sidecar + +- [Summary of Purpose](#summary-of-purpose) +- [System Components and Architecture](#system-components-and-architecture) + - [The SSE server](#the-sse-server) + - [The REST API server](#the-rest-api-server) + - [The Admin API server](#the-admin-api-server) + - [The RPC API server](#the-rpc-api-server) +- [Configuring the Sidecar](#configuring-the-sidecar) + - [RPC server setup](#rpc-server-setup) + - [SSE server setup](#sse-server-setup) + - [Configuring SSE node connections](#configuring-sse-node-connections) + - [Configuring SSE legacy emulations](#configuring-sse-legacy-emulations) + - [Configuring the event stream](#configuring-the-event-stream) + - [REST server setup](#rest-server-setup) + - [Storage setup](#setup-storage) + - [Database connectivity setup](#database-connectivity-setup) + - [SQLite database](#sqlite-database) + - 
[PostgreSQL database](#postgresql-database) + - [Admin server setup](#admin-server-setup) +- [Running and Testing the Sidecar](#running-and-testing-the-sidecar) + - [Prerequisites](#prerequisites) + - [Running the Sidecar](#running-the-sidecar) + - [Testing the Sidecar](#testing-the-sidecar) +- [Swagger Documentation](#swagger-documentation) +- [OpenAPI Specification](#openapi-specification) +- [Troubleshooting Tips](#troubleshooting-tips) + - [Checking liveness](#checking-liveness) + - [Checking the node connection](#checking-the-node-connection) + - [Diagnosing errors](#diagnosing-errors) + - [Monitoring memory consumption](#monitoring-memory-consumption) + - [Ensuring sufficient storage](#ensuring-sufficient-storage) + - [Inspecting the REST API](#inspecting-the-rest-api) + - [Limiting concurrent requests](#limiting-concurrent-requests) ## Summary of Purpose -The Casper Event Sidecar is an application that runs in tandem with the node process. This reduces the load on the node process by allowing subscribers to monitor the event stream through the Sidecar while the node focuses entirely on the blockchain. Users needing access to the JSON-RPC will still need to query the node directly. +The Casper Sidecar is an application running in tandem with the node process. It allows subscribers to monitor a node's event stream, query stored events, and query the node's JSON RPC API, thus receiving faster responses and reducing the load placed on the node. Its primary purpose is to: + +- Offload the node from broadcasting SSE events to multiple clients. +- Provide client features that aren't part of the nodes' functionality, nor should they be. While the primary use case for the Sidecar application is running alongside the node on the same machine, it can be run remotely if necessary. 
-### System Components & Architecture +## System Components and Architecture + +The Casper Sidecar provides the following functionalities: + +- A server-sent events (SSE) server with an `/events` endpoint that streams all the events received from all connected nodes. The Sidecar also stores these events. +- A REST API server that allows clients to query stored events. +- A JSON RPC bridge between end users and a Casper node's binary port. +- Legacy emulation for clients using older versions of the SSE API. + +The Sidecar has the following components and external dependencies: + +```mermaid +--- +title: The Casper Sidecar Components +--- + graph LR; + subgraph CASPER_SIDECAR + SSE_SERVER["SSE server"] + RPC_API_SERVER["RPC API server (JSON)"] + REST_API["Rest API server"] + ADMIN_API["Admin API server"] + end + CONFIG{{"Config file (TOML)"}} + CONFIG --> CASPER_SIDECAR + STORAGE[(Storage)] + NODE_SSE(("Casper node SSE port")) + NODE_BINARY(("Casper node binary port")) + RPC_API_SERVER --> NODE_BINARY + SSE_SERVER --> NODE_SSE + SSE_SERVER --> STORAGE + STORAGE --> REST_API +``` + +### The SSE server + +The SSE Server has these components: + +```mermaid + graph TD; + CLIENT{Client} + CLIENT --> SSE_SERVER_API + STORAGE[("Storage")] + CONFIG{{"Config file (TOML)"}} + MAIN --1.reads--> CONFIG + NODE_SSE{Node SSE port} + SSE_LISTENER --2--> STORAGE + NODE_SSE --1--> SSE_LISTENER + subgraph CASPER_SIDECAR + MAIN[main.rs] + MAIN --2.spawns---> SSE_SERVER + subgraph SSE_SERVER + SSE_SERVER_API["SSE API"] + RING_BUFFER["Events buffer"] + SSE_SERVER_API --> RING_BUFFER + SSE_LISTENER --3--> RING_BUFFER + subgraph "connection" + SSE_LISTENER["SSE listener"] + end + end + end +``` + +The SSE Listener processes events in this order: + +1. Fetch an event from the node's SSE port. +2. Store the event. +3. Publish the event to the SSE API. + +Casper nodes offer an event stream API that returns server-sent events (SSEs) with JSON-encoded data. 
The Sidecar reads the event stream of all connected nodes, acting as a passthrough and replicating the SSE interface of the connected nodes. + +The Sidecar can: + +- Republish the current events from the node to clients listening to Sidecar's SSE API. +- Publish a configurable number of previous events to clients connecting to the Sidecar's SSE API with `?start_from=` query (similar to the node's SSE API). +- Store the events in external storage for clients to query them via the Sidecar's REST API. + +Enabling and configuring the SSE Server of the Sidecar is optional. + +### The REST API server + +The Sidecar offers an optional REST API that allows clients to query the events stored in external storage. You can discover the specific endpoints of the REST API using [OpenAPI](#openapi-specification) and [Swagger](#swagger-documentation). The [usage instructions](USAGE.md) provide more details. + +```mermaid + graph LR; + CLIENT{Client} + CLIENT --> REST_API + STORAGE[("Storage")] + REST_API --> STORAGE + CONFIG{{"Config file (TOML)"}} + MAIN --1.reads--> CONFIG + subgraph CASPER_SIDECAR + MAIN[main.rs] + MAIN --2.spawns--> REST_API + REST_API["REST API"] + end +``` + +### The Admin API server + +The Sidecar offers an administrative API to allow an operator to check its current status. The Sidecar operator has the option to enable and configure this API. Please see the [admin server configuration](#admin-server) for details. + +```mermaid + graph LR; + CLIENT{Client} + CLIENT --> ADMIN_API + CONFIG{{Config file}} + MAIN --1.reads--> CONFIG + subgraph CASPER_SIDECAR + MAIN[main.rs] + MAIN --2.spawns--> ADMIN_API + ADMIN_API["ADMIN API"] + end +``` + +### The RPC API server + +The Sidecar also offers an RPC JSON API server that can be enabled and configured so that clients can interact with a Casper network. It is a JSON bridge between end users and a Casper node's binary port. The RPC API server forwards requests to the Casper node's binary port. 
For more details on how the RPC JSON API works, see the [RPC Sidecar README](rpc_sidecar/README.md). + +```mermaid + graph LR; + CLIENT{Client} + CLIENT --> RPC_API + CONFIG{{Config file}} + MAIN --1.reads--> CONFIG + CASPER_NODE(("Casper Node binary port")) + RPC_API --forwards request--> CASPER_NODE + subgraph "Casper Sidecar" + MAIN[main.rs] + MAIN --2.spawns--> RPC_API + RPC_API["RPC JSON API"] + end +``` + +## Configuring the Sidecar + +The Sidecar service must be configured using a `.toml` file specified at runtime. -![Sidecar Diagram](/images/SidecarDiagram.png) +This repository contains several sample configuration files that can be used as examples and adjusted according to your scenario: -Casper Nodes offer a Node Event Stream API returning Server-Sent Events (SSEs) that hold JSON-encoded data. The SSE Sidecar uses this API to achieve the following goals: +- [EXAMPLE_NCTL_CONFIG.toml](./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml) - Configuration for connecting to nodes on a local NCTL network. This configuration is used in the unit and integration tests found in this repository. +- [EXAMPLE_NCTL_POSTGRES_CONFIG.toml](./resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml) - Configuration for using the PostgreSQL database and nodes on a local NCTL network. +- [EXAMPLE_NODE_CONFIG.toml](./resources/example_configs/EXAMPLE_NODE_CONFIG.toml) - Configuration for connecting to live nodes on a Casper network. -* Build a sidecar middleware service that reads the Event Stream of all connected nodes, acting as a passthrough and replicating the SSE interface of the connected nodes and their filters (i.e., `/main`, `/deploys`, and `/sigs` with support for the use of the `?start_from=` query to allow clients to get previously sent events from the Sidecar's buffer). 
+Once you create the configuration file and are ready to run the Sidecar service, you must provide the configuration as an argument using the `-- --path-to-config` option as described [here](#running-the-sidecar). -* Provide a new RESTful endpoint that is discoverable to node operators. See the [usage instructions](USAGE.md) for details. +### RPC server setup -The SSE Sidecar uses one ring buffer for outbound events, providing some robustness against unintended subscriber disconnects. If a disconnected subscriber re-subscribes before the buffer moves past their last received event, there will be no gap in the event history if they use the `start_from` URL query. +Here is an example configuration for the RPC API server: -## Prerequisites +```toml +[rpc_server.main_server] +enable_server = true +address = '0.0.0.0:7777' +qps_limit = 100 +max_body_bytes = 2_621_440 +cors_origin = '' -* CMake 3.1.4 or greater -* [Rust](https://www.rust-lang.org/tools/install) -* pkg-config -* gcc -* g++ +[rpc_server.node_client] +address = '0.0.0.0:28101' +max_message_size_bytes = 4_194_304 +message_timeout_secs = 10 +client_access_timeout_secs = 10 -## Configuration +[rpc_server.speculative_exec_server] +enable_server = true +address = '0.0.0.0:7778' +qps_limit = 1 +max_body_bytes = 2_621_440 +cors_origin = '' -The SSE Sidecar service must be configured using a `.toml` file specified at runtime. -This repository contains several sample configuration files that can be used as examples and adjusted according to your scenario: +[rpc_server.node_client.exponential_backoff] +initial_delay_ms = 1000 +max_delay_ms = 32_000 +coefficient = 2 +max_attempts = 30 +``` -- [EXAMPLE_NCTL_CONFIG.toml](./EXAMPLE_NCTL_CONFIG.toml) - Configuration for connecting to nodes on a local NCTL network. 
This configuration is used in the unit and integration tests found in this repository -- [EXAMPLE_NCTL_POSTGRES_CONFIG.toml](./EXAMPLE_NCTL_POSTGRES_CONFIG.toml) - Configuration for using the PostgreSQL database and nodes on a local NCTL network -- [EXAMPLE_NODE_CONFIG.toml](./EXAMPLE_NODE_CONFIG.toml) - Configuration for connecting to live nodes on a Casper network and setting up an admin server +- `main_server.enable_server` - The RPC API server will be enabled if set to true. +- `main_server.address` - Address under which the main RPC API server will be available. +- `main_server.qps_limit` - The maximum number of requests per second. +- `main_server.max_body_bytes` - Maximum body size of request to API in bytes. +- `main_server.cors_origin` - Configures the CORS origin. -Once you create the configuration file and are ready to run the Sidecar service, you must provide the configuration as an argument using the `-- --path-to-config` option as described [here](#running-the-sidecar). +- `speculative_exec_server.enable_server` - If set to true, the speculative RPC API server will be enabled. +- `speculative_exec_server.address` - Address under which the speculative RPC API server will be available. +- `speculative_exec_server.qps_limit` - The maximum number of requests per second. +- `speculative_exec_server.max_body_bytes` - Maximum body size of request to API in bytes. +- `speculative_exec_server.cors_origin` - Configures the CORS origin. -### Node Connections +- `node_client.address` - Address of the Casper Node binary port. +- `node_client.max_message_size_bytes` - Maximum binary port message size in bytes. +- `node_client.message_timeout_secs` - Timeout for the message. +- `node_client.client_access_timeout_secs` - Timeout for the client connection. -The Sidecar can connect to Casper nodes with versions greater or equal to `1.5.2`. +- `node_client.exponential_backoff.initial_delay_ms` - Timeout after the first broken connection (backoff) in milliseconds. 
+- `node_client.exponential_backoff.max_delay_ms` - Maximum timeout after a broken connection in milliseconds. +- `node_client.exponential_backoff.coefficient` - Coefficient for the exponential backoff. The next timeout is calculated as min(`current_timeout * coefficient`, `max_delay_ms`). +- `node_client.exponential_backoff.max_attempts` - Maximum number of times to try to reconnect to the binary port of the node. -The `node_connections` option configures the node (or multiple nodes) to which the Sidecar will connect and the parameters under which it will operate with that node. Connecting to multiple nodes requires multiple `[[connections]]` sections. +### SSE server setup +The Sidecar SSE server is used to connect to Casper nodes, listen to events from them, store them locally and re-broadcast them to clients. Here is a sample configuration for the SSE server: + +```toml +[sse_server] +enable_server = true +emulate_legacy_sse_apis = ["V1"] +disable_event_persistence = false + +[[sse_server.connections]] + + +[sse_server.event_stream_server] + ``` -[[connections]] + +- `sse_server.enable_server` - If set to true, the SSE server will be enabled. +- `sse_server.emulate_legacy_sse_apis` - A list of legacy Casper node SSE APIs to emulate. The Sidecar will expose SSE endpoints that are compatible with specified versions. Please bear in mind that this feature is an emulation and should be used only for transition periods. In most scenarios, having a 1-to-1 mapping of new messages into old formats is impossible, so this can be a process that loses some data and/or doesn't emit all messages that come from the Casper node. See the [Legacy SSE Emulation](./LEGACY_SSE_EMULATION.md) page for more details. +- `sse_server.disable_event_persistence` - If set to true, SSE server will not send events to storage. This is useful if you want to use sidecar only as a pass-through for sse events. The property is optional, if not defined it will behave as false. 
+ +#### Configuring SSE node connections + +The Sidecar's SSE component can connect to Casper nodes' SSE endpoints with versions greater or equal to `2.0.0`. + +The `node_connections` option configures the node (or multiple nodes) to which the Sidecar will connect and the parameters under which it will operate with that node. Connecting to multiple nodes requires multiple `[[sse_server.connections]]` sections. + +```toml +[sse_server] +enable_server = true +disable_event_persistence = false + +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18101 rest_port = 14101 @@ -57,7 +290,7 @@ connection_timeout_in_seconds = 3 no_message_timeout_in_seconds = 60 sleep_between_keep_alive_checks_in_seconds = 30 -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18102 rest_port = 14102 @@ -69,7 +302,7 @@ connection_timeout_in_seconds = 3 no_message_timeout_in_seconds = 60 sleep_between_keep_alive_checks_in_seconds = 30 -[[connections]] +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 18103 rest_port = 14103 @@ -82,57 +315,108 @@ no_message_timeout_in_seconds = 60 sleep_between_keep_alive_checks_in_seconds = 30 ``` -* `ip_address` - The IP address of the node to monitor. -* `sse_port` - The node's event stream (SSE) port. This [example configuration](EXAMPLE_NODE_CONFIG.toml) uses port `9999`. -* `rest_port` - The node's REST endpoint for status and metrics. This [example configuration](EXAMPLE_NODE_CONFIG.toml) uses port `8888`. -* `max_attempts` - The maximum number of attempts the Sidecar will make to connect to the node. If set to `0`, the Sidecar will not attempt to connect. -* `delay_between_retries_in_seconds` - The delay between attempts to connect to the node. -* `allow_partial_connection` - Determining whether the Sidecar will allow a partial connection to this node. -* `enable_logging` - This enables the logging of events from the node in question. 
-* `connection_timeout_in_seconds` - Number of seconds before the connection request times out. Parameter is optional, defaults to 5 -* `no_message_timeout_in_seconds` - Number of seconds after which the connection will be restarted if no bytes were received. Parameter is optional, defaults to 120 -* `sleep_between_keep_alive_checks_in_seconds` - Optional parameter specifying the time intervals (in seconds) for checking if the connection is still alive. Defaults to 60 +- `ip_address` - The IP address of the node to monitor. +- `sse_port` - The node's event stream (SSE) port. This [example configuration](./resources/example_configs/EXAMPLE_NODE_CONFIG.toml) uses port `9999`. +- `rest_port` - The node's REST endpoint for status and metrics. This [example configuration](./resources/example_configs/EXAMPLE_NODE_CONFIG.toml) uses port `8888`. +- `max_attempts` - The maximum number of attempts the Sidecar will make to connect to the node. If set to `0`, the Sidecar will not attempt to connect. +- `delay_between_retries_in_seconds` - The delay between attempts to connect to the node. +- `allow_partial_connection` - Determining whether the Sidecar will allow a partial connection to this node. +- `enable_logging` - This enables the logging of events from the node in question. +- `connection_timeout_in_seconds` - Number of seconds before the connection request times out. This parameter is optional, and defaults to 5. +- `no_message_timeout_in_seconds` - Number of seconds after which the connection will be restarted if no bytes were received. This parameter is optional, and defaults to 120. +- `sleep_between_keep_alive_checks_in_seconds` - Optional parameter specifying the time intervals (in seconds) for checking if the connection is still alive. Defaults to 60. -### Storage +#### Configuring SSE legacy emulations -This directory stores the SSE cache and an SQLite database if the Sidecar is configured to use SQLite. 
+Applications using version 1 of a Casper node's event stream server can still function using an emulated V1 SSE API for a limited time. Enabling the V1 SSE API emulation requires the `emulate_legacy_sse_apis` setting to be `["V1"]`: +```toml +[sse_server] +enable_server = true +emulate_legacy_sse_apis = ["V1"] +disable_event_persistence = false ``` + +This setting will expose three legacy SSE endpoints with the following events streamed on each endpoint: + +- `/events/sigs` - Finality Signature events +- `/events/deploys` - DeployAccepted events +- `/events/main` - All other legacy events, including BlockAdded, DeployProcessed, DeployExpired, Fault, Step, and Shutdown events + +See the [Legacy SSE Emulation](./LEGACY_SSE_EMULATION.md) page for more details. + +#### Configuring the event stream + +To configure the Sidecar's event stream server, specify the following settings: + +```toml +[sse_server.event_stream_server] +port = 19999 +max_concurrent_subscribers = 100 +event_stream_buffer_length = 5000 +``` + +- `event_stream_server.port` - The port under which the Sidecar's SSE server publishes events. +- `event_stream_server.max_concurrent_subscribers` - The maximum number of subscribers that can monitor the Sidecar's event stream. +- `event_stream_server.event_stream_buffer_length` - The number of events that the stream will hold in its buffer for reference when a subscriber reconnects. + +### REST server setup + +The following section determines outbound connection criteria for the Sidecar's REST server. + +```toml +[rest_api_server] +enable_server = true +port = 18888 +max_concurrent_requests = 50 +max_requests_per_second = 50 +request_timeout_in_seconds = 10 +``` + +- `enable_server` - If set to true, the REST API server will be enabled. +- `port` - The port for accessing the Sidecar's REST server. `18888` is the default, but operators are free to choose their own port as needed. 
+- `max_concurrent_requests` - The maximum total number of simultaneous requests that can be made to the REST server. +- `max_requests_per_second` - The maximum total number of requests that can be made per second. +- `request_timeout_in_seconds` - The total time before a request times out. + +### Storage setup + +This directory stores the SSE cache and an SQLite database if the Sidecar was configured to use SQLite. + +```toml [storage] -storage_path = "./target/storage" +storage_folder = "./target/storage" ``` -### Database Connectivity +### Database connectivity setup -The Sidecar can connect to different types of databases. The current options are `SQLite` or `PostgreSQL`. The following sections show how to configure the database connection for one of these DBs. Note that the Sidecar can only connect to one DB at a time. +The Sidecar can connect to different types of databases. The current options are `SQLite` or `PostgreSQL`. The following sections show how to configure the database connection. Note that the Sidecar can only connect to one database at a time. -#### SQLite Database +#### SQLite database This section includes configurations for the SQLite database. -``` +```toml [storage.sqlite_config] file_name = "sqlite_database.db3" max_connections_in_pool = 100 -# https://www.sqlite.org/compile.html#default_wal_autocheckpoint wal_autocheckpointing_interval = 1000 ``` -* `file_name` - The database file path. -* `max_connections_in_pool` - The maximum number of connections to the database. (Should generally be left as is.) -* `wal_autocheckpointing_interval` - This controls how often the system commits pages to the database. The value determines the maximum number of pages before forcing a commit. More information can be found [here](https://www.sqlite.org/compile.html#default_wal_autocheckpoint). +- `storage.sqlite_config.file_name` - The database file name. The base folder where this file will be stored comes from `storage.storage_folder`. 
+- `storage.sqlite_config.max_connections_in_pool` - The maximum number of connections to the database (should generally be left as is). +- `storage.sqlite_config.wal_autocheckpointing_interval` - This controls how often the system commits pages to the database. The value determines the maximum number of pages before forcing a commit. More information can be found [here](https://www.sqlite.org/compile.html#default_wal_autocheckpoint). -#### PostgreSQL Database +#### PostgreSQL database The properties listed below are elements of the PostgreSQL database connection that can be configured for the Sidecar. -* `database_name` - Name of the database. -* `host` - URL to PostgreSQL instance. -* `database_username` - Username. -* `database_password` - Database password. -* `max_connections_in_pool` - The maximum number of connections to the database. -* `port` - The port for the database connection. - +- `storage.postgresql_config.database_name` - Name of the database. +- `storage.postgresql_config.host` - URL to PostgreSQL instance. +- `storage.postgresql_config.database_username` - Username. +- `storage.postgresql_config.database_password` - Database password. +- `storage.postgresql_config.max_connections_in_pool` - The maximum number of connections to the database. +- `storage.postgresql_config.port` - The port for the database connection. To run the Sidecar with PostgreSQL, you can set the following database environment variables to control how the Sidecar connects to the database. This is the suggested method to set the connection information for the PostgreSQL database. @@ -160,13 +444,11 @@ SIDECAR_POSTGRES_MAX_CONNECTIONS="max connections" SIDECAR_POSTGRES_PORT="port" ``` -However, DB connectivity can also be configured using the Sidecar configuration file. - -If the DB environment variables and the Sidecar's configuration file have the same variable set, the DB environment variables will take precedence. 
+However, DB connectivity can also be configured using the Sidecar configuration file. If the DB environment variables and the Sidecar's configuration file have the same variable set, the DB environment variables will take precedence. It is possible to completely omit the PostgreSQL configuration from the Sidecar's configuration file. In this case, the Sidecar will attempt to connect to the PostgreSQL using the database environment variables or use some default values for non-critical variables. -``` +```toml [storage.postgresql_config] database_name = "event_sidecar" host = "localhost" @@ -175,109 +457,92 @@ database_username = "postgres" max_connections_in_pool = 30 ``` -### Rest & Event Stream Criteria +### Admin server setup -This information determines outbound connection criteria for the Sidecar's `rest_server`. +This optional section configures the Sidecar's administrative server. If this section is not specified, the Sidecar will not start an admin server. -``` -[rest_server] -port = 18888 -max_concurrent_requests = 50 -max_requests_per_second = 50 -request_timeout_in_seconds = 10 +```toml +[admin_api_server] +enable_server = true +port = 18887 +max_concurrent_requests = 1 +max_requests_per_second = 1 ``` -* `port` - The port for accessing the sidecar's `rest_server`. `18888` is the default, but operators are free to choose their own port as needed. -* `max_concurrent_requests` - The maximum total number of simultaneous requests that can be made to the REST server. -* `max_requests_per_second` - The maximum total number of requests that can be made per second. -* `request_timeout_in_seconds` - The total time before a request times out. +- `enable_server` - If set to true, the admin API server will be enabled. +- `port` - The port for accessing the Sidecar's admin server. +- `max_concurrent_requests` - The maximum total number of simultaneous requests that can be sent to the admin server. 
+- `max_requests_per_second` - The maximum total number of requests that can be sent per second to the admin server. -``` -[event_stream_server] -port = 19999 -max_concurrent_subscribers = 100 -event_stream_buffer_length = 5000 -``` +Access the admin server at `http://localhost:18887/metrics/`. -The `event_stream_server` section specifies a port for the Sidecar's event stream. +## Running and Testing the Sidecar -Additionally, there are the following two options: +### Prerequisites -* `max_concurrent_subscribers` - The maximum number of subscribers that can monitor the Sidecar's event stream. -* `event_stream_buffer_length` - The number of events that the stream will hold in its buffer for reference when a subscriber reconnects. +To compile, test, and run the Sidecar, install the following software first: -### Admin Server +- CMake 3.1.4 or greater +- [Rust](https://www.rust-lang.org/tools/install) +- pkg-config +- gcc +- g++ -This optional section configures the Sidecar's administrative server. If this section is not specified, the Sidecar will not start an admin server. +### Running the Sidecar -``` -[admin_server] -port = 18887 -max_concurrent_requests = 1 -max_requests_per_second = 1 +After creating the configuration file, run the Sidecar using `cargo` and point to the configuration file using the `--path-to-config` option, as shown below. The command needs to run with `root` privileges. + +```sh +sudo cargo run -- --path-to-config ./resources/example_configs/EXAMPLE_NODE_CONFIG.toml ``` -* `port` - The port for accessing the Sidecar's admin server. -* `max_concurrent_requests` - The maximum total number of simultaneous requests that can be sent to the admin server. -* `max_requests_per_second` - The maximum total number of requests that can be sent per second to the admin server. +The Sidecar application leverages tracing, which can be controlled by setting the `RUST_LOG` environment variable. -Access the admin server at `http://localhost:18887/metrics/`. 
+The following command will run the Sidecar application with the `INFO` log level. -## Swagger Documentation +```sh +RUST_LOG=info cargo run -p casper-sidecar -- --path-to-config ./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml +``` -Once the Sidecar is running, access the Swagger documentation at `http://localhost:18888/swagger-ui/`. You need to replace `localhost` with the IP address of the machine running the Sidecar application if you are running the Sidecar remotely. The Swagger documentation will allow you to test the REST API. +The log levels, listed in order of increasing verbosity, are: -## OpenAPI Specification +- `ERROR` +- `WARN` +- `INFO` +- `DEBUG` +- `TRACE` -An OpenAPI schema is available at `http://localhost:18888/api-doc.json/`. You need to replace `localhost` with the IP address of the machine running the Sidecar application if you are running the Sidecar remotely. +Further details about log levels can be found [here](https://docs.rs/env_logger/0.9.1/env_logger/#enabling-logging). -## Unit Testing the Sidecar +### Testing the Sidecar You can run the unit and integration tests included in this repository with the following command: -``` +```sh cargo test ``` -You can also run the performance tests using the following command: +You can also run the performance tests using this command: -``` +```sh cargo test -- --include-ignored ``` -The [EXAMPLE_NCTL_CONFIG.toml](./EXAMPLE_NCTL_CONFIG.toml) file contains the configurations used for these tests. - -## Running the Sidecar +The [EXAMPLE_NCTL_CONFIG.toml](./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml) file contains the configurations used for these tests. -After creating the configuration file, run the Sidecar using Cargo and point to the configuration file using the `--path-to-config` option, as shown below. The command needs to run with `root` privileges. 
+#### Testing the Sidecar using NCTL -```shell -sudo cargo run -- --path-to-config EXAMPLE_NODE_CONFIG.toml -``` - -The Sidecar application leverages tracing, which can be controlled by setting the `RUST_LOG` environment variable. - -The following command will run the sidecar application with the `INFO` log level. - -``` -RUST_LOG=info cargo run -p casper-event-sidecar -- --path-to-config EXAMPLE_NCTL_CONFIG.toml -``` - -The log levels, listed in order of increasing verbosity, are: +The Sidecar application can be tested against live Casper nodes or a local [NCTL network](https://docs.casperlabs.io/dapp-dev-guide/building-dapps/setup-nctl/). -* `ERROR` -* `WARN` -* `INFO` -* `DEBUG` -* `TRACE` +The configuration shown [here](./resources/example_configs/EXAMPLE_NCTL_CONFIG.toml) will direct the Sidecar application to a locally hosted NCTL network if one is running. The Sidecar should function the same way it would while connected to a live node, displaying events as they occur in the local NCTL network. -Further details about log levels can be found [here](https://docs.rs/env_logger/0.9.1/env_logger/#enabling-logging). +## Swagger Documentation -## Testing the Sidecar using NCTL +Once the Sidecar is running, access the Swagger documentation at `http://localhost:18888/swagger-ui/`. You need to replace `localhost` with the IP address of the machine running the Sidecar application if you are running the Sidecar remotely. The Swagger documentation will allow you to test the REST API. -The Sidecar application can be tested against live Casper nodes or a local [NCTL network](https://docs.casperlabs.io/dapp-dev-guide/building-dapps/setup-nctl/). +## OpenAPI Specification -The configuration shown within this README will direct the Sidecar application to a locally hosted NCTL network if one is running. The Sidecar should function the same way it would with a live node, displaying events as they occur in the local NCTL network. 
+An OpenAPI schema is available at `http://localhost:18888/api-doc.json/`. You need to replace `localhost` with the IP address of the machine running the Sidecar application if you are running the Sidecar remotely. ## Troubleshooting Tips @@ -307,8 +572,8 @@ curl http://SIDECAR_URL:SIDECAR_ADMIN_PORT/metrics **Sample output**: -``` -# HELP node_statuses Current status of node to which sidecar is connected. Numbers mean: 0 - preparing; 1 - connecting; 2 - connected; 3 - reconnecting; -1 - defunct -> used up all connection attempts ; -2 - defunct -> node is in an incompatible version +```sh +# HELP node_statuses Current status of node to which the Sidecar is connected. Numbers mean: 0 - preparing; 1 - connecting; 2 - connected; 3 - reconnecting; -1 - connections_exhausted -> used up all connection attempts ; -2 - incompatible -> node is in an incompatible version # TYPE node_statuses gauge node_statuses{node="35.180.42.211:9999"} 2 node_statuses{node="69.197.42.27:9999"} 2 @@ -323,13 +588,11 @@ In the above `node_statuses`, you can see which nodes are connecting, which are - `-1` - The Sidecar is not connected and has reached the maximum connection attempts - `-2` - The Sidecar is not connected due to an incompatible node version - ### Diagnosing errors - To diagnose errors, look for `error` logs and check the `error_counts` on the metrics page, `http://SIDECAR_URL:SIDECAR_ADMIN_PORT/metrics`, where most of the errors related to data flow will be stored: -``` +```sh # HELP error_counts Error counts # TYPE error_counts counter error_counts{category="connection_manager",description="fetching_from_stream_failed"} 6 @@ -339,13 +602,13 @@ error_counts{category="connection_manager",description="fetching_from_stream_fai To monitor the Sidecar's memory consumption, observe the metrics page, `http://SIDECAR_URL:SIDECAR_ADMIN_PORT/metrics`. Search for `process_resident_memory_bytes`: -``` +```sh # HELP process_resident_memory_bytes Resident memory size in bytes. 
# TYPE process_resident_memory_bytes gauge process_resident_memory_bytes 292110336 ``` -If memory consumption is high without an apparent reason, please inform the Sidecar team by creating an [issue in GitHub](https://github.com/CasperLabs/event-sidecar/issues). +If memory consumption is high without an apparent reason, please inform the Sidecar team by creating an [issue in GitHub](https://github.com/casper-network/casper-sidecar/issues). Remember to check the `event_stream_buffer_length` setting in the configuration because it dramatically impacts how much memory the Sidecar consumes. Also, some events, like step events, consume more memory. @@ -362,3 +625,7 @@ The easiest way to inspect the Sidecar’s REST API is with [Swagger](#swagger-d The Sidecar can be configured to limit concurrent requests (`max_concurrent_requests`) and requests per second (`max_requests_per_second`) for the REST and admin servers. However, remember that those are application-level guards, meaning that the operating system already accepted the connection, which used up the operating system's resources. Limiting potential DDoS attacks requires consideration before the requests are directed to the Sidecar application. + +## License + +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). diff --git a/USAGE.md b/USAGE.md index ea2aeed2..66cb5dd0 100644 --- a/USAGE.md +++ b/USAGE.md @@ -1,4 +1,4 @@ -# Casper Event Sidecar USAGE +# Casper Sidecar USAGE This document describes how to consume events and perform queries using the Sidecar, covering the following topics: @@ -14,69 +14,51 @@ This document describes how to consume events and perform queries using the Side The Sidecar event stream is a passthrough for all the events emitted by the node(s) to which the Sidecar connects. This stream also includes one endpoint for Sidecar-generated events that can be useful, although the node did not emit them. 
-Events are divided into four categories and emitted on their respective endpoints: - -- **Deploy events** - Associated with Deploys on a node and emitted on the `events/deploys` endpoint. Currently, only a `DeployAccepted` event is emitted. The URL to consume these events using Sidecar on a Mainnet or Testnet node is `http://:19999/events/deploys/`. -- **Finality Signature events** - Emitted on the `events/sigs` endpoint when a block has been finalized and cannot be altered. The URL to consume finality signature events using Sidecar on a Mainnet or Testnet node is `http://:19999/events/sigs/`. -- **Main events** - All other events are emitted on the `events/main` endpoint, including `BlockAdded`, `DeployProcessed`, `DeployExpired`, `Fault`, and `Step` events. The URL to consume these events using Sidecar on a Mainnet or Testnet node is `http://:19999/events/main/`. -- **Sidecar-generated events** - The Sidecar also emits events on the `events/sidecar` endpoint, designated for events originating solely from the Sidecar service. The URL to consume these events using Sidecar on a Mainnet or Testnet node is `http://:19999/events/sidecar/`. +Events are emitted on two endpoints: +* All events that come from a node are re-emitted under `http://:/events`. +* All Sidecar-generated events reporting the Sidecar's internal state are emitted under `http://:/events/sidecar`. For more information on various event types emitted by the node, visit the [Monitoring and Consuming Events](https://docs.casperlabs.io/developers/dapps/monitor-and-consume-events/#event-types) documentation. -### Monitoring the Sidecar Event Stream +### Monitoring the Sidecar event stream It is possible to monitor the Sidecar event stream using *cURL*, depending on how the HOST and PORT are configured. -```json -curl -s http:///events/ +The Sidecar can connect to Casper nodes with versions greater or equal to `2.0.0`. 
+ +```sh +curl -s http:///events ``` - `HOST` - The IP address where the Sidecar is running - `PORT` - The port number where the Sidecar emits events -- `TYPE` - The type of event emitted - -Given this [example configuration](EXAMPLE_NODE_CONFIG.toml), here are the commands for each endpoint: - -- **Deploy events:** - - ```json - curl -sN http://127.0.0.1:19999/events/deploys - ``` -- **Finality Signature events:** +Given this [example configuration](./resources/example_configs/EXAMPLE_NODE_CONFIG.toml), here are the commands for each endpoint: - ```json - curl -sN http://127.0.0.1:19999/events/sigs - ``` - -- **Main events:** - - ```json - curl -sN http://127.0.0.1:19999/events/main - ``` - -- **Sidecar-generated events:** +```sh +curl -sN http://127.0.0.1:19999/events +``` - ```json - curl -sN http://127.0.0.1:19999/events/sidecar - ``` +Also, the Sidecar exposes an endpoint for Sidecar-generated events: -### The API Version of Node Events +```sh +curl -sN http://127.0.0.1:19999/events/sidecar +``` -An `ApiVersion` event is always emitted when a new client connects to a node's SSE server, informing the client of the node's software version. +### Node events versioning -When a client connects to the Sidecar, the Sidecar displays the node’s API version, `ApiVersion`, which it receives from the node. Then, it starts streaming the events coming from the node. The `ApiVersion` may differ from the node’s build version. +An `ApiVersion` event is always emitted when the Sidecar connects to a node's SSE server, broadcasting the node's software version. Then, the Sidecar starts streaming the events coming from the node. Note that the `ApiVersion` may differ from the node’s build version. If the node goes offline, the `ApiVersion` may differ when it restarts (i.e., in the case of an upgrade). In this case, the Sidecar will report the new `ApiVersion` to its client. 
If the node’s `ApiVersion` has not changed, the Sidecar will not report the version again and will continue to stream messages that use the previous version. -Here is an example of what the API version would look like while listening on the Sidecar’s `DeployAccepted` event stream: +Here is an example of what the API version would look like while listening on the Sidecar’s event stream. The colons represent "keep-alive" messages. -``` -curl -sN http://127.0.0.1:19999/events/deploys +```sh +curl -sN http://127.0.0.1:19999/events -data:{"ApiVersion":"1.4.8"} +data:{"ApiVersion":"2.0.0"} -data:{"DeployAccepted":{"hash":"00eea4fb9baa37af401cba8ffb96a1b96d594234908cb5f9de50effcb5b1c5aa","header":{"account":"0202ed20f3a93b5386bc41b6945722b2bd4250c48f5fa0632adf546e2f3ff6f4ddee","timestamp":"2023-02-28T12:21:14.604Z","ttl":"30m","gas_price":1,"body_hash":"f06261b964600caf712a3ea0dc54448c3fcc008638368580eb4de6832dce8698","dependencies":[],"chain_name":"casper"},"payment":{"ModuleBytes":{"module_bytes":"","args":[["amount",{"cl_type":"U512","bytes":"0400e1f505","parsed":"100000000"}]]}},"session":{"Transfer":{"args":[["amount",{"cl_type":"U512","bytes":"05205d59d832","parsed":"218378100000"}],["target",{"cl_type":{"ByteArray":32},"bytes":"6fbe4634d42aa1ae7820eed35bcbd5c687de5c464e5348650b49a21a17c8dcb5","parsed":"6fbe4634d42aa1ae7820eed35bcbd5c687de5c464e5348650b49a21a17c8dcb5"}],["id",{"cl_type":{"Option":"U64"},"bytes":"00","parsed":null}]]}},"approvals":[{"signer":"0202ed20f3a93b5386bc41b6945722b2bd4250c48f5fa0632adf546e2f3ff6f4ddee","signature":"02b519ecb34f954aeb7afede122c6f999b2124022f6b653304b2891c5428b074795ad9232a409aa0d3e601471331ea50143ca4c378306ffcd0f8ff7a60e13f19db"}]}} +data:{"TransactionProcessed": {"transaction_hash": {"Version1": "56642d06d9642c512a7bf55413108ce65bfd1105361bf36ff3586998529e116b" }, "initiator_addr": {"PublicKey": "014962b395b25a89cf970340fb51da2adbfb0f5836716e26dbae6754e79e01ab68" }, "timestamp": "2020-08-07T01:22:10.209Z", "ttl": "11h 9m 50s 
128ms", "block_hash": "08ad20808db3098e4461182d18c6efd68db6b01f4e22d4005bfdc4f007a7c0d0", "execution_result": {"Version1": {"Failure": {"effect": {"operations": [], "transforms": [ {"key": "12570563858918177191", "transform": "Identity" }, {"key": "14635000063685912943", "transform": {"AddUInt64": 5592565879698622687 } } ] }, "transfers": [ "transfer-9a9304069e5a68e408824ba9a16a99bb50179926f58023371ef82cc9565d68fb" ], "cost": "3760779910350774860", "error_message": "Error message 15494687491298509010" } } }, "messages": [ {"entity_addr": "addressable-entity-68c22b361a3a74f49dde2873f93d8485e9a08cc14c7f154b46a25435ca8ef449", "message": {"String": "Va6WL5U9dFhLbG3HCJQvuqcA46EslCY9fymlYbHqpvFlo4PeUs0nUVgeXavUIYc7" }, "block_index": 0, "topic_index": 0, "topic_name": "QnlypxwtJpoTOF8opgGiuGYseeNvcU5A", "topic_name_hash": "e9c77898578d8d1e5063cf3c7c60ca048b8176f10ba1684c2f05961a152acfa7", "index": 3213106390 }, {"entity_addr": "addressable-entity-063582249fa5823b94f883f6c784e3b5b9742780b7fa7c0549823be7debc7680", "message": {"String": "nwOXDbkcq5xEyDxONQizPdBIpWpPi1SBtLCws0a3F0v1nu7FyjbvjErKOjAYYwg0" }, "block_index": 1, "topic_index": 1, "topic_name": "hQLIE3k8zWLnslrmN9RRROhLk4g2LxeQ", "topic_name_hash": "1747f053151847f43ae3b8cac607dc7bb672aa3aec1c2bbb7e3a866613fe3803", "index": 1507321819 }, {"entity_addr": "addressable-entity-acbce74845514977568693e79876f60a9fd0459a4419cc8392820cce7c25ca8e", "message": {"String": "w7aWCBO3uIjQf91hjSFZ6xog0w8b6HyPAVW5iBUFVx7XWPOho7tLrw6a3DpJMA9o" }, "block_index": 2, "topic_index": 2, "topic_name": "TNXBnGjXCGANWJK4YSvD5HUZnoWRQGRn", "topic_name_hash": "f7c3f5fa51fd729bc3af86f724e764e66efae425aa47025ec0dd88f8c062baad", "index": 1303188972 }, {"entity_addr": "addressable-entity-8f1f553f3ca14a9510557cd85e42a7e0269d4a344e74cf1e83d9751e875559f0", "message": {"String": "ZJsXLKE3V08ihPnxZxtZmDffb68zl6A4vsVQsYkSCm8Tvg8RCGNXRWOR6c12zphq" }, "block_index": 3, "topic_index": 3, "topic_name": "sfYdJVcjs68cwCpd9pSeQ7NwWdvLi2Q0", 
"topic_name_hash": "16b791cf5685e45ecf6a41c3442173ca2bf6c8b6971ada579420b3e28803c992", "index": 1637472264 } ] }} id:21821471 : @@ -86,17 +68,13 @@ id:21821471 : ``` -#### Middleware Mode - -The Sidecar can connect simultaneously to nodes with different build versions, which send messages with different API versions. There is also the rare possibility of nodes changing API versions and not being in sync with other connected nodes. Although this situation would be rare, clients should be able to parse messages with different API versions. - ->**Note**: The Sidecar can connect to Casper nodes with versions greater or equal to `1.5.2`. +>**Note**: The Sidecar can connect simultaneously to nodes with different build versions, which send messages with different API versions. There is also the rare possibility of nodes changing API versions and not being in sync with other connected nodes. Although this situation would be rare, clients should be able to parse messages with different API versions. -### The Version of Sidecar Events +### Sidecar events versioning When a client connects to the `events/sidecar` endpoint, it will receive a message containing the version of the Sidecar software. Release version `1.1.0` would look like this: -``` +```sh curl -sN http://127.0.0.1:19999/events/sidecar data:{"SidecarVersion":"1.1.0"} @@ -104,25 +82,24 @@ data:{"SidecarVersion":"1.1.0"} : : - ``` Note that the SidecarVersion differs from the APIVersion emitted by the node event streams. You will also see the keep-alive messages as colons, ensuring the connection is active. -### The Node Shutdown Event +### The node's Shutdown event -When the node sends a Shutdown event and disconnects from the Sidecar, the Sidecar will report it as part of the event stream and on the `/events/deploys` endpoint. The Sidecar will continue to operate and attempt to reconnect to the node according to the `max_attempts` and `delay_between_retries_in_seconds` settings specified in its configuration. 
+When the node sends a Shutdown event and disconnects from the Sidecar, the Sidecar will report it as part of the event stream on the `/events` endpoint. The Sidecar will continue to operate and attempt to reconnect to the node according to the `max_attempts` and `delay_between_retries_in_seconds` settings specified in its configuration. The Sidecar does not expose Shutdown events via its REST API. Here is an example of how the stream might look like if the node went offline for an upgrade and came back online after a Shutdown event with a new `ApiVersion`: -``` -curl -sN http://127.0.0.1:19999/events/deploys +```sh +curl -sN http://127.0.0.1:19999/events -data:{"ApiVersion":"1.5.2"} +data:{"ApiVersion":"2.0.0"} -data:{"BlockAdded":{"block_hash":"b487aae22b406e303d96fc44b092f993df6f3b43ceee7b7f5b1f361f676492d6","block":{"hash":"b487aae22b406e303d96fc44b092f993df6f3b43ceee7b7f5b1f361f676492d6","header":{"parent_hash":"4a28718301a83a43563ec42a184294725b8dd188aad7a9fceb8a2fa1400c680e","state_root_hash":"63274671f2a860e39bb029d289e688526e4828b70c79c678649748e5e376cb07","body_hash":"6da90c09f3fc4559d27b9fff59ab2453be5752260b07aec65e0e3a61734f656a","random_bit":true,"accumulated_seed":"c8b4f30a3e3e082f4f206f972e423ffb23d152ca34241ff94ba76189716b61da","era_end":{"era_report":{"equivocators":[],"rewards":{"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80":1559401400039,"010427c1d1227c9d2aafe8c06c6e6b276da8dcd8fd170ca848b8e3e8e1038a6dc8":25895190891},"inactive_validators":[]},"next_era_validator_weights":{"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80":"50538244651768072","010427c1d1227c9d2aafe8c06c6e6b276da8dcd8fd170ca848b8e3e8e1038a6dc8":"839230678448335"}},"timestamp":"2021-04-08T05:14:14.912Z","era_id":90,"height":1679394427512,"protocol_version":"1.0.0"},"body":{"proposer":"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b","deploy_hashes":[],"transfer_hashes":[]}}}} 
+data:{"BlockAdded":{"block_hash":"bb5332a4f0feae6a760d67b3e2a24adf4599aaf6845584f20d80f037e2505f69","block":{"Version2":{"hash":"bb5332a4f0feae6a760d67b3e2a24adf4599aaf6845584f20d80f037e2505f69","header":{"parent_hash":"4c1fb7a23f0de75e14ef5077dbf6ffedbdf2c4a26c2e5890f2694be1be9c78de","state_root_hash":"e7e75dd4500801195276096ffe274973e8da2b73430138bd4d9c1804f658d277","body_hash":"a8f9c258f7276ca6ab2788c5df78ac4a94480a327de9d4675c2b528bb0e7faed","random_bit":true,"accumulated_seed":"630d9b48148044845d91867646685a3a85ec2ddc11634a935aa0b22e248bc17d","era_end":null,"timestamp":"2024-03-19T15:17:09.163Z","era_id":178172,"height":1781728,"protocol_version":"2.0.0"},"body":{"proposer":"0202b55941afeb1ec56170b12752f5a592e3d8fe222e4f9830eca538e667c790f2ae","mint":[],"auction":[],"install_upgrade":[],"standard":[],"rewarded_signatures":[]}}}}} id:1 : @@ -136,26 +113,25 @@ id:2 : -data:{"ApiVersion":"1.5.2"} +data:{"ApiVersion":"2.0.1"} -data:{"BlockAdded":{"block_hash":"1c76e7abf5780b49d3a66beef7b75bbf261834f494dededb8f2e349735659c03","block":{"hash":"1c76e7abf5780b49d3a66beef7b75bbf261834f494dededb8f2e349735659c03","header":{"parent_hash":"4a28718301a83a43563ec42a184294725b8dd188aad7a9fceb8a2fa1400c680e","state_root_hash":"63274671f2a860e39bb029d289e688526e4828b70c79c678649748e5e376cb07","body_hash":"6da90c09f3fc4559d27b9fff59ab2453be5752260b07aec65e0e3a61734f656a","random_bit":true,"accumulated_seed":"c8b4f30a3e3e082f4f206f972e423ffb23d152ca34241ff94ba76189716b61da","era_end":{"era_report":{"equivocators":[],"rewards":[{"validator":"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80","amount":1559401400039},{"validator":"010427c1d1227c9d2aafe8c06c6e6b276da8dcd8fd170ca848b8e3e8e1038a6dc8","amount":25895190891}],"inactive_validators":[]},"next_era_validator_weights":[{"validator":"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80","weight":"50538244651768072"},{"validator":"010427c1d1227c9d2aafe8c06c6e6b276da8dcd8fd170ca848b8e3e8e1038a6dc8",
"weight":"839230678448335"}]},"timestamp":"2021-04-08T05:14:14.912Z","era_id":90,"height":1679394457791,"protocol_version":"1.0.0"},"body":{"proposer":"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b","deploy_hashes":[],"transfer_hashes":[]},"proofs":[]}}} +data:{"BlockAdded":{"block_hash":"8d7b333799ed9d0dd8764d75947c618ae0a198cf6551e4026521011b31a53934","block":{"Version2":{"hash":"8d7b333799ed9d0dd8764d75947c618ae0a198cf6551e4026521011b31a53934","header":{"parent_hash":"98789674cd19222df62d9bf7293642a6193ad60eec204802cd1f3ea9a601a8af","state_root_hash":"d4386260b30b66704c6d99c70b01afe09671f29b8cb6ed69afae0abeef4a84e3","body_hash":"85210d3bf069c9f534b4af9c8ddc8cd63ef971f4c9d7f4d3dcbc57c5164a0737","random_bit":true,"accumulated_seed":"2787dcda83de66d13502aad716ac4469efda1f3072bece0c11bd902d3cdcbeaa","era_end":null,"timestamp":"2024-03-20T14:45:55.936Z","era_id":895818,"height":8958184,"protocol_version":"1.0.0","current_gas_price":1},"body":{"proposer":"014e6a488e8cb7c64ee7ca1263e8b3df15e8e5cc28512bd7d5a17fd210d00b0947","mint":[],"auction":[],"install_upgrade":[],"standard":[],"rewarded_signatures":[]}}}}} id:3 : : - ``` Note that the Sidecar can emit another type of shutdown event on the `events/sidecar` endpoint, as described below. -### The Sidecar Shutdown Event +### The Sidecar Shutdown event If the Sidecar attempts to connect to a node that does not come back online within the maximum number of reconnection attempts, the Sidecar will start a controlled shutdown process. It will emit a Sidecar-specific Shutdown event on the [events/sidecar](#the-sidecar-shutdown-event) endpoint, designated for events originating solely from the Sidecar service. The other event streams do not get this message because they only emit messages from the node. -The message structure of the Sidecar shutdown event is the same as the [node shutdown event](#the-node-shutdown-event). 
The sidecar event stream would look like this: +The message structure of the Sidecar shutdown event is the same as the [node shutdown event](#the-nodes-shutdown-event). The Sidecar event stream would look like this: -``` +```sh curl -sN http://127.0.0.1:19999/events/sidecar data:{"SidecarVersion":"1.1.0"} @@ -170,11 +146,29 @@ data:"Shutdown" id:8 ``` +## Replaying the Event Stream + +This command will replay the event stream from an old event onward. The server will replay all the cached events if the ID is 0 or if you specify an event ID already purged from the node's cache. + +Replace the `HOST`, `PORT`, and `ID` fields with the values needed. + +```sh +curl -sN http://HOST:PORT/events?start_from=ID +``` + +**Example:** + +```sh +curl -sN http://65.21.235.219:9999/events?start_from=29267508 +``` + +Note that certain shells like `zsh` may require an escape character before the question mark. + ## The REST Server The Sidecar provides a RESTful endpoint for useful queries about the state of the network. -### Latest Block +### Latest block Retrieve information about the last block added to the linear chain. @@ -182,7 +176,7 @@ The path URL is `/block`. 
Example: -```json +```sh curl -s http://127.0.0.1:18888/block ``` @@ -190,13 +184,13 @@ curl -s http://127.0.0.1:18888/block Sample output ```json -{"block_hash":"95b0d7b7e94eb79a7d2c79f66e2324474fc8f54536b9e6b447413fa6d00c2581","block":{"hash":"95b0d7b7e94eb79a7d2c79f66e2324474fc8f54536b9e6b447413fa6d00c2581","header":{"parent_hash":"48a99605ed4d1b27f9ddf8a1a0819c576bec57dd7a1b105247e48a5165b4194b","state_root_hash":"8d439b84b62e0a30f8e115047ce31c5ddeb30bd46eba3de9715412c2979be26e","body_hash":"b34c6c6ea69669597578a1912548ef823f627fe667ddcdb6bcd000acd27c7a2f","random_bit":true,"accumulated_seed":"058b14c76832b32e8cd00750e767c60f407fb13b3b0c1e63aea2d6526202924d","era_end":null,"timestamp":"2022-11-20T12:44:22.912Z","era_id":7173,"height":1277846,"protocol_version":"1.5.2"},"body":{"proposer":"0169e1552a97843ff2ef4318e8a028a9f4ed0c16b3d96f6a6eee21e6ca0d4022bc","deploy_hashes":[],"transfer_hashes":["d2193e27d6f269a6f4e0ede0cca805baa861d553df8c9f438cc7af56acf40c2b"]},"proofs":[]}} +{"block_hash":"d32550922798f6f70499f171030d30b12c2cde967f72cff98a0f987663789f89","block":{"Version2":{"hash":"d32550922798f6f70499f171030d30b12c2cde967f72cff98a0f987663789f89","header":{"parent_hash":"676a0a1a5b3e57c1710ccc379b788b4e81773b19c8f4586387a15288c914b1de","state_root_hash":"e25977c41e7a0cea644508ddda67de0837beac112c422dee45ada119b445f188","body_hash":"1c28072d52682b36616a32c44a261c1b44ad386cf9139df2c10c6f1a31584747","random_bit":false,"accumulated_seed":"cfd7817242fe89bcfe4e74cd7122d43047c247ac064151cd10d21c82d62be676","era_end":null,"timestamp":"2024-03-20T09:26:25.460Z","era_id":773313,"height":7733136,"protocol_version":"2.0.0"},"body":{"proposer":"02037f0605b63fe1ee16e852d45fc223b1196602d2028e5dd4ea90ad8e0b0d7006c1","mint":[],"auction":[],"install_upgrade":[],"standard":[],"rewarded_signatures":[]}}}} ```

-### Block by Hash +### Block by hash Retrieve information about a block given its block hash. @@ -204,20 +198,20 @@ The path URL is `/block/`. Enter a valid block hash. Example: -```json -curl -s http://127.0.0.1:18888/block/96a989a7f4514909b442faba3acbf643378fb7f57f9c9e32013fdfad64e3c8a5 +```sh +curl -s http://127.0.0.1:18888/block/bd2e0c36150a74f50d9884e38a0955f8b1cba94821b9828c5f54d8929d6151bc ```
Sample output ```json -{"block_hash":"96a989a7f4514909b442faba3acbf643378fb7f57f9c9e32013fdfad64e3c8a5","block":{"hash":"96a989a7f4514909b442faba3acbf643378fb7f57f9c9e32013fdfad64e3c8a5","header":{"parent_hash":"8f29120995ae6942d1a48cc4ac8dc3be5de5886f1fb53140356c907f1a70d7ef","state_root_hash":"c8964dddfe3660f481f750c5acd776fe7e08c1e168a4184707d07da6bac5397c","body_hash":"31984faf50cfb2b96774e388a16407cbf362b66d22e1d55201cc0709fa3e1803","random_bit":false,"accumulated_seed":"5ce60583fc1a8b3da07900b7223636eadd97ea8eef6abec28cdbe4b3326c1d6c","era_end":null,"timestamp":"2022-11-20T18:36:05.504Z","era_id":7175,"height":1278485,"protocol_version":"1.5.2"},"body":{"proposer":"017de9688caedd0718baed968179ddbe0b0532a8ef0a9a1cb9dfabe9b0f6016fa8","deploy_hashes":[],"transfer_hashes":[]},"proofs":[]}} +{"block_hash":"bd2e0c36150a74f50d9884e38a0955f8b1cba94821b9828c5f54d8929d6151bc","block":{"Version2":{"hash":"bd2e0c36150a74f50d9884e38a0955f8b1cba94821b9828c5f54d8929d6151bc","header":{"parent_hash":"9fffc8f07c11910721850f696fbcc73eb1e9152f333d51d495a45b1b71b4262d","state_root_hash":"190b1c706a65f04e6a8777faa11011d28aefc3830facfeddd4fea5dd06274411","body_hash":"720e4822481a4a14ffd9175bb88d2f9a9976d527f0f9c72c515ab73c99a97cb8","random_bit":true,"accumulated_seed":"8bb2a7a8e973574adb81faa6a7853051a26024bc6a9af80178e372a40edadbff","era_end":null,"timestamp":"2024-03-20T09:27:04.342Z","era_id":644446,"height":6444466,"protocol_version":"2.0.0"},"body":{"proposer":"0203e58aea33501ce2e28c2e30f88d176755fbf9cd3724c6e0f0e7a1733368db3384","mint":[],"auction":[],"install_upgrade":[],"standard":[],"rewarded_signatures":[]}}}} ```


-### Block by Height +### Block by chain height Retrieve information about a block, given a specific block height. @@ -225,133 +219,142 @@ The path URL is `/block/`. Enter a valid number represe Example: -```json -curl -s http://127.0.0.1:18888/block/1278485 +```sh +curl -s http://127.0.0.1:18888/block/336460 ```
Sample output ```json -{"block_hash":"96a989a7f4514909b442faba3acbf643378fb7f57f9c9e32013fdfad64e3c8a5","block":{"hash":"96a989a7f4514909b442faba3acbf643378fb7f57f9c9e32013fdfad64e3c8a5","header":{"parent_hash":"8f29120995ae6942d1a48cc4ac8dc3be5de5886f1fb53140356c907f1a70d7ef","state_root_hash":"c8964dddfe3660f481f750c5acd776fe7e08c1e168a4184707d07da6bac5397c","body_hash":"31984faf50cfb2b96774e388a16407cbf362b66d22e1d55201cc0709fa3e1803","random_bit":false,"accumulated_seed":"5ce60583fc1a8b3da07900b7223636eadd97ea8eef6abec28cdbe4b3326c1d6c","era_end":null,"timestamp":"2022-11-20T18:36:05.504Z","era_id":7175,"height":1278485,"protocol_version":"1.5.2"},"body":{"proposer":"017de9688caedd0718baed968179ddbe0b0532a8ef0a9a1cb9dfabe9b0f6016fa8","deploy_hashes":[],"transfer_hashes":[]},"proofs":[]}} +{"block_hash":"2c1a1bda792d123d8ccdcf61b2c9a5bb9a467dc387fa9c85fa708dbf00d7efca","block":{"Version2":{"hash":"2c1a1bda792d123d8ccdcf61b2c9a5bb9a467dc387fa9c85fa708dbf00d7efca","header":{"parent_hash":"77641e387a0ccf4372a0339292984ba6be4b0c3f8b79d7f69f1781c53854dd0f","state_root_hash":"383ea1fe76047e2315ead460bd0d13c0a55adad0dc4bd84782b45c97593b8e32","body_hash":"7e6c19c940988ff42f862af86ccfa17768c93e1821d4ff3feefa250c17e0785c","random_bit":true,"accumulated_seed":"7c053fa1625b5670561f6d59dd83c7057567b8bc89025ba78e37908e3c2c7622","era_end":null,"timestamp":"2024-03-20T09:27:58.468Z","era_id":33646,"height":336460,"protocol_version":"2.0.0"},"body":{"proposer":"01657f46b1f8f8db69a85b41e9b957e9c3d67695ba62f8645b5b01c605d2642925","mint":[],"auction":[],"install_upgrade":[],"standard":[],"rewarded_signatures":[]}}}} ```


-### Deploy by Hash +### Transaction by hash -Retrieve an aggregate of the various states a deploy goes through, given its deploy hash. The node does not emit this event, but the Sidecar computes it and returns it for the given deploy. This endpoint behaves differently than other endpoints, which return the raw event received from the node. +Retrieve an aggregate of the various states a transaction goes through, given its transaction hash. The endpoint also needs the transaction type as an input (`deploy` or `version1`). The node does not emit this event, but the Sidecar computes it and returns it for the given transaction. This endpoint behaves differently than other endpoints, which return the raw event received from the node. -The path URL is `/deploy/`. Enter a valid deploy hash. +The path URL is `/transaction/<transaction-type>/<transaction-hash>`. Enter a valid transaction hash. -The output differs depending on the deploy's status, which changes over time as the deploy goes through its [lifecycle](https://docs.casperlabs.io/concepts/design/casper-design/#execution-semantics-phases). +The output differs depending on the transaction's status, which changes over time as the transaction goes through its [lifecycle](https://docs.casperlabs.io/concepts/design/casper-design/#execution-semantics-phases). Example: -```json -curl -s http://127.0.0.1:18888/deploy/8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7 +```sh +curl -s http://127.0.0.1:18888/transaction/version1/3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a ``` -The sample output below is for a deploy that was accepted but has yet to be processed. +The sample output below is for a transaction that was accepted but has yet to be processed.
-Deploy accepted but not processed yet +Transaction accepted but not processed yet ```json -{"deploy_hash":"8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","deploy_accepted":{"hash":"8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","header":{"account":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","timestamp":"2022-11-20T22:33:59.786Z","ttl":"1h","gas_price":1,"body_hash":"c0c3dedaaac4c962a966376c124cf2225df9c8efce4c2af05c4181be661f41aa","dependencies":[],"chain_name":"casper"},"payment":{"ModuleBytes":{"module_bytes":"","args":[["amount",{"cl_type":"U512","bytes":"0410200395","parsed":"2500010000"}]]}},"session":{"StoredContractByHash":{"hash":"ccb576d6ce6dec84a551e48f0d0b7af89ddba44c7390b690036257a04a3ae9ea","entry_point":"add_bid","args":[["public_key",{"cl_type":"PublicKey","bytes":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","parsed":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a"}],["amount",{"cl_type":"U512","bytes":"05008aa69516","parsed":"97000000000"}],["delegation_rate",{"cl_type":"U8","bytes":"00","parsed":0}]]}},"approvals":[{"signer":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","signature":"01a7ff7affdc13fac7436acf1b6d7c2282fff0f9185ebe1ce97f2e510b20d0375ad07eaca46f8d72f342e7b9e50a39c2eaf75da0c63365abfd526bbaffa4d33f02"}]},"deploy_processed":{},"deploy_expired":false} +{"transaction_hash": "3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","transaction_accepted": {"header": {"api_version": "2.0.0","network_name": "casper-net-1"},"payload": {"transaction": {"Version1": {"hash": "3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","header": {"chain_name": "casper-net-1","timestamp": "2024-03-20T13:31:59.772Z","ttl": "30m","body_hash": "40c7476a175fb97656ec6da1ace2f1900a9d353f1637943a30edd5385494b345","pricing_mode": {"Fixed": {"gas_price_tolerance": 1000}},"initiator_addr": {"PublicKey": 
"01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973"}},"body": {"args": [],"target": {"Session": {"kind": "Standard","module_bytes":"","runtime": "VmCasperV1"}},"entry_point": {"Custom": "test"},"scheduling": "Standard"},"approvals": [{"signer": "01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973","signature": "0154fd295f5d4d62544f63d70470de28b2bf2cddecac2a237b6a2a78d25ee14b21ea2861d711a51f57b3f9f74e247a8d26861eceead6569f233949864a9d5fa100"}]}}}},"transaction_processed": ,"transaction_expired": false} ```


-The next sample output is for a deploy that was accepted and processed. +The next sample output is for a transaction that was accepted and processed.
-Deploy accepted and processed successfully +Transaction accepted and processed successfully ```json -{"deploy_hash":"8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","deploy_accepted":{"hash":"8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","header":{"account":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","timestamp":"2022-11-20T22:33:59.786Z","ttl":"1h","gas_price":1,"body_hash":"c0c3dedaaac4c962a966376c124cf2225df9c8efce4c2af05c4181be661f41aa","dependencies":[],"chain_name":"casper"},"payment":{"ModuleBytes":{"module_bytes":"","args":[["amount",{"cl_type":"U512","bytes":"0410200395","parsed":"2500010000"}]]}},"session":{"StoredContractByHash":{"hash":"ccb576d6ce6dec84a551e48f0d0b7af89ddba44c7390b690036257a04a3ae9ea","entry_point":"add_bid","args":[["public_key",{"cl_type":"PublicKey","bytes":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","parsed":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a"}],["amount",{"cl_type":"U512","bytes":"05008aa69516","parsed":"97000000000"}],["delegation_rate",{"cl_type":"U8","bytes":"00","parsed":0}]]}},"approvals":[{"signer":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","signature":"01a7ff7affdc13fac7436acf1b6d7c2282fff0f9185ebe1ce97f2e510b20d0375ad07eaca46f8d72f342e7b9e50a39c2eaf75da0c63365abfd526bbaffa4d33f02"}]},"deploy_processed":{"deploy_hash":"8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","account":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","timestamp":"2022-11-20T22:33:59.786Z","ttl":"1h","dependencies":[],"block_hash":"2caea6929fe4bd615f5c7451ecddc607a99d7512c85add4fe816bd4ee88fce63","execution_result":{"Success":{"effect":{"operations":[],"transforms":[{"key":"hash-d2469afeb99130f0be7c9ce230a84149e6d756e306ef8cf5b8a49d5182e41676","transform":"Identity"},{"key":"hash-d63c44078a1931b5dc4b80a7a0ec586164fd0470ce9f8b23f6d93b9e86c5944d","transform":"Identity"},{"
key":"hash-7cc1b1db4e08bbfe7bacf8e1ad828a5d9bcccbb33e55d322808c3a88da53213a","transform":"Identity"},{"key":"hash-4475016098705466254edd18d267a9dad43e341d4dafadb507d0fe3cf2d4a74b","transform":"Identity"},{"key":"balance-c182f2fafc6eb59306f971a3d3ad06e4ffa09364ca9de2fc48d123e40da243cd","transform":"Identity"},{"key":"balance-fe327f9815a1d016e1143db85e25a86341883949fd75ac1c1e7408a26c5b62ef","transform":"Identity"},{"key":"balance-c182f2fafc6eb59306f971a3d3ad06e4ffa09364ca9de2fc48d123e40da243cd","transform":{"WriteCLValue":{"cl_type":"U512","bytes":"05f0c773b316","parsed":"97499990000"}}},{"key":"balance-fe327f9815a1d016e1143db85e25a86341883949fd75ac1c1e7408a26c5b62ef","transform":{"AddUInt512":"2500010000"}},{"key":"hash-ccb576d6ce6dec84a551e48f0d0b7af89ddba44c7390b690036257a04a3ae9ea","transform":"Identity"},{"key":"hash-86f2d45f024d7bb7fb5266b2390d7c253b588a0a16ebd946a60cb4314600af74","transform":"Identity"},{"key":"hash-7cc1b1db4e08bbfe7bacf8e1ad828a5d9bcccbb33e55d322808c3a88da53213a","transform":"Identity"},{"key":"hash-4475016098705466254edd18d267a9dad43e341d4dafadb507d0fe3cf2d4a74b","transform":"Identity"},{"key":"uref-3d52e976454512999aee042c3c298474a9d3fa98db80879052465c8a4c57c915-000","transform":{"WriteCLValue":{"cl_type":"Unit","bytes":"","parsed":null}}},{"key":"balance-3d52e976454512999aee042c3c298474a9d3fa98db80879052465c8a4c57c915","transform":{"WriteCLValue":{"cl_type":"U512","bytes":"00","parsed":"0"}}},{"key":"hash-7cc1b1db4e08bbfe7bacf8e1ad828a5d9bcccbb33e55d322808c3a88da53213a","transform":"Identity"},{"key":"hash-4475016098705466254edd18d267a9dad43e341d4dafadb507d0fe3cf2d4a74b","transform":"Identity"},{"key":"balance-c182f2fafc6eb59306f971a3d3ad06e4ffa09364ca9de2fc48d123e40da243cd","transform":"Identity"},{"key":"balance-3d52e976454512999aee042c3c298474a9d3fa98db80879052465c8a4c57c915","transform":"Identity"},{"key":"balance-c182f2fafc6eb59306f971a3d3ad06e4ffa09364ca9de2fc48d123e40da243cd","transform":{"WriteCLValue":{"cl_type":"U512","bytes":"04f
03dcd1d","parsed":"499990000"}}},{"key":"balance-3d52e976454512999aee042c3c298474a9d3fa98db80879052465c8a4c57c915","transform":{"AddUInt512":"97000000000"}},{"key":"transfer-1e75292a29d210326d8845082b302037300eac92c7d2612790ca3ab1a62e570d","transform":{"WriteTransfer":{"deploy_hash":"8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","from":"account-hash-eb1dd0668899cf6b35cf99f5d4a7d3ea05acf352f75d14075982e0aebc099776","to":"account-hash-6174cf2e6f8fed1715c9a3bace9c50bfe572eecb763b0ed3f644532616452008","source":"uref-c182f2fafc6eb59306f971a3d3ad06e4ffa09364ca9de2fc48d123e40da243cd-007","target":"uref-3d52e976454512999aee042c3c298474a9d3fa98db80879052465c8a4c57c915-007","amount":"97000000000","gas":"0","id":null}}},{"key":"bid-eb1dd0668899cf6b35cf99f5d4a7d3ea05acf352f75d14075982e0aebc099776","transform":{"WriteBid":{"validator_public_key":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","bonding_purse":"uref-3d52e976454512999aee042c3c298474a9d3fa98db80879052465c8a4c57c915-007","staked_amount":"97000000000","delegation_rate":0,"vesting_schedule":null,"delegators":{},"inactive":false}}},{"key":"deploy-8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","transform":{"WriteDeployInfo":{"deploy_hash":"8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","transfers":["transfer-1e75292a29d210326d8845082b302037300eac92c7d2612790ca3ab1a62e570d"],"from":"account-hash-eb1dd0668899cf6b35cf99f5d4a7d3ea05acf352f75d14075982e0aebc099776","source":"uref-c182f2fafc6eb59306f971a3d3ad06e4ffa09364ca9de2fc48d123e40da243cd-007","gas":"2500000000"}}},{"key":"hash-d2469afeb99130f0be7c9ce230a84149e6d756e306ef8cf5b8a49d5182e41676","transform":"Identity"},{"key":"hash-d63c44078a1931b5dc4b80a7a0ec586164fd0470ce9f8b23f6d93b9e86c5944d","transform":"Identity"},{"key":"balance-fe327f9815a1d016e1143db85e25a86341883949fd75ac1c1e7408a26c5b62ef","transform":"Identity"},{"key":"hash-d2469afeb99130f0be7c9ce230a84149e6d756e306ef8cf5b8a49d5182e41
676","transform":"Identity"},{"key":"hash-7cc1b1db4e08bbfe7bacf8e1ad828a5d9bcccbb33e55d322808c3a88da53213a","transform":"Identity"},{"key":"hash-4475016098705466254edd18d267a9dad43e341d4dafadb507d0fe3cf2d4a74b","transform":"Identity"},{"key":"balance-fe327f9815a1d016e1143db85e25a86341883949fd75ac1c1e7408a26c5b62ef","transform":"Identity"},{"key":"balance-8c2ffb7e82c5a323a4e50f6eea9a080feb89c71bb2db001bde7449e13328c0dc","transform":"Identity"},{"key":"balance-fe327f9815a1d016e1143db85e25a86341883949fd75ac1c1e7408a26c5b62ef","transform":{"WriteCLValue":{"cl_type":"U512","bytes":"00","parsed":"0"}}},{"key":"balance-8c2ffb7e82c5a323a4e50f6eea9a080feb89c71bb2db001bde7449e13328c0dc","transform":{"AddUInt512":"2500010000"}}]},"transfers":["transfer-1e75292a29d210326d8845082b302037300eac92c7d2612790ca3ab1a62e570d"],"cost":"2500000000"}}},"deploy_expired":false} +{"transaction_hash": "3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","transaction_accepted": {"header": {"api_version": "2.0.0","network_name": "casper-net-1"},"payload": {"transaction": {"Version1": {"hash": "3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","header": {"chain_name": "casper-net-1","timestamp": "2024-03-20T13:31:59.772Z","ttl": "30m","body_hash": "40c7476a175fb97656ec6da1ace2f1900a9d353f1637943a30edd5385494b345","pricing_mode": {"Fixed": {"gas_price_tolerance": 1000}},"initiator_addr": {"PublicKey": "01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973"}},"body": {"args": [],"target": {"Session": {"kind": "Standard","module_bytes":"","runtime": "VmCasperV1"}},"entry_point": {"Custom": "test"},"scheduling": "Standard"},"approvals": [{"signer": "01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973","signature": "0154fd295f5d4d62544f63d70470de28b2bf2cddecac2a237b6a2a78d25ee14b21ea2861d711a51f57b3f9f74e247a8d26861eceead6569f233949864a9d5fa100"}]}}}},"transaction_processed": 
{"transaction_hash":{"Deploy":"c6907d46a5cc61ef30c66dbb6599208a57d3d62812c5f061169cdd7ad4e52597"},"initiator_addr":{"PublicKey":"0202dec9e70126ddd13af6e2e14771339c22f73626202a28ef1ed41594a3b2a79156"},"timestamp":"2024-03-20T13:58:57.301Z","ttl":"2m 53s","block_hash":"6c6a1fb17147fe467a52f8078e4c6d1143e8f61e2ec0c57938a0ac5f49e3f960","execution_result":{"Version1":{"Success":{"effect":{"operations":[{"key":"9192013132486795888","kind":"NoOp"}],"transforms":[{"key":"9278390014984155010","transform":{"AddUInt64":17967007786823421753}},{"key":"8284631679508534160","transform":{"AddUInt512":"13486131286369918968"}},{"key":"11406903664472624400","transform":{"AddKeys":[{"name":"5532223989822042950","key":"6376159234520705888"},{"name":"9797089120764120320","key":"3973583116099652644"},{"name":"17360643427404656075","key":"3412027808185329863"},{"name":"9849256366384177518","key":"1556404389498537987"},{"name":"14237913702817074429","key":"16416969798013966173"}]}},{"key":"11567235260771335457","transform":"Identity"},{"key":"13285707355579107355","transform":"Identity"}]},"transfers":[],"cost":"14667737366273622842"}}},"messages":[{"entity_addr":{"SmartContract":[193,43,184,185,6,88,15,83,243,107,130,63,136,174,24,148,79,214,87,238,171,138,195,141,119,235,134,196,253,221,36,0]},"message":{"String":"wLNta4zbpJiW5ScjagPXm5LoGViYApCfIbEXJycPUuLQP4fA7REhV4LdBRbZ7bQb"},"topic_name":"FdRRgbXEGS1xKEXCJKvaq7hVyZ2ZUlSb","topic_name_hash":"473f644238bbb334843df5bd06a85e8bc34d692cce804de5f97e7f344595c769","topic_index":4225483688,"block_index":16248749308130060594},{"entity_addr":{"Account":[109,75,111,241,219,141,104,160,197,208,7,245,112,199,31,150,68,65,166,247,43,111,0,56,32,124,7,36,107,230,100,132]},"message":{"String":"U5qR82wJoPDGJWhwJ4qkblsu6Q5DDqDt0Q2pAjhVOUjn520PdvYOC27oo4aDEosw"},"topic_name":"zMEkHxGgUUSMmb7eWJhFs5e6DH9vXvCg","topic_name_hash":"d911ebafb53ccfeaf5c970e462a864622ec4e3a1030a17a8cfaf4d7a4cd74d48","topic_index":560585407,"block_index":15889379229443860143}]},
"transaction_expired": false} ``` +


-### Accepted Deploy by Hash +### Accepted transaction by hash -Retrieve information about an accepted deploy, given its deploy hash. +Retrieve information about an accepted transaction, given its transaction hash. -The path URL is `/deploy/accepted/`. Enter a valid deploy hash. +The path URL is `/transaction/accepted//`. Enter a valid transaction hash. Example: -```json -curl -s http://127.0.0.1:18888/deploy/accepted/8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7 +```sh +curl -s http://127.0.0.1:18888/transaction/accepted/version1/8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7 ```
Sample output ```json -{"hash":"8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","header":{"account":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","timestamp":"2022-11-20T22:33:59.786Z","ttl":"1h","gas_price":1,"body_hash":"c0c3dedaaac4c962a966376c124cf2225df9c8efce4c2af05c4181be661f41aa","dependencies":[],"chain_name":"casper"},"payment":{"ModuleBytes":{"module_bytes":"","args":[["amount",{"cl_type":"U512","bytes":"0410200395","parsed":"2500010000"}]]}},"session":{"StoredContractByHash":{"hash":"ccb576d6ce6dec84a551e48f0d0b7af89ddba44c7390b690036257a04a3ae9ea","entry_point":"add_bid","args":[["public_key",{"cl_type":"PublicKey","bytes":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","parsed":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a"}],["amount",{"cl_type":"U512","bytes":"05008aa69516","parsed":"97000000000"}],["delegation_rate",{"cl_type":"U8","bytes":"00","parsed":0}]]}},"approvals":[{"signer":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","signature":"01a7ff7affdc13fac7436acf1b6d7c2282fff0f9185ebe1ce97f2e510b20d0375ad07eaca46f8d72f342e7b9e50a39c2eaf75da0c63365abfd526bbaffa4d33f02"}]} +{"header": {"api_version": "2.0.0","network_name": "casper-net-1"},"payload": {"transaction": {"Version1": {"hash": "3141e85f8075c3a75c2a1abcc79810c07d103ff97c03200ab0d0baf91995fe4a","header": {"chain_name": "casper-net-1","timestamp": "2024-03-20T13:31:59.772Z","ttl": "30m","body_hash": "40c7476a175fb97656ec6da1ace2f1900a9d353f1637943a30edd5385494b345","pricing_mode": {"Fixed": {"gas_price_tolerance": 1000}},"initiator_addr": {"PublicKey": "01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973"}},"body": {"args": [],"target": {"Session": {"kind": "Standard","module_bytes": "","runtime": "VmCasperV1"}},"entry_point": {"Custom": "test"},"scheduling": "Standard"},"approvals": [{"signer": 
"01d848e225db95e34328ca1c64d73ecda50f5070fd6b21037453e532d085a81973","signature": "0154fd295f5d4d62544f63d70470de28b2bf2cddecac2a237b6a2a78d25ee14b21ea2861d711a51f57b3f9f74e247a8d26861eceead6569f233949864a9d5fa100"}]}}}} ```


-### Expired Deploy by Hash +### Expired transaction by hash -Retrieve information about a deploy that expired, given its deploy hash. +Retrieve information about a transaction that expired, given its transaction type and transaction hash. -The path URL is `/deploy/expired/`. Enter a valid deploy hash. +The path URL is `/transaction/expired//`. Enter a valid transaction hash. Example: +```sh +curl -s http://127.0.0.1:18888/transaction/expired/version1/3dcf9cb73977a1163129cb0801163323bea2a780815bc9dc46696a43c00e658c +``` + +
+Sample output + ```json -curl -s http://127.0.0.1:18888/deploy/expired/e03544d37354c5f9b2c4956826d32f8e44198f94fb6752e87f422fe3071ab58a +{"header": {"api_version": "2.0.0","network_name": "some-network"},"payload": {"transaction_hash": {"Version1": "3dcf9cb73977a1163129cb0801163323bea2a780815bc9dc46696a43c00e658c"}}} ``` +
-### Processed Deploy by Hash +### Processed transaction by hash -Retrieve information about a deploy that was processed, given its deploy hash. -The path URL is `/deploy/processed/`. Enter a valid deploy hash. +Retrieve information about a transaction that was processed, given its transaction hash. +The path URL is `/transaction/processed//`. Enter a valid transaction hash. Example: -```json -curl -s http://127.0.0.1:18888/deploy/processed/8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7 +```sh +curl -s http://127.0.0.1:18888/transaction/processed/version1/8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7 ```
Sample output ```json -{"deploy_hash":"8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","account":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","timestamp":"2022-11-20T22:33:59.786Z","ttl":"1h","dependencies":[],"block_hash":"2caea6929fe4bd615f5c7451ecddc607a99d7512c85add4fe816bd4ee88fce63","execution_result":{"Success":{"effect":{"operations":[],"transforms":[{"key":"hash-d2469afeb99130f0be7c9ce230a84149e6d756e306ef8cf5b8a49d5182e41676","transform":"Identity"},{"key":"hash-d63c44078a1931b5dc4b80a7a0ec586164fd0470ce9f8b23f6d93b9e86c5944d","transform":"Identity"},{"key":"hash-7cc1b1db4e08bbfe7bacf8e1ad828a5d9bcccbb33e55d322808c3a88da53213a","transform":"Identity"},{"key":"hash-4475016098705466254edd18d267a9dad43e341d4dafadb507d0fe3cf2d4a74b","transform":"Identity"},{"key":"balance-c182f2fafc6eb59306f971a3d3ad06e4ffa09364ca9de2fc48d123e40da243cd","transform":"Identity"},{"key":"balance-fe327f9815a1d016e1143db85e25a86341883949fd75ac1c1e7408a26c5b62ef","transform":"Identity"},{"key":"balance-c182f2fafc6eb59306f971a3d3ad06e4ffa09364ca9de2fc48d123e40da243cd","transform":{"WriteCLValue":{"cl_type":"U512","bytes":"05f0c773b316","parsed":"97499990000"}}},{"key":"balance-fe327f9815a1d016e1143db85e25a86341883949fd75ac1c1e7408a26c5b62ef","transform":{"AddUInt512":"2500010000"}},{"key":"hash-ccb576d6ce6dec84a551e48f0d0b7af89ddba44c7390b690036257a04a3ae9ea","transform":"Identity"},{"key":"hash-86f2d45f024d7bb7fb5266b2390d7c253b588a0a16ebd946a60cb4314600af74","transform":"Identity"},{"key":"hash-7cc1b1db4e08bbfe7bacf8e1ad828a5d9bcccbb33e55d322808c3a88da53213a","transform":"Identity"},{"key":"hash-4475016098705466254edd18d267a9dad43e341d4dafadb507d0fe3cf2d4a74b","transform":"Identity"},{"key":"uref-3d52e976454512999aee042c3c298474a9d3fa98db80879052465c8a4c57c915-000","transform":{"WriteCLValue":{"cl_type":"Unit","bytes":"","parsed":null}}},{"key":"balance-3d52e976454512999aee042c3c298474a9d3fa98db80879052465c8a4c57c915","transform":{"WriteCLV
alue":{"cl_type":"U512","bytes":"00","parsed":"0"}}},{"key":"hash-7cc1b1db4e08bbfe7bacf8e1ad828a5d9bcccbb33e55d322808c3a88da53213a","transform":"Identity"},{"key":"hash-4475016098705466254edd18d267a9dad43e341d4dafadb507d0fe3cf2d4a74b","transform":"Identity"},{"key":"balance-c182f2fafc6eb59306f971a3d3ad06e4ffa09364ca9de2fc48d123e40da243cd","transform":"Identity"},{"key":"balance-3d52e976454512999aee042c3c298474a9d3fa98db80879052465c8a4c57c915","transform":"Identity"},{"key":"balance-c182f2fafc6eb59306f971a3d3ad06e4ffa09364ca9de2fc48d123e40da243cd","transform":{"WriteCLValue":{"cl_type":"U512","bytes":"04f03dcd1d","parsed":"499990000"}}},{"key":"balance-3d52e976454512999aee042c3c298474a9d3fa98db80879052465c8a4c57c915","transform":{"AddUInt512":"97000000000"}},{"key":"transfer-1e75292a29d210326d8845082b302037300eac92c7d2612790ca3ab1a62e570d","transform":{"WriteTransfer":{"deploy_hash":"8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","from":"account-hash-eb1dd0668899cf6b35cf99f5d4a7d3ea05acf352f75d14075982e0aebc099776","to":"account-hash-6174cf2e6f8fed1715c9a3bace9c50bfe572eecb763b0ed3f644532616452008","source":"uref-c182f2fafc6eb59306f971a3d3ad06e4ffa09364ca9de2fc48d123e40da243cd-007","target":"uref-3d52e976454512999aee042c3c298474a9d3fa98db80879052465c8a4c57c915-007","amount":"97000000000","gas":"0","id":null}}},{"key":"bid-eb1dd0668899cf6b35cf99f5d4a7d3ea05acf352f75d14075982e0aebc099776","transform":{"WriteBid":{"validator_public_key":"01786c83c59eba29e1f4ae4ee601040970665a816ac5bf856108222b72723f782a","bonding_purse":"uref-3d52e976454512999aee042c3c298474a9d3fa98db80879052465c8a4c57c915-007","staked_amount":"97000000000","delegation_rate":0,"vesting_schedule":null,"delegators":{},"inactive":false}}},{"key":"deploy-8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","transform":{"WriteDeployInfo":{"deploy_hash":"8204af872d7d19ef8da947bce67c7a55449bc4e2aa12d2756e9ec7472b4854f7","transfers":["transfer-1e75292a29d210326d8845082b302037300e
ac92c7d2612790ca3ab1a62e570d"],"from":"account-hash-eb1dd0668899cf6b35cf99f5d4a7d3ea05acf352f75d14075982e0aebc099776","source":"uref-c182f2fafc6eb59306f971a3d3ad06e4ffa09364ca9de2fc48d123e40da243cd-007","gas":"2500000000"}}},{"key":"hash-d2469afeb99130f0be7c9ce230a84149e6d756e306ef8cf5b8a49d5182e41676","transform":"Identity"},{"key":"hash-d63c44078a1931b5dc4b80a7a0ec586164fd0470ce9f8b23f6d93b9e86c5944d","transform":"Identity"},{"key":"balance-fe327f9815a1d016e1143db85e25a86341883949fd75ac1c1e7408a26c5b62ef","transform":"Identity"},{"key":"hash-d2469afeb99130f0be7c9ce230a84149e6d756e306ef8cf5b8a49d5182e41676","transform":"Identity"},{"key":"hash-7cc1b1db4e08bbfe7bacf8e1ad828a5d9bcccbb33e55d322808c3a88da53213a","transform":"Identity"},{"key":"hash-4475016098705466254edd18d267a9dad43e341d4dafadb507d0fe3cf2d4a74b","transform":"Identity"},{"key":"balance-fe327f9815a1d016e1143db85e25a86341883949fd75ac1c1e7408a26c5b62ef","transform":"Identity"},{"key":"balance-8c2ffb7e82c5a323a4e50f6eea9a080feb89c71bb2db001bde7449e13328c0dc","transform":"Identity"},{"key":"balance-fe327f9815a1d016e1143db85e25a86341883949fd75ac1c1e7408a26c5b62ef","transform":{"WriteCLValue":{"cl_type":"U512","bytes":"00","parsed":"0"}}},{"key":"balance-8c2ffb7e82c5a323a4e50f6eea9a080feb89c71bb2db001bde7449e13328c0dc","transform":{"AddUInt512":"2500010000"}}]},"transfers":["transfer-1e75292a29d210326d8845082b302037300eac92c7d2612790ca3ab1a62e570d"],"cost":"2500000000"}}} +{"transaction_hash":{"Version1":"29cdf4ccfade736e191bd94835b8560d623b0bcf1a933a183ae484d7924c20ad"},"initiator_addr":{"PublicKey":"0119dfb1d2c12464158a6c2842ab0ea4ebc7723421b22d83dd626b5dfc7b95835c"},"timestamp":"2020-08-07T01:30:42.019Z","ttl":"17h 54m 57s 
382ms","block_hash":"5a1e6c4cfba0173e2ffbdb6e694554770f8f60c277b87ef3eb97cac2b9521d83","execution_result":{"Version1":{"Success":{"effect":{"operations":[{"key":"17644600125096963714","kind":"NoOp"},{"key":"13459827733103253581","kind":"Read"},{"key":"11676014375412053969","kind":"Read"},{"key":"9909232825903509900","kind":"Read"},{"key":"8850104445275773933","kind":"Add"}],"transforms":[{"key":"2531221168812666934","transform":{"AddUInt128":"3115144695416809598"}},{"key":"1392271867216378917","transform":"WriteContract"},{"key":"16280628745773001665","transform":{"AddUInt512":"8249938852511436756"}}]},"transfers":["transfer-93b2d942db077f0659f63c0073b8c5cfc42f418e07c5da559cb6474fa7655123","transfer-d91deab111799e0b6fc2c1c8509b80aa2e78823605b11ce56b4177a7ab29a0de","transfer-4eaa442f898aa44df25ab9b52b9f09177c170b43b0f68015c307a7cf004d772a","transfer-73616d87fe918b059d673c7da9dca13c883894f4ff0bab1ffb9db825175e3cc1","transfer-f7472a12eeeaa23adf0cf5ca2329cc64a87b35bd478ac0d3c5774ef309fb4c49"],"cost":"6115103606978039045"}}},"messages":[{"entity_addr":{"SmartContract":[96,208,170,249,191,53,191,48,11,3,51,170,76,50,48,255,137,130,50,209,124,138,205,61,75,151,239,3,242,196,126,127]},"message":{"String":"KXpjKX96KMEDRqOnSHyivAF1sATg2RorsXp2CC7P69kM5wxXlTD83bM0zIv6X44U"},"topic_name":"rcMtmYrZOKhJATCXSN7Z57BUNW1UPzF0","topic_name_hash":"2e58fa22f0d51c7c886c3114510ba577b4a413c89aa044de55d972a2600450ac","topic_index":475963101,"block_index":16528668961632653036},{"entity_addr":{"System":[233,58,15,34,92,205,78,176,36,51,210,212,114,33,41,29,40,75,197,219,12,183,180,32,102,174,222,29,101,7,56,7]},"message":{"String":"fzagGCeHuPXnvMrn1I64kq4RPwcMLW2tOiBsmD1tUmIIz5Dgr9cAokY2KuDPVGMM"},"topic_name":"tsI4hSjHroXRXdim8IBZ3Gd1oOHitCE1","topic_name_hash":"0cebb0111bbe91d29d57ec175d011112362a73af58e7ddf6844609ab0d81ef3c","topic_index":152649425,"block_index":9888272225071285086}]} ```


-### Faults by Public Key +### Faults by public key Retrieve the faults associated with a validator's public key. The path URL is `/faults/`. Enter a valid hexadecimal representation of a validator's public key. Example: -```json +```sh curl -s http://127.0.0.1:18888/faults/01a601840126a0363a6048bfcbb0492ab5a313a1a19dc4c695650d8f3b51302703 ``` -### Faults by Era +### Faults by era Return the faults associated with an era, given a valid era identifier. The path URL is: `/faults/`. Enter an era identifier. Example: -```json +```sh curl -s http://127.0.0.1:18888/faults/2304 ``` -### Finality Signatures by Block +### Finality signatures by block Retrieve the finality signatures in a block, given its block hash. @@ -359,11 +362,11 @@ The path URL is: `/signatures/`. Enter a valid block hash Example: -```json +```sh curl -s http://127.0.0.1:18888/signatures/85aa2a939bc3a4afc6d953c965bab333bb5e53185b96bb07b52c295164046da2 ``` -### Step by Era +### Step by era Retrieve the step event emitted at the end of an era, given a valid era identifier. @@ -371,28 +374,28 @@ The path URL is: `/step/`. Enter a valid era identifier. Example: -```json +```sh curl -s http://127.0.0.1:18888/step/7268 ``` -### Missing Filter +### Missing filter If no filter URL was specified after the root address (HOST:PORT), an error message will be returned. Example: -```json +```sh curl http://127.0.0.1:18888 {"code":400,"message":"Invalid request path provided"} ``` -### Invalid Filter +### Invalid filter If an invalid filter was specified, an error message will be returned. 
Example: -```json +```sh curl http://127.0.0.1:18888/other {"code":400,"message":"Invalid request path provided"} ``` diff --git a/ci/publish-casper-event-sidecar-crates.yml b/ci/publish-casper-event-sidecar-crates.yml index 35861422..803eb612 100644 --- a/ci/publish-casper-event-sidecar-crates.yml +++ b/ci/publish-casper-event-sidecar-crates.yml @@ -1,5 +1,5 @@ --- -name: publish-casper-event-sidecar-crates +name: publish-casper-sidecar-crates on: push: diff --git a/ci/publish_deb_to_repo.sh b/ci/publish_deb_to_repo.sh index 10a31fdf..b320f8e7 100755 --- a/ci/publish_deb_to_repo.sh +++ b/ci/publish_deb_to_repo.sh @@ -1,13 +1,9 @@ #!/usr/bin/env bash set -e -# DEFAULTS -PLUGIN_OS_CODENAME="${PLUGIN_OS_CODENAME:-bionic}" - # Verify all variables are present if [[ -z $PLUGIN_GPG_KEY || -z $PLUGIN_GPG_PASS || -z $PLUGIN_REGION \ || -z $PLUGIN_REPO_NAME || -z $PLUGIN_ACL || -z $PLUGIN_PREFIX \ - || -z $AWS_SECRET_ACCESS_KEY || -z $AWS_ACCESS_KEY_ID \ || -z $PLUGIN_DEB_PATH || -z $PLUGIN_OS_CODENAME ]]; then echo "ERROR: Environment Variable Missing!" 
exit 1 diff --git a/event_sidecar/Cargo.toml b/event_sidecar/Cargo.toml new file mode 100644 index 00000000..8427c464 --- /dev/null +++ b/event_sidecar/Cargo.toml @@ -0,0 +1,68 @@ +[package] +name = "casper-event-sidecar" +authors = ["George Williamson ", "Jakub Zajkowski "] +version = "1.0.0" +edition = "2018" +readme = "README.md" +description = "App for storing and republishing sse events of a casper node" +license-file = "../LICENSE" +documentation = "README.md" +homepage = "https://github.com/casper-network/casper-sidecar/" +repository = "https://github.com/casper-network/casper-sidecar/" + +[features] +additional-metrics = ["casper-event-types/additional-metrics", "metrics/additional-metrics"] +testing = [] + +[dependencies] +anyhow = { workspace = true } +async-trait = "0.1.56" +bytes = "1.2.0" +casper-event-listener = { path = "../listener", version = "1.0.0" } +casper-event-types.workspace = true +casper-types = { workspace = true, features = ["std", "json-schema"] } +derive-new = "0.5.9" +eventsource-stream = "0.2.3" +futures = { workspace = true } +hex = "0.4.3" +hex_fmt = "0.3.0" +http = "0.2.1" +hyper = "0.14.4" +indexmap = "2.0.0" +itertools = { workspace = true } +jsonschema = "0.17.1" +metrics = { workspace = true } +pin-project = "1.1.5" +rand = "0.8.3" +regex = "1.6.0" +reqwest = "0.12.5" +schemars = "0.8.21" +sea-query = "0.30" +serde = { workspace = true, default-features = true, features = ["derive", "rc"] } +serde_json = "1.0" +sqlx = { version = "0.7", features = ["runtime-tokio-native-tls", "sqlite", "postgres"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full"] } +tokio-stream = { version = "0.1.4", features = ["sync"] } +tower = { version = "0.4.13", features = ["buffer", "limit", "make", "timeout"] } +tracing = { workspace = true, default-features = true } +tracing-subscriber = { workspace = true } +utoipa = { version = "4", features = ["rc_schema"] } +utoipa-swagger-ui = { version = "6" } +warp = { 
version = "0.3.6", features = ["compression"] } +wheelbuf = "0.2.0" + +[dev-dependencies] +async-stream = { workspace = true } +casper-event-types = { workspace = true, features = ["sse-data-testing"] } +casper-types = { workspace = true, features = ["std", "testing"] } +colored = "2.0.0" +futures-util = { workspace = true } +once_cell = { workspace = true } +pg-embed = { git = "https://github.com/zajko/pg-embed", branch = "bump_dependencies" } +portpicker = "0.1.1" +pretty_assertions = "1" +reqwest = { version = "0.12.5", features = ["stream"] } +tabled = { version = "0.10.0", features = ["derive", "color"] } +tempfile = "3" +tokio-util = "0.7.8" diff --git a/sidecar/src/admin_server.rs b/event_sidecar/src/admin_server.rs similarity index 73% rename from sidecar/src/admin_server.rs rename to event_sidecar/src/admin_server.rs index 2c206c27..fb17399e 100644 --- a/sidecar/src/admin_server.rs +++ b/event_sidecar/src/admin_server.rs @@ -1,13 +1,12 @@ -use crate::types::config::AdminServerConfig; +use crate::types::config::AdminApiServerConfig; use crate::utils::{resolve_address, root_filter, Unexpected}; use anyhow::Error; -use casper_event_types::metrics::metrics_summary; use hyper::Server; -use std::net::TcpListener; -use std::time::Duration; +use metrics::metrics_summary; +use std::{net::TcpListener, process::ExitCode, time::Duration}; use tower::{buffer::Buffer, make::Shared, ServiceBuilder}; -use warp::Filter; -use warp::{Rejection, Reply}; +use tracing::info; +use warp::{Filter, Rejection, Reply}; const BIND_ALL_INTERFACES: &str = "0.0.0.0"; struct AdminServer { @@ -28,7 +27,7 @@ impl AdminServer { .concurrency_limit(self.max_concurrent_requests as usize) .rate_limit(self.max_requests_per_second as u64, Duration::from_secs(1)) .service(warp_service); - + info!(address = %address, "started Admin API server"); Server::from_tcp(listener)? 
.serve(Shared::new(Buffer::new(tower_service, 50))) .await?; @@ -37,14 +36,20 @@ impl AdminServer { } } -pub async fn run_server(config: AdminServerConfig) -> Result<(), Error> { - AdminServer { - port: config.port, - max_concurrent_requests: config.max_concurrent_requests, - max_requests_per_second: config.max_requests_per_second, +pub async fn run_server(config: AdminApiServerConfig) -> Result { + if config.enable_server { + AdminServer { + port: config.port, + max_concurrent_requests: config.max_concurrent_requests, + max_requests_per_second: config.max_requests_per_second, + } + .start() + .await + .map(|_| ExitCode::SUCCESS) + } else { + info!("Admin API server is disabled. Skipping..."); + Ok(ExitCode::SUCCESS) } - .start() - .await } /// Return metrics data at a given time. @@ -65,7 +70,7 @@ async fn metrics_handler() -> Result { #[cfg(test)] mod tests { - use crate::{admin_server::run_server, types::config::AdminServerConfig}; + use crate::{admin_server::run_server, types::config::AdminApiServerConfig}; use portpicker::pick_unused_port; use reqwest::Response; @@ -73,7 +78,8 @@ mod tests { async fn given_config_should_start_admin_server() { let port = pick_unused_port().unwrap(); let request_url = format!("http://localhost:{}/metrics", port); - let admin_config = AdminServerConfig { + let admin_config = AdminApiServerConfig { + enable_server: true, port, max_concurrent_requests: 1, max_requests_per_second: 1, diff --git a/sidecar/src/api_version_manager.rs b/event_sidecar/src/api_version_manager.rs similarity index 100% rename from sidecar/src/api_version_manager.rs rename to event_sidecar/src/api_version_manager.rs diff --git a/sidecar/src/database/database_errors.rs b/event_sidecar/src/database/database_errors.rs similarity index 100% rename from sidecar/src/database/database_errors.rs rename to event_sidecar/src/database/database_errors.rs diff --git a/sidecar/src/database/env_vars.rs b/event_sidecar/src/database/env_vars.rs similarity index 100% rename 
from sidecar/src/database/env_vars.rs rename to event_sidecar/src/database/env_vars.rs diff --git a/sidecar/src/database/errors.rs b/event_sidecar/src/database/errors.rs similarity index 100% rename from sidecar/src/database/errors.rs rename to event_sidecar/src/database/errors.rs diff --git a/sidecar/src/database/migration_manager.rs b/event_sidecar/src/database/migration_manager.rs similarity index 100% rename from sidecar/src/database/migration_manager.rs rename to event_sidecar/src/database/migration_manager.rs diff --git a/sidecar/src/database/migration_manager/tests.rs b/event_sidecar/src/database/migration_manager/tests.rs similarity index 98% rename from sidecar/src/database/migration_manager/tests.rs rename to event_sidecar/src/database/migration_manager/tests.rs index d78c561f..d1fbd0bb 100644 --- a/sidecar/src/database/migration_manager/tests.rs +++ b/event_sidecar/src/database/migration_manager/tests.rs @@ -13,7 +13,6 @@ const MAX_CONNECTIONS: u32 = 100; #[tokio::test] async fn should_have_version_none_if_no_migrations_applied() { let sqlite_db = SqliteDatabase::new_in_memory_no_migrations(MAX_CONNECTIONS) - .await .expect("Error opening database in memory"); let apply_res = MigrationManager::apply_migrations(sqlite_db.clone(), vec![]).await; @@ -27,7 +26,6 @@ async fn should_have_version_none_if_no_migrations_applied() { #[tokio::test] async fn should_store_failed_version_if_migration_was_erroneous() { let sqlite_db = SqliteDatabase::new_in_memory_no_migrations(MAX_CONNECTIONS) - .await .expect("Error opening database in memory"); let apply_res = @@ -43,7 +41,6 @@ async fn should_store_failed_version_if_migration_was_erroneous() { #[tokio::test] async fn should_apply_migration() { let sqlite_db = SqliteDatabase::new_in_memory_no_migrations(MAX_CONNECTIONS) - .await .expect("Error opening database in memory"); let apply_res = @@ -66,7 +63,6 @@ async fn should_apply_migration() { #[tokio::test] async fn should_apply_migrations() { let sqlite_db = 
SqliteDatabase::new_in_memory_no_migrations(MAX_CONNECTIONS) - .await .expect("Error opening database in memory"); let apply_res = MigrationManager::apply_migrations( @@ -104,7 +100,6 @@ async fn should_apply_migrations() { async fn given_ok_and_failing_migrations_first_should_be_applied_second_only_stored_in_migrations_table( ) { let sqlite_db = SqliteDatabase::new_in_memory_no_migrations(MAX_CONNECTIONS) - .await .expect("Error opening database in memory"); let apply_res = MigrationManager::apply_migrations( @@ -123,7 +118,6 @@ async fn given_ok_and_failing_migrations_first_should_be_applied_second_only_sto #[tokio::test] async fn should_fail_if_migration_has_no_version() { let sqlite_db = SqliteDatabase::new_in_memory_no_migrations(MAX_CONNECTIONS) - .await .expect("Error opening database in memory"); let migration = build_no_version_migration(); @@ -135,7 +129,6 @@ async fn should_fail_if_migration_has_no_version() { #[tokio::test] async fn should_fail_if_two_migrations_have_the_same_version() { let sqlite_db = SqliteDatabase::new_in_memory_no_migrations(MAX_CONNECTIONS) - .await .expect("Error opening database in memory"); let apply_res = MigrationManager::apply_migrations( @@ -150,7 +143,6 @@ async fn should_fail_if_two_migrations_have_the_same_version() { #[tokio::test] async fn given_shuffled_migrations_should_sort_by_version() { let sqlite_db = SqliteDatabase::new_in_memory_no_migrations(MAX_CONNECTIONS) - .await .expect("Error opening database in memory"); let apply_res = MigrationManager::apply_migrations( @@ -179,7 +171,6 @@ async fn given_shuffled_migrations_should_sort_by_version() { #[tokio::test] async fn should_execute_script() { let sqlite_db = SqliteDatabase::new_in_memory_no_migrations(MAX_CONNECTIONS) - .await .expect("Error opening database in memory"); let apply_res = @@ -203,7 +194,6 @@ async fn should_execute_script() { #[tokio::test] async fn given_failed_migration_should_not_execute_next_migration_() { let sqlite_db = 
SqliteDatabase::new_in_memory_no_migrations(MAX_CONNECTIONS) - .await .expect("Error opening database in memory"); let apply_res = MigrationManager::apply_migrations( @@ -222,7 +212,6 @@ async fn given_failed_migration_should_not_execute_next_migration_() { #[tokio::test] async fn should_store_failed_version_if_script_fails() { let sqlite_db = SqliteDatabase::new_in_memory_no_migrations(MAX_CONNECTIONS) - .await .expect("Error opening database in memory"); let apply_res = MigrationManager::apply_migrations( diff --git a/sidecar/src/database/mod.rs b/event_sidecar/src/database/mod.rs similarity index 82% rename from sidecar/src/database/mod.rs rename to event_sidecar/src/database/mod.rs index bbcbd297..c7796eed 100644 --- a/sidecar/src/database/mod.rs +++ b/event_sidecar/src/database/mod.rs @@ -11,3 +11,5 @@ pub mod sqlite_database; #[cfg(test)] pub mod tests; pub mod types; + +pub use self::database_errors::DatabaseConfigError; diff --git a/sidecar/src/database/postgresql_database.rs b/event_sidecar/src/database/postgresql_database.rs similarity index 100% rename from sidecar/src/database/postgresql_database.rs rename to event_sidecar/src/database/postgresql_database.rs diff --git a/sidecar/src/database/postgresql_database/reader.rs b/event_sidecar/src/database/postgresql_database/reader.rs similarity index 100% rename from sidecar/src/database/postgresql_database/reader.rs rename to event_sidecar/src/database/postgresql_database/reader.rs diff --git a/sidecar/src/database/postgresql_database/tests.rs b/event_sidecar/src/database/postgresql_database/tests.rs similarity index 51% rename from sidecar/src/database/postgresql_database/tests.rs rename to event_sidecar/src/database/postgresql_database/tests.rs index 12f40c38..15b36c25 100644 --- a/sidecar/src/database/postgresql_database/tests.rs +++ b/event_sidecar/src/database/postgresql_database/tests.rs @@ -4,24 +4,33 @@ use crate::{ utils::tests::build_postgres_database, }; use casper_types::testing::TestRng; -use 
sea_query::{Asterisk, Expr, PostgresQueryBuilder, Query, SqliteQueryBuilder}; +use sea_query::{PostgresQueryBuilder, Query}; use sqlx::Row; +use super::PostgreSqlDatabase; + #[tokio::test] async fn should_save_and_retrieve_a_u32max_id() { let context = build_postgres_database().await.unwrap(); - let sqlite_db = &context.db; - let sql = tables::event_log::create_insert_stmt(1, "source", u32::MAX, "event key") - .expect("Error creating event_log insert SQL") - .to_string(PostgresQueryBuilder); - let _ = sqlite_db.fetch_one(&sql).await; + let db = &context.db; + let sql = tables::event_log::create_insert_stmt( + 1, + "source", + u32::MAX, + "event key", + "2.0.3", + "network-1", + ) + .expect("Error creating event_log insert SQL") + .to_string(PostgresQueryBuilder); + let _ = db.fetch_one(&sql).await; let sql = Query::select() .column(tables::event_log::EventLog::EventId) .from(tables::event_log::EventLog::Table) .limit(1) .to_string(PostgresQueryBuilder); - let event_id_u32max = sqlite_db + let event_id_u32max = db .fetch_one(&sql) .await .try_get::(0) @@ -37,43 +46,51 @@ async fn should_save_and_retrieve_block_added() { } #[tokio::test] -async fn should_save_and_retrieve_deploy_accepted() { +async fn should_save_and_retrieve_transaction_accepted() { let test_context = build_postgres_database().await.unwrap(); - crate::database::tests::should_save_and_retrieve_deploy_accepted(test_context.db.clone()).await; + crate::database::tests::should_save_and_retrieve_transaction_accepted(test_context.db.clone()) + .await; } #[tokio::test] -async fn should_save_and_retrieve_deploy_processed() { +async fn should_save_and_retrieve_transaction_processed() { let test_context = build_postgres_database().await.unwrap(); - crate::database::tests::should_save_and_retrieve_deploy_processed(test_context.db.clone()) + crate::database::tests::should_save_and_retrieve_transaction_processed(test_context.db.clone()) .await; } #[tokio::test] -async fn should_save_and_retrieve_deploy_expired() 
{ +async fn should_save_and_retrieve_transaction_expired() { let test_context = build_postgres_database().await.unwrap(); - crate::database::tests::should_save_and_retrieve_deploy_expired(test_context.db.clone()).await; + crate::database::tests::should_save_and_retrieve_transaction_expired(test_context.db.clone()) + .await; } #[tokio::test] -async fn should_retrieve_deploy_aggregate_of_accepted() { +async fn should_retrieve_transaction_aggregate_of_accepted() { let test_context = build_postgres_database().await.unwrap(); - crate::database::tests::should_retrieve_deploy_aggregate_of_accepted(test_context.db.clone()) - .await; + crate::database::tests::should_retrieve_transaction_aggregate_of_accepted( + test_context.db.clone(), + ) + .await; } #[tokio::test] -async fn should_retrieve_deploy_aggregate_of_processed() { +async fn should_retrieve_transaction_aggregate_of_processed() { let test_context = build_postgres_database().await.unwrap(); - crate::database::tests::should_retrieve_deploy_aggregate_of_processed(test_context.db.clone()) - .await; + crate::database::tests::should_retrieve_transaction_aggregate_of_processed( + test_context.db.clone(), + ) + .await; } #[tokio::test] -async fn should_retrieve_deploy_aggregate_of_expired() { +async fn should_retrieve_transaction_aggregate_of_expired() { let test_context = build_postgres_database().await.unwrap(); - crate::database::tests::should_retrieve_deploy_aggregate_of_expired(test_context.db.clone()) - .await; + crate::database::tests::should_retrieve_transaction_aggregate_of_expired( + test_context.db.clone(), + ) + .await; } #[tokio::test] @@ -126,27 +143,27 @@ async fn should_disallow_insert_of_existing_block_added() { } #[tokio::test] -async fn should_disallow_insert_of_existing_deploy_accepted() { +async fn should_disallow_insert_of_existing_transaction_accepted() { let test_context = build_postgres_database().await.unwrap(); - crate::database::tests::should_disallow_insert_of_existing_deploy_accepted( + 
crate::database::tests::should_disallow_insert_of_existing_transaction_accepted( test_context.db.clone(), ) .await; } #[tokio::test] -async fn should_disallow_insert_of_existing_deploy_expired() { +async fn should_disallow_insert_of_existing_transaction_expired() { let test_context = build_postgres_database().await.unwrap(); - crate::database::tests::should_disallow_insert_of_existing_deploy_expired( + crate::database::tests::should_disallow_insert_of_existing_transaction_expired( test_context.db.clone(), ) .await; } #[tokio::test] -async fn should_disallow_insert_of_existing_deploy_processed() { +async fn should_disallow_insert_of_existing_transaction_processed() { let test_context = build_postgres_database().await.unwrap(); - crate::database::tests::should_disallow_insert_of_existing_deploy_processed( + crate::database::tests::should_disallow_insert_of_existing_transaction_processed( test_context.db.clone(), ) .await; @@ -178,115 +195,118 @@ async fn should_save_block_added_with_correct_event_type_id() { let mut test_rng = TestRng::new(); let test_context = build_postgres_database().await.unwrap(); - let sqlite_db = &test_context.db; + let db = &test_context.db; let block_added = BlockAdded::random(&mut test_rng); - assert!(sqlite_db - .save_block_added(block_added, 1, "127.0.0.1".to_string()) + assert!(db + .save_block_added( + block_added, + 1, + "127.0.0.1".to_string(), + "2.0.1".to_string(), + "network-1".to_string() + ) .await .is_ok()); - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::BlockAdded as i16) + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::BlockAdded as i16, + "2.0.1", + "network-1", + ) + .await; } #[tokio::test] -async fn 
should_save_deploy_accepted_with_correct_event_type_id() { +async fn should_save_transaction_accepted_with_correct_event_type_id() { let mut test_rng = TestRng::new(); let test_context = build_postgres_database().await.unwrap(); - let sqlite_db = &test_context.db; - - let deploy_accepted = DeployAccepted::random(&mut test_rng); - - assert!(sqlite_db - .save_deploy_accepted(deploy_accepted, 1, "127.0.0.1".to_string()) + let db = &test_context.db; + + let transaction_accepted = TransactionAccepted::random(&mut test_rng); + + assert!(db + .save_transaction_accepted( + transaction_accepted, + 1, + "127.0.0.1".to_string(), + "2.0.5".to_string(), + "network-2".to_string() + ) .await .is_ok()); - - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::DeployAccepted as i16) + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::TransactionAccepted as i16, + "2.0.5", + "network-2", + ) + .await; } #[tokio::test] -async fn should_save_deploy_processed_with_correct_event_type_id() { +async fn should_save_transaction_processed_with_correct_event_type_id() { let mut test_rng = TestRng::new(); let test_context = build_postgres_database().await.unwrap(); - let sqlite_db = &test_context.db; - - let deploy_processed = DeployProcessed::random(&mut test_rng, None); - - assert!(sqlite_db - .save_deploy_processed(deploy_processed, 1, "127.0.0.1".to_string()) + let db = &test_context.db; + + let transaction_processed = TransactionProcessed::random(&mut test_rng, None); + + assert!(db + .save_transaction_processed( + transaction_processed, + 1, + "127.0.0.1".to_string(), + "2.0.3".to_string(), + "network-3".to_string(), + ) .await .is_ok()); - let sql = Query::select() - 
.column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::DeployProcessed as i16) + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::TransactionProcessed as i16, + "2.0.3", + "network-3", + ) + .await; } #[tokio::test] -async fn should_save_deploy_expired_with_correct_event_type_id() { +async fn should_save_transaction_expired_with_correct_event_type_id() { let mut test_rng = TestRng::new(); let test_context = build_postgres_database().await.unwrap(); - let sqlite_db = &test_context.db; - - let deploy_expired = DeployExpired::random(&mut test_rng, None); - - assert!(sqlite_db - .save_deploy_expired(deploy_expired, 1, "127.0.0.1".to_string()) + let db = &test_context.db; + + let transaction_expired = TransactionExpired::random(&mut test_rng, None); + + assert!(db + .save_transaction_expired( + transaction_expired, + 1, + "127.0.0.1".to_string(), + "2.0.4".to_string(), + "network-4".to_string(), + ) .await .is_ok()); - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::DeployExpired as i16) + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::TransactionExpired as i16, + "2.0.4", + "network-4", + ) + .await; } #[tokio::test] @@ -294,28 +314,29 @@ async fn should_save_fault_with_correct_event_type_id() { let mut test_rng = TestRng::new(); let test_context = build_postgres_database().await.unwrap(); - let sqlite_db = &test_context.db; + let db = &test_context.db; let fault = Fault::random(&mut test_rng); - 
assert!(sqlite_db - .save_fault(fault, 1, "127.0.0.1".to_string()) + assert!(db + .save_fault( + fault, + 1, + "127.0.0.1".to_string(), + "2.0.5".to_string(), + "network-5".to_string() + ) .await .is_ok()); - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::Fault as i16) + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::Fault as i16, + "2.0.5", + "network-5", + ) + .await; } #[tokio::test] @@ -323,28 +344,29 @@ async fn should_save_finality_signature_with_correct_event_type_id() { let mut test_rng = TestRng::new(); let test_context = build_postgres_database().await.unwrap(); - let sqlite_db = &test_context.db; + let db = &test_context.db; let finality_signature = FinalitySignature::random(&mut test_rng); - assert!(sqlite_db - .save_finality_signature(finality_signature, 1, "127.0.0.1".to_string()) + assert!(db + .save_finality_signature( + finality_signature, + 1, + "127.0.0.1".to_string(), + "2.0.5".to_string(), + "network-5".to_string(), + ) .await .is_ok()); - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::FinalitySignature as i16) + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::FinalitySignature as i16, + "2.0.5", + "network-5", + ) + .await; } #[tokio::test] @@ -352,46 +374,51 @@ async fn should_save_step_with_correct_event_type_id() { let mut test_rng = TestRng::new(); let test_context = build_postgres_database().await.unwrap(); - let sqlite_db = 
&test_context.db; + let db = &test_context.db; let step = Step::random(&mut test_rng); - assert!(sqlite_db - .save_step(step, 1, "127.0.0.1".to_string()) + assert!(db + .save_step( + step, + 1, + "127.0.0.1".to_string(), + "2.0.6".to_string(), + "network-6".to_string() + ) .await .is_ok()); - - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::Step as i16) + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::Step as i16, + "2.0.6", + "network-6", + ) + .await; } #[tokio::test] async fn should_save_and_retrieve_a_shutdown() { let test_context = build_postgres_database().await.unwrap(); - let sqlite_db = &test_context.db; - assert!(sqlite_db.save_shutdown(15, "xyz".to_string()).await.is_ok()); - - let sql = Query::select() - .expr(Expr::col(Asterisk)) - .from(tables::shutdown::Shutdown::Table) - .to_string(SqliteQueryBuilder); - let row = sqlite_db.fetch_one(&sql).await; - - assert_eq!( - row.get::("event_source_address"), - "xyz".to_string() - ); + let db = &test_context.db; + assert!(db + .save_shutdown( + 15, + "xyz".to_string(), + "2.0.7".to_string(), + "network-7".to_string() + ) + .await + .is_ok()); + verify_event_log_entry( + db, + "xyz", + EventTypeId::Shutdown as i16, + "2.0.7", + "network-7", + ) + .await; } #[tokio::test] @@ -408,3 +435,32 @@ async fn get_number_of_events_should_return_1_when_event_stored() { ) .await; } + +async fn verify_event_log_entry( + db: &PostgreSqlDatabase, + expected_event_source_address: &str, + expected_event_type_id: i16, + expected_api_version: &str, + expected_network_name: &str, +) { + let sql = crate::database::tests::fetch_event_log_data_query().to_string(PostgresQueryBuilder); + + let row = db.fetch_one(&sql).await; + let 
event_type_id = row + .try_get::(0) + .expect("Error getting event_type_id from row"); + let api_version = row + .try_get::(1) + .expect("Error getting api_version from row"); + let network_name = row + .try_get::(2) + .expect("Error getting network_name from row"); + let event_source_address = row.get::(3); + assert_eq!(event_type_id, expected_event_type_id); + assert_eq!(api_version, expected_api_version.to_string()); + assert_eq!(network_name, expected_network_name.to_string()); + assert_eq!( + event_source_address, + expected_event_source_address.to_string() + ); +} diff --git a/sidecar/src/database/postgresql_database/writer.rs b/event_sidecar/src/database/postgresql_database/writer.rs similarity index 95% rename from sidecar/src/database/postgresql_database/writer.rs rename to event_sidecar/src/database/postgresql_database/writer.rs index 2279b419..f58c79ba 100644 --- a/sidecar/src/database/postgresql_database/writer.rs +++ b/event_sidecar/src/database/postgresql_database/writer.rs @@ -10,7 +10,6 @@ database_writer_implementation!( PgQueryResult, PostgresQueryBuilder, DDLConfiguration { - is_big_integer_id: true, db_supports_unsigned: false, } ); diff --git a/sidecar/src/database/reader_generator.rs b/event_sidecar/src/database/reader_generator.rs similarity index 54% rename from sidecar/src/database/reader_generator.rs rename to event_sidecar/src/database/reader_generator.rs index 53d63a14..e11dd313 100644 --- a/sidecar/src/database/reader_generator.rs +++ b/event_sidecar/src/database/reader_generator.rs @@ -5,21 +5,27 @@ macro_rules! 
database_reader_implementation { $query_materializer_expr:expr) => { use anyhow::Error; use async_trait::async_trait; - use casper_event_types::FinalitySignature as FinSig; + use casper_types::FinalitySignature as FinSig; + use metrics::db::observe_raw_data_size; use serde::Deserialize; use sqlx::{Executor, Row}; use $crate::{ - database::errors::{wrap_query_error, DbError}, + database::{ + errors::{wrap_query_error, DbError}, + types::SseEnvelope, + }, sql::tables, types::{ - database::{DatabaseReadError, DatabaseReader, DeployAggregate}, + database::{ + DatabaseReadError, DatabaseReader, TransactionAggregate, TransactionTypeId, + }, sse_events::*, }, }; #[async_trait] impl DatabaseReader for $extended_type { - async fn get_latest_block(&self) -> Result { + async fn get_latest_block(&self) -> Result, DatabaseReadError> { let db_connection = &self.connection_pool; let stmt = tables::block_added::create_get_latest_stmt() @@ -32,7 +38,7 @@ macro_rules! database_reader_implementation { async fn get_block_by_height( &self, height: u64, - ) -> Result { + ) -> Result, DatabaseReadError> { let db_connection = &self.connection_pool; let stmt = tables::block_added::create_get_by_height_stmt(height) @@ -43,7 +49,10 @@ macro_rules! database_reader_implementation { parse_block_from_row(row) } - async fn get_block_by_hash(&self, hash: &str) -> Result { + async fn get_block_by_hash( + &self, + hash: &str, + ) -> Result, DatabaseReadError> { let db_connection = &self.connection_pool; let stmt = tables::block_added::create_get_by_hash_stmt(hash.to_string()) @@ -59,43 +68,52 @@ macro_rules! database_reader_implementation { }) } - async fn get_deploy_aggregate_by_hash( + async fn get_transaction_aggregate_by_identifier( &self, + transaction_type: &TransactionTypeId, hash: &str, - ) -> Result { + ) -> Result { // We may return here with NotFound because if there's no accepted record then theoretically there should be no other records for the given hash. 
- let deploy_accepted = self.get_deploy_accepted_by_hash(hash).await?; - - // However we handle the Err case for DeployProcessed explicitly as we don't want to return NotFound when we've got a DeployAccepted to return - match self.get_deploy_processed_by_hash(hash).await { - Ok(deploy_processed) => Ok(DeployAggregate { - deploy_hash: hash.to_string(), - deploy_accepted: Some(deploy_accepted), - deploy_processed: Some(deploy_processed), - deploy_expired: false, + let transaction_accepted = self + .get_transaction_accepted_by_hash(transaction_type.clone(), hash) + .await?; + + // However we handle the Err case for TransactionProcessed explicitly as we don't want to return NotFound when we've got a TransactionAccepted to return + match self + .get_transaction_processed_by_hash(transaction_type, hash) + .await + { + Ok(transaction_processed) => Ok(TransactionAggregate { + transaction_hash: hash.to_string(), + transaction_accepted: Some(transaction_accepted), + transaction_processed: Some(transaction_processed), + transaction_expired: false, }), Err(err) => { // If the error is anything other than NotFound return the error. if !matches!(DatabaseReadError::NotFound, _err) { return Err(err); } - match self.get_deploy_expired_by_hash(hash).await { - Ok(_) => Ok(DeployAggregate { - deploy_hash: hash.to_string(), - deploy_accepted: Some(deploy_accepted), - deploy_processed: None, - deploy_expired: true, + match self + .get_transaction_expired_by_hash(transaction_type, hash) + .await + { + Ok(_) => Ok(TransactionAggregate { + transaction_hash: hash.to_string(), + transaction_accepted: Some(transaction_accepted), + transaction_processed: None, + transaction_expired: true, }), Err(err) => { // If the error is anything other than NotFound return the error. 
if !matches!(DatabaseReadError::NotFound, _err) { return Err(err); } - Ok(DeployAggregate { - deploy_hash: hash.to_string(), - deploy_accepted: Some(deploy_accepted), - deploy_processed: None, - deploy_expired: false, + Ok(TransactionAggregate { + transaction_hash: hash.to_string(), + transaction_accepted: Some(transaction_accepted), + transaction_processed: None, + transaction_expired: false, }) } } @@ -103,14 +121,18 @@ macro_rules! database_reader_implementation { } } - async fn get_deploy_accepted_by_hash( + async fn get_transaction_accepted_by_hash( &self, + transaction_type: &TransactionTypeId, hash: &str, - ) -> Result { + ) -> Result, DatabaseReadError> { let db_connection = &self.connection_pool; - let stmt = tables::deploy_accepted::create_get_by_hash_stmt(hash.to_string()) - .to_string($query_materializer_expr); + let stmt = tables::transaction_accepted::create_get_by_hash_stmt( + transaction_type.into(), + hash.to_string(), + ) + .to_string($query_materializer_expr); db_connection .fetch_optional(stmt.as_str()) @@ -119,22 +141,27 @@ macro_rules! 
database_reader_implementation { .and_then(|maybe_row| match maybe_row { None => Err(DatabaseReadError::NotFound), Some(row) => { - let raw = row - .try_get::("raw") - .map_err(|error| wrap_query_error(error.into()))?; - deserialize_data::(&raw).map_err(wrap_query_error) + let (raw, api_version, network_name) = + fetch_envelope_data_from_row(row, "TransactionAccepted")?; + let sse_event = deserialize_data::(&raw) + .map_err(wrap_query_error)?; + Ok(SseEnvelope::new(sse_event, api_version, network_name)) } }) } - async fn get_deploy_processed_by_hash( + async fn get_transaction_processed_by_hash( &self, + transaction_type: &TransactionTypeId, hash: &str, - ) -> Result { + ) -> Result, DatabaseReadError> { let db_connection = &self.connection_pool; - let stmt = tables::deploy_processed::create_get_by_hash_stmt(hash.to_string()) - .to_string($query_materializer_expr); + let stmt = tables::transaction_processed::create_get_by_hash_stmt( + transaction_type.into(), + hash.to_string(), + ) + .to_string($query_materializer_expr); db_connection .fetch_optional(stmt.as_str()) @@ -143,22 +170,27 @@ macro_rules! 
database_reader_implementation { .and_then(|maybe_row| match maybe_row { None => Err(DatabaseReadError::NotFound), Some(row) => { - let raw = row - .try_get::("raw") - .map_err(|sqlx_error| wrap_query_error(sqlx_error.into()))?; - deserialize_data::(&raw).map_err(wrap_query_error) + let (raw, api_version, network_name) = + fetch_envelope_data_from_row(row, "TransactionProcessed")?; + let sse_event = deserialize_data::(&raw) + .map_err(wrap_query_error)?; + Ok(SseEnvelope::new(sse_event, api_version, network_name)) } }) } - async fn get_deploy_expired_by_hash( + async fn get_transaction_expired_by_hash( &self, + transaction_type: &TransactionTypeId, hash: &str, - ) -> Result { + ) -> Result, DatabaseReadError> { let db_connection = &self.connection_pool; - let stmt = tables::deploy_expired::create_get_by_hash_stmt(hash.to_string()) - .to_string($query_materializer_expr); + let stmt = tables::transaction_expired::create_get_by_hash_stmt( + transaction_type.into(), + hash.to_string(), + ) + .to_string($query_materializer_expr); db_connection .fetch_optional(stmt.as_str()) @@ -167,10 +199,11 @@ macro_rules! database_reader_implementation { .and_then(|maybe_row| match maybe_row { None => Err(DatabaseReadError::NotFound), Some(row) => { - let raw = row - .try_get::("raw") - .map_err(|sqlx_error| wrap_query_error(sqlx_error.into()))?; - deserialize_data::(&raw).map_err(wrap_query_error) + let (raw, api_version, network_name) = + fetch_envelope_data_from_row(row, "TransactionExpired")?; + let sse_event = deserialize_data::(&raw) + .map_err(wrap_query_error)?; + Ok(SseEnvelope::new(sse_event, api_version, network_name)) } }) } @@ -178,7 +211,7 @@ macro_rules! database_reader_implementation { async fn get_faults_by_public_key( &self, public_key: &str, - ) -> Result, DatabaseReadError> { + ) -> Result>, DatabaseReadError> { let db_connection = &self.connection_pool; let stmt = @@ -192,7 +225,10 @@ macro_rules! 
database_reader_implementation { .and_then(parse_faults_from_rows) } - async fn get_faults_by_era(&self, era: u64) -> Result, DatabaseReadError> { + async fn get_faults_by_era( + &self, + era: u64, + ) -> Result>, DatabaseReadError> { let db_connection = &self.connection_pool; let stmt = tables::fault::create_get_faults_by_era_stmt(era) @@ -208,7 +244,7 @@ macro_rules! database_reader_implementation { async fn get_finality_signatures_by_block( &self, block_hash: &str, - ) -> Result, DatabaseReadError> { + ) -> Result>, DatabaseReadError> { let db_connection = &self.connection_pool; let stmt = @@ -224,7 +260,10 @@ macro_rules! database_reader_implementation { .and_then(parse_finality_signatures_from_rows) } - async fn get_step_by_era(&self, era: u64) -> Result { + async fn get_step_by_era( + &self, + era: u64, + ) -> Result, DatabaseReadError> { let db_connection = &self.connection_pool; let stmt = @@ -237,10 +276,11 @@ macro_rules! database_reader_implementation { .and_then(|maybe_row| match maybe_row { None => Err(DatabaseReadError::NotFound), Some(row) => { - let raw = row - .try_get::("raw") - .map_err(|sqlx_error| wrap_query_error(sqlx_error.into()))?; - deserialize_data::(&raw).map_err(wrap_query_error) + let (raw, api_version, network_name) = + fetch_envelope_data_from_row(row, "Step")?; + let sse_event = + deserialize_data::(&raw).map_err(wrap_query_error)?; + Ok(SseEnvelope::new(sse_event, api_version, network_name)) } }) } @@ -281,25 +321,29 @@ macro_rules! 
database_reader_implementation { serde_json::from_str::(data).map_err(DbError::SerdeJson) } - fn parse_block_from_row(row: $row_type) -> Result { - let raw_data = row - .try_get::("raw") - .map_err(|sqlx_err| wrap_query_error(sqlx_err.into()))?; - deserialize_data::(&raw_data).map_err(wrap_query_error) + fn parse_block_from_row( + row: $row_type, + ) -> Result, DatabaseReadError> { + let (raw_data, api_version, network_name) = + fetch_envelope_data_from_row(row, "BlockAdded")?; + let sse_event = deserialize_data::(&raw_data).map_err(wrap_query_error)?; + Ok(SseEnvelope::new(sse_event, api_version, network_name)) } fn parse_finality_signatures_from_rows( rows: Vec<$row_type>, - ) -> Result, DatabaseReadError> { + ) -> Result>, DatabaseReadError> { let mut finality_signatures = Vec::new(); for row in rows { - let raw = row - .try_get::("raw") - .map_err(|err| wrap_query_error(err.into()))?; - - let finality_signature = + let (raw, api_version, network_name) = + fetch_envelope_data_from_row(row, "FinalitySignature")?; + let sse_event = deserialize_data::(&raw).map_err(wrap_query_error)?; - finality_signatures.push(finality_signature.inner()); + finality_signatures.push(SseEnvelope::new( + sse_event.inner(), + api_version, + network_name, + )); } if finality_signatures.is_empty() { @@ -308,15 +352,14 @@ macro_rules! 
database_reader_implementation { Ok(finality_signatures) } - fn parse_faults_from_rows(rows: Vec<$row_type>) -> Result, DatabaseReadError> { + fn parse_faults_from_rows( + rows: Vec<$row_type>, + ) -> Result>, DatabaseReadError> { let mut faults = Vec::new(); for row in rows { - let raw = row - .try_get::("raw") - .map_err(|err| wrap_query_error(err.into()))?; - - let fault = deserialize_data::(&raw).map_err(wrap_query_error)?; - faults.push(fault); + let (raw, api_version, network_name) = fetch_envelope_data_from_row(row, "Fault")?; + let sse_event = deserialize_data::(&raw).map_err(wrap_query_error)?; + faults.push(SseEnvelope::new(sse_event, api_version, network_name)); } if faults.is_empty() { @@ -324,5 +367,22 @@ macro_rules! database_reader_implementation { } Ok(faults) } + + fn fetch_envelope_data_from_row( + row: $row_type, + message_type: &str, + ) -> Result<(String, String, String), DatabaseReadError> { + let raw_data = row + .try_get::("raw") + .map_err(|sqlx_err| wrap_query_error(sqlx_err.into()))?; + observe_raw_data_size(message_type, raw_data.len()); + let api_version = row + .try_get::("api_version") + .map_err(|sqlx_err| wrap_query_error(sqlx_err.into()))?; + let network_name = row + .try_get::("network_name") + .map_err(|sqlx_err| wrap_query_error(sqlx_err.into()))?; + Ok((raw_data, api_version, network_name)) + } }; } diff --git a/sidecar/src/database/sqlite_database.rs b/event_sidecar/src/database/sqlite_database.rs similarity index 89% rename from sidecar/src/database/sqlite_database.rs rename to event_sidecar/src/database/sqlite_database.rs index 14334126..7fd2c66c 100644 --- a/sidecar/src/database/sqlite_database.rs +++ b/event_sidecar/src/database/sqlite_database.rs @@ -3,7 +3,7 @@ mod reader; mod tests; mod writer; use super::migration_manager::MigrationManager; -#[cfg(test)] +#[cfg(any(feature = "testing", test))] use crate::types::config::StorageConfig; use crate::{ sql::tables, @@ -11,7 +11,7 @@ use crate::{ }; use anyhow::Error; use 
sea_query::SqliteQueryBuilder; -#[cfg(test)] +#[cfg(any(feature = "testing", test))] use sqlx::Row; use sqlx::{ sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePool, SqlitePoolOptions}, @@ -96,29 +96,24 @@ impl SqliteDatabase { } } -#[cfg(test)] +#[cfg(any(feature = "testing", test))] impl SqliteDatabase { pub async fn new_from_config(storage_config: &StorageConfig) -> Result { - match storage_config { - StorageConfig::SqliteDbConfig { - storage_path, - sqlite_config, - } => SqliteDatabase::new(Path::new(storage_path), sqlite_config.clone()).await, - StorageConfig::PostgreSqlDbConfig { .. } => Err(Error::msg( - "can't build Sqlite database from postgres config", - )), + if let Some(sqlite_config) = &storage_config.sqlite_config { + let storage_folder = Path::new(&storage_config.storage_folder); + SqliteDatabase::new(storage_folder, sqlite_config.clone()).await + } else { + Err(Error::msg("No sqlite config found")) } } pub async fn new_in_memory(max_connections: u32) -> Result { - let sqlite_db = Self::new_in_memory_no_migrations(max_connections).await?; + let sqlite_db = Self::new_in_memory_no_migrations(max_connections)?; MigrationManager::apply_all_migrations(sqlite_db.clone()).await?; Ok(sqlite_db) } - pub async fn new_in_memory_no_migrations( - max_connections: u32, - ) -> Result { + pub fn new_in_memory_no_migrations(max_connections: u32) -> Result { let connection_pool = SqlitePoolOptions::new() .max_connections(max_connections) .connect_lazy_with( diff --git a/sidecar/src/database/sqlite_database/reader.rs b/event_sidecar/src/database/sqlite_database/reader.rs similarity index 100% rename from sidecar/src/database/sqlite_database/reader.rs rename to event_sidecar/src/database/sqlite_database/reader.rs diff --git a/event_sidecar/src/database/sqlite_database/tests.rs b/event_sidecar/src/database/sqlite_database/tests.rs new file mode 100644 index 00000000..e12c928d --- /dev/null +++ b/event_sidecar/src/database/sqlite_database/tests.rs @@ -0,0 +1,438 
@@ +use sea_query::{Query, SqliteQueryBuilder}; +use sqlx::Row; + +use casper_types::testing::TestRng; + +use super::SqliteDatabase; +use crate::{ + sql::tables::{self, event_type::EventTypeId}, + types::{database::DatabaseWriter, sse_events::*}, +}; + +const MAX_CONNECTIONS: u32 = 100; + +#[cfg(test)] +async fn build_database() -> SqliteDatabase { + SqliteDatabase::new_in_memory(MAX_CONNECTIONS) + .await + .expect("Error opening database in memory") +} + +#[tokio::test] +async fn should_save_and_retrieve_a_u32max_id() { + let sqlite_db = build_database().await; + let sql = tables::event_log::create_insert_stmt( + 1, + "source", + u32::MAX, + "event key", + "some_version", + "network-1", + ) + .expect("Error creating event_log insert SQL") + .to_string(SqliteQueryBuilder); + + let _ = sqlite_db.fetch_one(&sql).await; + + let sql = Query::select() + .column(tables::event_log::EventLog::EventId) + .from(tables::event_log::EventLog::Table) + .limit(1) + .to_string(SqliteQueryBuilder); + + let event_id_u32max = sqlite_db + .fetch_one(&sql) + .await + .try_get::(0) + .expect("Error getting event_id (=u32::MAX) from row"); + + assert_eq!(event_id_u32max, u32::MAX); +} + +#[tokio::test] +async fn should_save_and_retrieve_block_added() { + let sqlite_db = build_database().await; + crate::database::tests::should_save_and_retrieve_block_added(sqlite_db).await; +} + +#[tokio::test] +async fn should_save_and_retrieve_transaction_accepted() { + let sqlite_db = build_database().await; + crate::database::tests::should_save_and_retrieve_transaction_accepted(sqlite_db).await; +} + +#[tokio::test] +async fn should_save_and_retrieve_transaction_processed() { + let sqlite_db = build_database().await; + crate::database::tests::should_save_and_retrieve_transaction_processed(sqlite_db).await; +} + +#[tokio::test] +async fn should_save_and_retrieve_transaction_expired() { + let sqlite_db = build_database().await; + 
crate::database::tests::should_save_and_retrieve_transaction_expired(sqlite_db).await; +} + +#[tokio::test] +async fn should_retrieve_transaction_aggregate_of_accepted() { + let sqlite_db = build_database().await; + crate::database::tests::should_retrieve_transaction_aggregate_of_accepted(sqlite_db).await; +} + +#[tokio::test] +async fn should_retrieve_transaction_aggregate_of_processed() { + let sqlite_db = build_database().await; + crate::database::tests::should_retrieve_transaction_aggregate_of_processed(sqlite_db).await; +} + +#[tokio::test] +async fn should_retrieve_transaction_aggregate_of_expired() { + let sqlite_db = build_database().await; + crate::database::tests::should_retrieve_transaction_aggregate_of_expired(sqlite_db).await; +} + +#[tokio::test] +async fn should_save_and_retrieve_fault() { + let sqlite_db = build_database().await; + crate::database::tests::should_save_and_retrieve_fault(sqlite_db).await; +} + +#[tokio::test] +async fn should_save_and_retrieve_fault_with_a_u64max() { + let sqlite_db = build_database().await; + crate::database::tests::should_save_and_retrieve_fault_with_a_u64max(sqlite_db).await; +} + +#[tokio::test] +async fn should_save_and_retrieve_finality_signature() { + let sqlite_db = build_database().await; + crate::database::tests::should_save_and_retrieve_finality_signature(sqlite_db).await; +} + +#[tokio::test] +async fn should_save_and_retrieve_step() { + let sqlite_db = build_database().await; + crate::database::tests::should_save_and_retrieve_step(sqlite_db).await; +} + +#[tokio::test] +async fn should_save_and_retrieve_a_step_with_u64_max_era() { + let sqlite_db = build_database().await; + crate::database::tests::should_save_and_retrieve_a_step_with_u64_max_era(sqlite_db).await; +} + +#[tokio::test] +async fn should_disallow_duplicate_event_id_from_source() { + let sqlite_db = build_database().await; + crate::database::tests::should_disallow_duplicate_event_id_from_source(sqlite_db).await; +} + +#[tokio::test] +async fn 
should_disallow_insert_of_existing_block_added() { + let sqlite_db = build_database().await; + crate::database::tests::should_disallow_insert_of_existing_block_added(sqlite_db).await; +} + +#[tokio::test] +async fn should_disallow_insert_of_existing_transaction_accepted() { + let sqlite_db = build_database().await; + crate::database::tests::should_disallow_insert_of_existing_transaction_accepted(sqlite_db) + .await; +} + +#[tokio::test] +async fn should_disallow_insert_of_existing_transaction_expired() { + let sqlite_db = build_database().await; + crate::database::tests::should_disallow_insert_of_existing_transaction_expired(sqlite_db).await; +} + +#[tokio::test] +async fn should_disallow_insert_of_existing_transaction_processed() { + let sqlite_db = build_database().await; + crate::database::tests::should_disallow_insert_of_existing_transaction_processed(sqlite_db) + .await; +} + +#[tokio::test] +async fn should_disallow_insert_of_existing_fault() { + let sqlite_db = build_database().await; + crate::database::tests::should_disallow_insert_of_existing_fault(sqlite_db).await; +} + +#[tokio::test] +async fn should_disallow_insert_of_existing_finality_signature() { + let sqlite_db = build_database().await; + crate::database::tests::should_disallow_insert_of_existing_finality_signature(sqlite_db).await; +} + +#[tokio::test] +async fn should_disallow_insert_of_existing_step() { + let sqlite_db = build_database().await; + crate::database::tests::should_disallow_insert_of_existing_step(sqlite_db).await; +} + +#[tokio::test] +async fn should_save_block_added_with_correct_event_type_id() { + let mut test_rng = TestRng::new(); + + let db = build_database().await; + + let block_added = BlockAdded::random(&mut test_rng); + + assert!(db + .save_block_added( + block_added, + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string() + ) + .await + .is_ok()); + + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::BlockAdded as i16, + "1.1.1", + 
"network-1", + ) + .await; +} + +#[tokio::test] +async fn should_save_transaction_accepted_with_correct_event_type_id() { + let mut test_rng = TestRng::new(); + + let db = build_database().await; + + let transaction_accepted = TransactionAccepted::random(&mut test_rng); + + assert!(db + .save_transaction_accepted( + transaction_accepted, + 1, + "127.0.0.1".to_string(), + "1.5.5".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::TransactionAccepted as i16, + "1.5.5", + "network-1", + ) + .await; +} + +#[tokio::test] +async fn should_save_transaction_processed_with_correct_event_type_id() { + let mut test_rng = TestRng::new(); + + let db = build_database().await; + + let transaction_processed = TransactionProcessed::random(&mut test_rng, None); + + assert!(db + .save_transaction_processed( + transaction_processed, + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::TransactionProcessed as i16, + "1.1.1", + "network-1", + ) + .await; +} + +#[tokio::test] +async fn should_save_transaction_expired_with_correct_event_type_id() { + let mut test_rng = TestRng::new(); + + let db = build_database().await; + + let transaction_expired = TransactionExpired::random(&mut test_rng, None); + + assert!(db + .save_transaction_expired( + transaction_expired, + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::TransactionExpired as i16, + "1.1.1", + "network-1", + ) + .await; +} + +#[tokio::test] +async fn should_save_fault_with_correct_event_type_id() { + let mut test_rng = TestRng::new(); + + let db = build_database().await; + + let fault = Fault::random(&mut test_rng); + + assert!(db + .save_fault( + fault, + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + 
"network-1".to_string() + ) + .await + .is_ok()); + + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::Fault as i16, + "1.1.1", + "network-1", + ) + .await; +} + +#[tokio::test] +async fn should_save_finality_signature_with_correct_event_type_id() { + let mut test_rng = TestRng::new(); + + let db = build_database().await; + + let finality_signature = FinalitySignature::random(&mut test_rng); + + assert!(db + .save_finality_signature( + finality_signature, + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::FinalitySignature as i16, + "1.1.1", + "network-1", + ) + .await; +} + +#[tokio::test] +async fn should_save_step_with_correct_event_type_id() { + let mut test_rng = TestRng::new(); + + let db = build_database().await; + + let step = Step::random(&mut test_rng); + + assert!(db + .save_step( + step, + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + verify_event_log_entry( + db, + "127.0.0.1", + EventTypeId::Step as i16, + "1.1.1", + "network-1", + ) + .await; +} + +#[tokio::test] +async fn should_save_and_retrieve_a_shutdown() { + let db = build_database().await; + assert!(db + .save_shutdown( + 15, + "xyz".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + verify_event_log_entry( + db, + "xyz", + EventTypeId::Shutdown as i16, + "1.1.1", + "network-1", + ) + .await; +} + +#[tokio::test] +async fn get_number_of_events_should_return_0() { + let sqlite_db = build_database().await; + crate::database::tests::get_number_of_events_should_return_0(sqlite_db).await; +} + +#[tokio::test] +async fn get_number_of_events_should_return_1_when_event_stored() { + let sqlite_db = build_database().await; + crate::database::tests::get_number_of_events_should_return_1_when_event_stored(sqlite_db).await; +} + +async fn verify_event_log_entry( + db: 
SqliteDatabase, + expected_event_source_address: &str, + expected_event_type_id: i16, + expected_api_version: &str, + expected_network_name: &str, +) { + let sql = crate::database::tests::fetch_event_log_data_query().to_string(SqliteQueryBuilder); + let row = db.fetch_one(&sql).await; + let event_type_id = row + .try_get::(0) + .expect("Error getting event_type_id from row"); + let api_version = row + .try_get::(1) + .expect("Error getting api_version from row"); + let network_name = row + .try_get::(2) + .expect("Error getting network_name from row"); + let event_source_address = row.get::(3); + assert_eq!(event_type_id, expected_event_type_id); + assert_eq!(api_version, expected_api_version.to_string()); + assert_eq!(network_name, expected_network_name.to_string()); + assert_eq!( + event_source_address, + expected_event_source_address.to_string() + ); +} diff --git a/sidecar/src/database/sqlite_database/writer.rs b/event_sidecar/src/database/sqlite_database/writer.rs similarity index 94% rename from sidecar/src/database/sqlite_database/writer.rs rename to event_sidecar/src/database/sqlite_database/writer.rs index bcc8fbcc..a219262a 100644 --- a/sidecar/src/database/sqlite_database/writer.rs +++ b/event_sidecar/src/database/sqlite_database/writer.rs @@ -11,7 +11,6 @@ database_writer_implementation!( SqliteQueryResult, SqliteQueryBuilder, DDLConfiguration { - is_big_integer_id: false, db_supports_unsigned: true, } ); diff --git a/event_sidecar/src/database/tests.rs b/event_sidecar/src/database/tests.rs new file mode 100644 index 00000000..5ea632d2 --- /dev/null +++ b/event_sidecar/src/database/tests.rs @@ -0,0 +1,705 @@ +use crate::{ + sql::tables, + types::{ + database::{DatabaseReader, DatabaseWriteError, DatabaseWriter, TransactionTypeId}, + sse_events::*, + }, +}; +use casper_types::{testing::TestRng, AsymmetricType, EraId}; +use rand::Rng; +use sea_query::{Query, SelectStatement}; + +pub async fn should_save_and_retrieve_block_added(db: DB) { + let mut 
test_rng = TestRng::new(); + let block_added = BlockAdded::random(&mut test_rng); + + db.save_block_added( + block_added.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving block_added"); + + db.get_latest_block() + .await + .expect("Error getting latest block_added"); + + db.get_block_by_hash(&block_added.hex_encoded_hash()) + .await + .expect("Error getting block_added by hash"); + + db.get_block_by_height(block_added.get_height()) + .await + .expect("Error getting block_added by height"); +} + +pub async fn should_save_and_retrieve_transaction_accepted( + db: DB, +) { + let mut test_rng = TestRng::new(); + + let transaction_accepted = TransactionAccepted::random(&mut test_rng); + let transaction_type_id = match transaction_accepted.transaction_type_id() { + crate::sql::tables::transaction_type::TransactionTypeId::Deploy => { + TransactionTypeId::Deploy + } + crate::sql::tables::transaction_type::TransactionTypeId::Version1 => { + TransactionTypeId::Version1 + } + }; + db.save_transaction_accepted( + transaction_accepted.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving transaction_accepted"); + + db.get_transaction_accepted_by_hash( + &transaction_type_id, + &transaction_accepted.hex_encoded_hash(), + ) + .await + .expect("Error getting transaction_accepted by hash"); +} + +pub async fn should_save_and_retrieve_transaction_processed( + db: DB, +) { + let mut test_rng = TestRng::new(); + let transaction_processed = TransactionProcessed::random(&mut test_rng, None); + let transaction_type_id = match transaction_processed.transaction_type_id() { + crate::sql::tables::transaction_type::TransactionTypeId::Deploy => { + TransactionTypeId::Deploy + } + crate::sql::tables::transaction_type::TransactionTypeId::Version1 => { + TransactionTypeId::Version1 + } + }; + db.save_transaction_processed( + 
transaction_processed.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving transaction_processed"); + + db.get_transaction_processed_by_hash( + &transaction_type_id, + &transaction_processed.hex_encoded_hash(), + ) + .await + .expect("Error getting transaction_processed by hash"); +} + +pub async fn should_save_and_retrieve_transaction_expired( + db: DB, +) { + let mut test_rng = TestRng::new(); + let transaction_expired = TransactionExpired::random(&mut test_rng, None); + let transaction_type_id = match transaction_expired.transaction_type_id() { + crate::sql::tables::transaction_type::TransactionTypeId::Deploy => { + TransactionTypeId::Deploy + } + crate::sql::tables::transaction_type::TransactionTypeId::Version1 => { + TransactionTypeId::Version1 + } + }; + db.save_transaction_expired( + transaction_expired.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving transaction_expired"); + + db.get_transaction_expired_by_hash( + &transaction_type_id, + &transaction_expired.hex_encoded_hash(), + ) + .await + .expect("Error getting transaction_expired by hash"); +} + +pub async fn should_retrieve_transaction_aggregate_of_accepted< + DB: DatabaseReader + DatabaseWriter, +>( + db: DB, +) { + let mut test_rng = TestRng::new(); + + let transaction_accepted = TransactionAccepted::random(&mut test_rng); + let transaction_type_id = match transaction_accepted.transaction_type_id() { + crate::sql::tables::transaction_type::TransactionTypeId::Deploy => { + TransactionTypeId::Deploy + } + crate::sql::tables::transaction_type::TransactionTypeId::Version1 => { + TransactionTypeId::Version1 + } + }; + + db.save_transaction_accepted( + transaction_accepted.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving transaction_accepted"); + + 
db.get_transaction_aggregate_by_identifier( + &transaction_type_id, + &transaction_accepted.hex_encoded_hash(), + ) + .await + .expect("Error getting transaction aggregate by hash"); +} + +pub async fn should_retrieve_transaction_aggregate_of_processed< + DB: DatabaseReader + DatabaseWriter, +>( + db: DB, +) { + let mut test_rng = TestRng::new(); + let transaction_accepted = TransactionAccepted::random(&mut test_rng); + let transaction_processed = + TransactionProcessed::random(&mut test_rng, Some(transaction_accepted.transaction_hash())); + let transaction_type_id = match transaction_accepted.transaction_type_id() { + crate::sql::tables::transaction_type::TransactionTypeId::Deploy => { + TransactionTypeId::Deploy + } + crate::sql::tables::transaction_type::TransactionTypeId::Version1 => { + TransactionTypeId::Version1 + } + }; + + db.save_transaction_accepted( + transaction_accepted.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving transaction_accepted"); + + db.save_transaction_processed( + transaction_processed, + 2, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving transaction_processed"); + + db.get_transaction_aggregate_by_identifier( + &transaction_type_id, + &transaction_accepted.hex_encoded_hash(), + ) + .await + .expect("Error getting transaction aggregate by hash"); +} + +pub async fn should_retrieve_transaction_aggregate_of_expired< + DB: DatabaseReader + DatabaseWriter, +>( + db: DB, +) { + let mut test_rng = TestRng::new(); + let transaction_accepted = TransactionAccepted::random(&mut test_rng); + let transaction_expired = + TransactionExpired::random(&mut test_rng, Some(transaction_accepted.transaction_hash())); + let transaction_type_id = match transaction_accepted.transaction_type_id() { + crate::sql::tables::transaction_type::TransactionTypeId::Deploy => { + TransactionTypeId::Deploy + } + 
crate::sql::tables::transaction_type::TransactionTypeId::Version1 => { + TransactionTypeId::Version1 + } + }; + + db.save_transaction_accepted( + transaction_accepted.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving transaction_accepted"); + + db.save_transaction_expired( + transaction_expired, + 2, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving transaction_expired"); + + db.get_transaction_aggregate_by_identifier( + &transaction_type_id, + &transaction_accepted.hex_encoded_hash(), + ) + .await + .expect("Error getting transaction aggregate by hash"); +} + +pub async fn should_save_and_retrieve_fault(db: DB) { + let mut test_rng = TestRng::new(); + let fault = Fault::random(&mut test_rng); + + db.save_fault( + fault.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving fault"); + + db.get_faults_by_era(fault.era_id.value()) + .await + .expect("Error getting faults by era"); + + db.get_faults_by_public_key(&fault.public_key.to_hex()) + .await + .expect("Error getting faults by public key"); +} + +pub async fn should_save_and_retrieve_fault_with_a_u64max( + db: DB, +) { + let mut test_rng = TestRng::new(); + let mut fault = Fault::random(&mut test_rng); + fault.era_id = EraId::new(u64::MAX); + + db.save_fault( + fault.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving fault with a u64::MAX era id"); + + let faults = db + .get_faults_by_era(u64::MAX) + .await + .expect("Error getting faults by era with u64::MAX era id"); + + assert_eq!(faults[0].payload().era_id.value(), u64::MAX); + + let faults = db + .get_faults_by_public_key(&fault.public_key.to_hex()) + .await + .expect("Error getting faults by public key with u64::MAX era id"); + + 
assert_eq!(faults[0].payload().era_id.value(), u64::MAX); +} + +pub async fn should_save_and_retrieve_finality_signature( + db: DB, +) { + let mut test_rng = TestRng::new(); + let finality_signature = FinalitySignature::random(&mut test_rng); + + db.save_finality_signature( + finality_signature.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving finality_signature"); + + db.get_finality_signatures_by_block(&finality_signature.hex_encoded_block_hash()) + .await + .expect("Error getting finality signatures by block_hash"); +} + +pub async fn should_save_and_retrieve_step(db: DB) { + let mut test_rng = TestRng::new(); + let step = Step::random(&mut test_rng); + + db.save_step( + step.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving step"); + + db.get_step_by_era(step.era_id.value()) + .await + .expect("Error getting step by era"); +} + +pub async fn should_save_and_retrieve_a_step_with_u64_max_era< + DB: DatabaseReader + DatabaseWriter, +>( + db: DB, +) { + let mut test_rng = TestRng::new(); + let mut step = Step::random(&mut test_rng); + step.era_id = EraId::new(u64::MAX); + + db.save_step( + step.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .expect("Error saving Step with u64::MAX era id"); + + let retrieved_step = db + .get_step_by_era(u64::MAX) + .await + .expect("Error retrieving Step with u64::MAX era id"); + + assert_eq!(retrieved_step.payload().era_id.value(), u64::MAX) +} + +pub async fn should_disallow_duplicate_event_id_from_source( + db: DB, +) { + let mut test_rng = TestRng::new(); + let event_id = test_rng.gen::(); + let block_added = BlockAdded::random(&mut test_rng); + + assert!(db + .save_block_added( + block_added.clone(), + event_id, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + 
let res = db + .save_block_added( + block_added, + event_id, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await; + assert!(matches!(res, Err(DatabaseWriteError::UniqueConstraint(_)))); + // This check is to ensure that the UNIQUE constraint being broken is from the event_log table rather than from the raw event table. + if let Err(DatabaseWriteError::UniqueConstraint(uc_err)) = res { + assert_eq!(uc_err.table, "event_log") + } +} + +pub async fn should_disallow_insert_of_existing_block_added( + db: DB, +) { + let mut test_rng = TestRng::new(); + let block_added = BlockAdded::random(&mut test_rng); + + assert!(db + .save_block_added( + block_added.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + let db_err = db + .save_block_added( + block_added, + 2, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .unwrap_err(); + + assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); + + // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log + if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { + assert_eq!(uc_err.table, "BlockAdded") + } +} + +pub async fn should_disallow_insert_of_existing_transaction_accepted< + DB: DatabaseReader + DatabaseWriter, +>( + db: DB, +) { + let mut test_rng = TestRng::new(); + let transaction_accepted = TransactionAccepted::random(&mut test_rng); + + assert!(db + .save_transaction_accepted( + transaction_accepted.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + let db_err = db + .save_transaction_accepted( + transaction_accepted, + 2, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .unwrap_err(); + + assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); + + // This check is 
to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log + if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { + assert_eq!(uc_err.table, "TransactionAccepted") + } +} + +pub async fn should_disallow_insert_of_existing_transaction_expired< + DB: DatabaseReader + DatabaseWriter, +>( + db: DB, +) { + let mut test_rng = TestRng::new(); + let transaction_expired = TransactionExpired::random(&mut test_rng, None); + + assert!(db + .save_transaction_expired( + transaction_expired.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + let db_err = db + .save_transaction_expired( + transaction_expired, + 2, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .unwrap_err(); + + assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); + + // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log + if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { + assert_eq!(uc_err.table, "TransactionExpired") + } +} + +pub async fn should_disallow_insert_of_existing_transaction_processed< + DB: DatabaseReader + DatabaseWriter, +>( + db: DB, +) { + let mut test_rng = TestRng::new(); + let transaction_processed = TransactionProcessed::random(&mut test_rng, None); + + assert!(db + .save_transaction_processed( + transaction_processed.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + let db_err = db + .save_transaction_processed( + transaction_processed, + 2, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .unwrap_err(); + + assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); + + // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log + if 
let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { + assert_eq!(uc_err.table, "TransactionProcessed") + } +} + +pub async fn should_disallow_insert_of_existing_fault(db: DB) { + let mut test_rng = TestRng::new(); + let fault = Fault::random(&mut test_rng); + + assert!(db + .save_fault( + fault.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + let db_err = db + .save_fault( + fault, + 2, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .unwrap_err(); + + assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); + + // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log + if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { + assert_eq!(uc_err.table, "Fault") + } +} + +pub async fn should_disallow_insert_of_existing_finality_signature< + DB: DatabaseReader + DatabaseWriter, +>( + db: DB, +) { + let mut test_rng = TestRng::new(); + let finality_signature = FinalitySignature::random(&mut test_rng); + + assert!(db + .save_finality_signature( + finality_signature.clone(), + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + let db_err = db + .save_finality_signature( + finality_signature, + 2, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .unwrap_err(); + + assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); + + // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log + if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { + assert_eq!(uc_err.table, "FinalitySignature") + } +} + +pub async fn should_disallow_insert_of_existing_step(db: DB) { + let mut test_rng = TestRng::new(); + let step = Step::random(&mut test_rng); + + assert!(db + .save_step( + step.clone(), + 
1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + + let db_err = db + .save_step( + step, + 2, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .unwrap_err(); + + assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); + + // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log + if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { + assert_eq!(uc_err.table, "Step") + } +} + +pub async fn get_number_of_events_should_return_0(db: DB) { + assert_eq!(db.get_number_of_events().await.unwrap(), 0); +} + +pub async fn get_number_of_events_should_return_1_when_event_stored< + DB: DatabaseReader + DatabaseWriter, +>( + db: DB, +) { + let mut test_rng = TestRng::new(); + let fault = Fault::random(&mut test_rng); + + assert!(db + .save_fault( + fault, + 1, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await + .is_ok()); + assert_eq!(db.get_number_of_events().await.unwrap(), 1); +} + +pub fn fetch_event_log_data_query() -> SelectStatement { + let mut select = Query::select(); + select + .column(tables::event_log::EventLog::EventTypeId) + .column(tables::event_log::EventLog::ApiVersion) + .column(tables::event_log::EventLog::NetworkName) + .column(tables::event_log::EventLog::EventSourceAddress) + .from(tables::event_log::EventLog::Table); + select +} diff --git a/event_sidecar/src/database/types.rs b/event_sidecar/src/database/types.rs new file mode 100644 index 00000000..5e2ebb7a --- /dev/null +++ b/event_sidecar/src/database/types.rs @@ -0,0 +1,60 @@ +use serde::{Deserialize, Serialize}; +use utoipa::ToSchema; + +use crate::{ + BlockAdded, Fault, FinalitySignature, Step, TransactionAccepted, TransactionExpired, + TransactionProcessed, +}; + +/// This struct holds flags that steer DDL generation for specific databases. 
+pub struct DDLConfiguration { + /// Postgresql doesn't support unsigned integers, so for some fields we need to be mindful of the fact that in postgres we might need to use a bigger type to accommodate the scope of the field + pub db_supports_unsigned: bool, +} + +/// Header of the SSE envelope +#[derive(Clone, Debug, Serialize, Deserialize, ToSchema)] +pub struct EnvelopeHeader { + api_version: String, + network_name: String, +} + +/// Wrapper envelope for SSE events. It contains the event in `payload` field and some metadata in `header` field. +#[derive(Clone, Debug, Serialize, Deserialize, ToSchema)] +#[aliases( + BlockAddedEnveloped = SseEnvelope, + TransactionAcceptedEnveloped = SseEnvelope, + TransactionExpiredEnveloped = SseEnvelope, + TransactionProcessedEnveloped = SseEnvelope, + FaultEnveloped = SseEnvelope, + FinalitySignatureEnveloped = SseEnvelope, + StepEnveloped = SseEnvelope, +)] +pub struct SseEnvelope { + header: EnvelopeHeader, + payload: T, +} + +impl SseEnvelope { + pub fn new(sse_event: T, api_version: String, network_name: String) -> SseEnvelope { + SseEnvelope { + header: EnvelopeHeader { + api_version, + network_name, + }, + payload: sse_event, + } + } + + pub fn payload(&self) -> &T { + &self.payload + } + + pub fn api_version(&self) -> &String { + &self.header.api_version + } + + pub fn network_name(&self) -> &String { + &self.header.network_name + } +} diff --git a/sidecar/src/database/writer_generator.rs b/event_sidecar/src/database/writer_generator.rs similarity index 79% rename from sidecar/src/database/writer_generator.rs rename to event_sidecar/src/database/writer_generator.rs index 35a4a81f..eab89356 100644 --- a/sidecar/src/database/writer_generator.rs +++ b/event_sidecar/src/database/writer_generator.rs @@ -9,7 +9,7 @@ use anyhow::Context; use async_trait::async_trait; use casper_types::AsymmetricType; #[cfg(feature = "additional-metrics")] -use casper_event_types::metrics; +use metrics::db::DB_OPERATION_TIMES; use itertools::Itertools;
use tokio::sync::Mutex; use $crate::{ @@ -52,6 +52,9 @@ impl DatabaseWriter for $extended_type { block_added: BlockAdded, event_id: u32, event_source_address: String, + api_version: String, +network_name: String, + ) -> Result { #[cfg(feature = "additional-metrics")] let start = Instant::now(); @@ -63,6 +66,8 @@ impl DatabaseWriter for $extended_type { &event_source_address, event_id, &encoded_hash, + &api_version, + &network_name, &mut transaction, ) .await?; @@ -84,30 +89,37 @@ impl DatabaseWriter for $extended_type { res } - async fn save_deploy_accepted( + async fn save_transaction_accepted( &self, - deploy_accepted: DeployAccepted, + transaction_accepted: TransactionAccepted, event_id: u32, event_source_address: String, + api_version: String, +network_name: String, + ) -> Result { #[cfg(feature = "additional-metrics")] let start = Instant::now(); + let transaction_type_id = transaction_accepted.transaction_type_id(); let mut transaction = self.get_transaction().await?; - let json = serde_json::to_string(&deploy_accepted)?; - let encoded_hash = deploy_accepted.hex_encoded_hash(); + let json = serde_json::to_string(&transaction_accepted)?; + let transaction_identifier = transaction_accepted.identifier(); let event_log_id = save_event_log( - EventTypeId::DeployAccepted as u8, + EventTypeId::TransactionAccepted as u8, &event_source_address, event_id, - &encoded_hash, + &transaction_identifier, + &api_version, + &network_name, &mut transaction, ) .await?; + let transaction_type_id_raw = transaction_type_id as u8; let batched_insert_stmts = vec![ - tables::deploy_accepted::create_insert_stmt(encoded_hash.clone(), json, event_log_id)?, - tables::deploy_event::create_insert_stmt(event_log_id, encoded_hash)?, + tables::transaction_accepted::create_insert_stmt(transaction_type_id_raw, transaction_identifier.clone(), json, event_log_id)?, + tables::transaction_event::create_insert_stmt(event_log_id, transaction_type_id_raw, transaction_identifier)?, ] .iter() .map(|stmt| 
stmt.to_string($query_materializer_expr)) @@ -118,33 +130,40 @@ impl DatabaseWriter for $extended_type { transaction.commit().await?; } #[cfg(feature = "additional-metrics")] - observe_db_operation_time("save_deploy_accepted", start); + observe_db_operation_time("save_transaction_accepted", start); res } - async fn save_deploy_processed( + async fn save_transaction_processed( &self, - deploy_processed: DeployProcessed, + transaction_processed: TransactionProcessed, event_id: u32, event_source_address: String, + api_version: String, +network_name: String, + ) -> Result { #[cfg(feature = "additional-metrics")] let start = Instant::now(); + let transaction_type_id = transaction_processed.transaction_type_id(); let mut transaction = self.get_transaction().await?; - let json = serde_json::to_string(&deploy_processed)?; - let encoded_hash = deploy_processed.hex_encoded_hash(); + let json = serde_json::to_string(&transaction_processed)?; + let identifier = transaction_processed.identifier(); let event_log_id = save_event_log( - EventTypeId::DeployProcessed as u8, + EventTypeId::TransactionProcessed as u8, &event_source_address, event_id, - &encoded_hash, + &identifier, + &api_version, + &network_name, &mut transaction, ) .await?; + let transaction_type_id_raw = transaction_type_id as u8; let batched_insert_stmts = vec![ - tables::deploy_processed::create_insert_stmt(encoded_hash.clone(), json, event_log_id)?, - tables::deploy_event::create_insert_stmt(event_log_id, encoded_hash)?, + tables::transaction_processed::create_insert_stmt(transaction_type_id_raw, identifier.clone(), json, event_log_id)?, + tables::transaction_event::create_insert_stmt(event_log_id, transaction_type_id_raw, identifier)?, ] .iter() .map(|stmt| stmt.to_string($query_materializer_expr)) @@ -155,33 +174,40 @@ impl DatabaseWriter for $extended_type { transaction.commit().await?; } #[cfg(feature = "additional-metrics")] - observe_db_operation_time("save_deploy_processed", start); + 
observe_db_operation_time("save_transaction_processed", start); res } - async fn save_deploy_expired( + async fn save_transaction_expired( &self, - deploy_expired: DeployExpired, + transaction_expired: TransactionExpired, event_id: u32, event_source_address: String, + api_version: String, +network_name: String, + ) -> Result { #[cfg(feature = "additional-metrics")] let start = Instant::now(); + let transaction_type_id = transaction_expired.transaction_type_id(); let mut transaction = self.get_transaction().await?; - let json = serde_json::to_string(&deploy_expired)?; - let encoded_hash = deploy_expired.hex_encoded_hash(); + let json = serde_json::to_string(&transaction_expired)?; + let transaction_identifier = transaction_expired.identifier(); let event_log_id = save_event_log( - EventTypeId::DeployExpired as u8, + EventTypeId::TransactionExpired as u8, &event_source_address, event_id, - &encoded_hash, + &transaction_identifier, + &api_version, + &network_name, &mut transaction, ) .await?; + let transaction_type_id_raw = transaction_type_id as u8; let batched_insert_stmts = vec![ - tables::deploy_expired::create_insert_stmt(encoded_hash.clone(), event_log_id, json)?, - tables::deploy_event::create_insert_stmt(event_log_id, encoded_hash)?, + tables::transaction_expired::create_insert_stmt(transaction_type_id_raw, transaction_identifier.clone(), event_log_id, json)?, + tables::transaction_event::create_insert_stmt(event_log_id, transaction_type_id_raw, transaction_identifier)?, ] .iter() .map(|stmt| stmt.to_string($query_materializer_expr)) @@ -192,7 +218,7 @@ impl DatabaseWriter for $extended_type { transaction.commit().await?; } #[cfg(feature = "additional-metrics")] - observe_db_operation_time("save_deploy_expired", start); + observe_db_operation_time("save_transaction_expired", start); res } @@ -201,6 +227,9 @@ impl DatabaseWriter for $extended_type { fault: Fault, event_id: u32, event_source_address: String, + api_version: String, +network_name: String, + ) -> 
Result { #[cfg(feature = "additional-metrics")] let start = Instant::now(); @@ -214,6 +243,8 @@ impl DatabaseWriter for $extended_type { &event_source_address, event_id, &event_key, + &api_version, + &network_name, &mut transaction, ) .await?; @@ -235,6 +266,9 @@ impl DatabaseWriter for $extended_type { finality_signature: FinalitySignature, event_id: u32, event_source_address: String, + api_version: String, +network_name: String, + ) -> Result { #[cfg(feature = "additional-metrics")] let start = Instant::now(); @@ -249,6 +283,8 @@ impl DatabaseWriter for $extended_type { &event_source_address, event_id, &event_key, + &api_version, + &network_name, &mut transaction, ) .await?; @@ -275,6 +311,8 @@ impl DatabaseWriter for $extended_type { step: Step, event_id: u32, event_source_address: String, + api_version: String, + network_name: String, ) -> Result { #[cfg(feature = "additional-metrics")] let start = Instant::now(); @@ -287,6 +325,8 @@ impl DatabaseWriter for $extended_type { &event_source_address, event_id, &era_id.to_string(), + &api_version, + &network_name, &mut transaction, ) .await?; @@ -307,6 +347,9 @@ impl DatabaseWriter for $extended_type { &self, event_id: u32, event_source_address: String, + api_version: String, +network_name: String, + ) -> Result { #[cfg(feature = "additional-metrics")] let start = Instant::now(); @@ -322,6 +365,8 @@ impl DatabaseWriter for $extended_type { &event_source_address, event_id, &event_key, + &api_version, + &network_name, &mut transaction, ) .await?; @@ -399,6 +444,8 @@ async fn save_event_log( event_source_address: &str, event_id: u32, event_key: &str, + api_version: &str, + newtork_name: &str, transaction: &mut Transaction<'_, $database_type>, ) -> Result { let insert_to_event_log_stmt = tables::event_log::create_insert_stmt( @@ -406,6 +453,8 @@ async fn save_event_log( event_source_address, event_id, event_key, + api_version, + newtork_name, )? 
.to_string($query_materializer_expr); let event_log_id = transaction @@ -420,7 +469,7 @@ async fn save_event_log( #[cfg(feature = "additional-metrics")] fn observe_db_operation_time(operation_name: &str, start: Instant) { let duration = start.elapsed(); - metrics::DB_OPERATION_TIMES + DB_OPERATION_TIMES .with_label_values(&[operation_name]) .observe(duration.as_nanos() as f64); } diff --git a/event_sidecar/src/event_handling_service.rs b/event_sidecar/src/event_handling_service.rs new file mode 100644 index 00000000..d9b6d153 --- /dev/null +++ b/event_sidecar/src/event_handling_service.rs @@ -0,0 +1,105 @@ +use crate::{ + types::database::DatabaseWriteError, FinalitySignature, Step, TransactionAccepted, + TransactionProcessed, +}; +use async_trait::async_trait; +use casper_event_listener::SseEvent; +use casper_event_types::{sse_data::SseData, Filter}; +use casper_types::{ + Block, BlockHash, EraId, ProtocolVersion, PublicKey, Timestamp, TransactionHash, +}; +use metrics::observe_error; +use tokio::sync::mpsc::Sender; +use tracing::{debug, trace, warn}; +pub mod db_saving_event_handling_service; +pub mod no_db_event_handling_service; +pub use { + db_saving_event_handling_service::DbSavingEventHandlingService, + no_db_event_handling_service::NoDbEventHandlingService, +}; + +#[async_trait] +pub trait EventHandlingService { + async fn handle_api_version(&self, version: ProtocolVersion, filter: Filter); + + async fn handle_block_added( + &self, + block_hash: BlockHash, + block: Box, + sse_event: SseEvent, + ); + + async fn handle_transaction_accepted( + &self, + transaction_accepted: TransactionAccepted, + sse_event: SseEvent, + ); + + async fn handle_transaction_expired( + &self, + transaction_hash: TransactionHash, + sse_event: SseEvent, + ); + + async fn handle_transaction_processed( + &self, + transaction_processed: TransactionProcessed, + sse_event: SseEvent, + ); + + async fn handle_fault( + &self, + era_id: EraId, + timestamp: Timestamp, + public_key: PublicKey, 
+ sse_event: SseEvent, + ); + + async fn handle_step(&self, step: Step, sse_event: SseEvent); + + async fn handle_finality_signature( + &self, + finality_signature: FinalitySignature, + sse_event: SseEvent, + ); + + async fn handle_shutdown(&self, sse_event: SseEvent); +} + +async fn handle_database_save_result( + entity_name: &str, + entity_identifier: &str, + res: Result, + outbound_sse_data_sender: &Sender<(SseData, Option)>, + inbound_filter: Filter, + sse_data: SseData, +) { + match res { + Ok(_) => { + if let Err(error) = outbound_sse_data_sender + .send((sse_data, Some(inbound_filter))) + .await + { + debug!( + "Error when sending to outbound_sse_data_sender. Error: {}", + error + ); + } + } + Err(DatabaseWriteError::UniqueConstraint(uc_err)) => { + debug!( + "Already received {} ({}), logged in event_log", + entity_name, entity_identifier, + ); + trace!(?uc_err); + } + Err(other_err) => { + count_error(format!("db_save_error_{}", entity_name).as_str()); + warn!(?other_err, "Unexpected error saving {}", entity_identifier); + } + } +} + +fn count_error(reason: &str) { + observe_error("event_handling", reason); +} diff --git a/event_sidecar/src/event_handling_service/db_saving_event_handling_service.rs b/event_sidecar/src/event_handling_service/db_saving_event_handling_service.rs new file mode 100644 index 00000000..f4bf3888 --- /dev/null +++ b/event_sidecar/src/event_handling_service/db_saving_event_handling_service.rs @@ -0,0 +1,326 @@ +use crate::{ + event_handling_service::count_error, + transaction_hash_to_identifier, + types::database::{DatabaseReader, DatabaseWriteError, DatabaseWriter}, + BlockAdded, Fault, FinalitySignature, Step, TransactionAccepted, TransactionExpired, + TransactionProcessed, +}; +use async_trait::async_trait; +use casper_event_listener::SseEvent; +use casper_event_types::{sse_data::SseData, Filter}; +use casper_types::{ + Block, BlockHash, EraId, ProtocolVersion, PublicKey, Timestamp, TransactionHash, +}; +use derive_new::new; +use 
hex_fmt::HexFmt; +use metrics::sse::observe_contract_messages; +use tokio::sync::mpsc::Sender; +use tracing::{debug, info, warn}; + +use super::{handle_database_save_result, EventHandlingService}; + +#[derive(new, Clone)] +pub struct DbSavingEventHandlingService { + outbound_sse_data_sender: Sender<(SseData, Option)>, + database: Db, + enable_event_logging: bool, +} + +#[async_trait] +impl EventHandlingService for DbSavingEventHandlingService +where + Db: DatabaseReader + DatabaseWriter + Clone + Send + Sync + 'static, +{ + async fn handle_api_version(&self, version: ProtocolVersion, filter: Filter) { + if let Err(error) = self + .outbound_sse_data_sender + .send((SseData::ApiVersion(version), Some(filter))) + .await + { + debug!( + "Error when sending to outbound_sse_data_sender. Error: {}", + error + ); + } + if self.enable_event_logging { + info!(%version, "API Version"); + } + } + + async fn handle_block_added( + &self, + block_hash: BlockHash, + block: Box, + sse_event: SseEvent, + ) { + if self.enable_event_logging { + let hex_block_hash = HexFmt(block_hash.inner()); + info!("Block Added: {:18}", hex_block_hash); + debug!("Block Added: {}", hex_block_hash); + } + let id = sse_event.id; + let source = sse_event.source.to_string(); + let api_version = sse_event.api_version; + let network_name = sse_event.network_name; + let filter = sse_event.inbound_filter; + let res = self + .database + .save_block_added( + BlockAdded::new(block_hash, block), //TODO maybe we could avoid these clones + id, + source, + api_version, + network_name, + ) + .await; + handle_database_save_result( + "BlockAdded", + HexFmt(block_hash.inner()).to_string().as_str(), + res, + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_transaction_accepted( + &self, + transaction_accepted: TransactionAccepted, + sse_event: SseEvent, + ) { + let entity_identifier = transaction_accepted.identifier(); + if self.enable_event_logging { + info!("Transaction 
Accepted: {:18}", entity_identifier); + debug!("Transaction Accepted: {}", entity_identifier); + } + let id = sse_event.id; + let source = sse_event.source.to_string(); + let api_version = sse_event.api_version; + let network_name = sse_event.network_name; + let filter = sse_event.inbound_filter; + let res = self + .database + .save_transaction_accepted(transaction_accepted, id, source, api_version, network_name) + .await; + handle_database_save_result( + "TransactionAccepted", + &entity_identifier, + res, + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_transaction_expired( + &self, + transaction_hash: TransactionHash, + sse_event: SseEvent, + ) { + let entity_identifier = transaction_hash_to_identifier(&transaction_hash); + if self.enable_event_logging { + info!("Transaction Expired: {:18}", entity_identifier); + debug!("Transaction Expired: {}", entity_identifier); + } + let id = sse_event.id; + let source = sse_event.source.to_string(); + let api_version = sse_event.api_version; + let network_name = sse_event.network_name; + let filter = sse_event.inbound_filter; + let res = self + .database + .save_transaction_expired( + TransactionExpired::new(transaction_hash), + id, + source.to_string(), + api_version, + network_name, + ) + .await; + handle_database_save_result( + "TransactionExpired", + &entity_identifier, + res, + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_transaction_processed( + &self, + transaction_processed: TransactionProcessed, + sse_event: SseEvent, + ) { + let entity_identifier = transaction_processed.identifier(); + if self.enable_event_logging { + info!("Transaction Processed: {:18}", entity_identifier); + debug!("Transaction Processed: {}", entity_identifier); + } + let id = sse_event.id; + let source = sse_event.source.to_string(); + let api_version = sse_event.api_version; + let network_name = sse_event.network_name; + let filter = 
sse_event.inbound_filter; + let messages_len = transaction_processed.messages().len(); + + if messages_len > 0 { + observe_contract_messages("all", messages_len); + } + let res = self + .database + .save_transaction_processed( + transaction_processed, + id, + source.to_string(), + api_version, + network_name, + ) + .await; + if res.is_ok() && messages_len > 0 { + observe_contract_messages("unique", messages_len); + } + handle_database_save_result( + "TransactionProcessed", + &entity_identifier, + res, + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_fault( + &self, + era_id: EraId, + timestamp: Timestamp, + public_key: PublicKey, + sse_event: SseEvent, + ) { + let id = sse_event.id; + let source = sse_event.source.to_string(); + let api_version = sse_event.api_version; + let network_name = sse_event.network_name; + let filter = sse_event.inbound_filter; + let fault_identifier = format!("{}-{}", era_id.value(), public_key); + let fault = Fault::new(era_id, public_key, timestamp); + warn!(%fault, "Fault reported"); + let res = self + .database + .save_fault(fault, id, source, api_version, network_name) + .await; + + handle_database_save_result( + "Fault", + &fault_identifier, + res, + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_step(&self, step: Step, sse_event: SseEvent) { + let era_id = step.era_id; + let step_identifier = format!("{}", era_id.value()); + if self.enable_event_logging { + info!("Step at era: {}", step_identifier); + } + + let id = sse_event.id; + let source = sse_event.source.to_string(); + let api_version = sse_event.api_version; + let network_name = sse_event.network_name; + let filter = sse_event.inbound_filter; + let res = self + .database + .save_step(step, id, source, api_version, network_name) + .await; + handle_database_save_result( + "Step", + step_identifier.as_str(), + res, + &self.outbound_sse_data_sender, + filter, + sse_event.data, 
+ ) + .await; + } + + async fn handle_finality_signature( + &self, + finality_signature: FinalitySignature, + sse_event: SseEvent, + ) { + if self.enable_event_logging { + debug!( + "Finality Signature: {} for {}", + finality_signature.signature(), + finality_signature.block_hash() + ); + } + let id = sse_event.id; + let source = sse_event.source.to_string(); + let api_version = sse_event.api_version; + let network_name = sse_event.network_name; + let filter = sse_event.inbound_filter; + let res = self + .database + .save_finality_signature( + finality_signature.clone(), + id, + source, + api_version, + network_name, + ) + .await; + handle_database_save_result( + "FinalitySignature", + "", + res, + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_shutdown(&self, sse_event: SseEvent) { + warn!("Node ({}) is unavailable", sse_event.source.to_string()); + let res = self + .database + .save_shutdown( + sse_event.id, + sse_event.source.to_string(), + sse_event.api_version, + sse_event.network_name, + ) + .await; + match res { + Ok(_) | Err(DatabaseWriteError::UniqueConstraint(_)) => { + // We push to outbound on UniqueConstraint error because in sse_server we match shutdowns to outbounds based on the filter they came from to prevent duplicates. + // But that also means that we need to pass through all the Shutdown events so the sse_server can determine to which outbound filters they need to be pushed (we + // don't store in DB the information from which filter did shutdown came). + if let Err(error) = self + .outbound_sse_data_sender + .send((SseData::Shutdown, Some(sse_event.inbound_filter))) + .await + { + debug!( + "Error when sending to outbound_sse_data_sender. 
Error: {}", + error + ); + } + } + Err(other_err) => { + count_error("db_save_error_shutdown"); + warn!(?other_err, "Unexpected error saving Shutdown") + } + } + } +} diff --git a/event_sidecar/src/event_handling_service/no_db_event_handling_service.rs b/event_sidecar/src/event_handling_service/no_db_event_handling_service.rs new file mode 100644 index 00000000..85a7d4a4 --- /dev/null +++ b/event_sidecar/src/event_handling_service/no_db_event_handling_service.rs @@ -0,0 +1,215 @@ +use crate::{ + event_handling_service::handle_database_save_result, transaction_hash_to_identifier, Fault, + FinalitySignature, Step, TransactionAccepted, TransactionProcessed, +}; +use async_trait::async_trait; +use casper_event_listener::SseEvent; +use casper_event_types::{sse_data::SseData, Filter}; +use casper_types::{ + Block, BlockHash, EraId, ProtocolVersion, PublicKey, Timestamp, TransactionHash, +}; +use derive_new::new; +use hex_fmt::HexFmt; +use metrics::sse::observe_contract_messages; +use tokio::sync::mpsc::Sender; +use tracing::{debug, info, warn}; + +use super::EventHandlingService; + +#[derive(new, Clone)] +pub struct NoDbEventHandlingService { + outbound_sse_data_sender: Sender<(SseData, Option)>, + enable_event_logging: bool, +} + +#[async_trait] +impl EventHandlingService for NoDbEventHandlingService { + async fn handle_api_version(&self, version: ProtocolVersion, filter: Filter) { + if let Err(error) = self + .outbound_sse_data_sender + .send((SseData::ApiVersion(version), Some(filter))) + .await + { + debug!( + "Error when sending to outbound_sse_data_sender. 
Error: {}", + error + ); + } + if self.enable_event_logging { + info!(%version, "API Version"); + } + } + + async fn handle_block_added( + &self, + block_hash: BlockHash, + _block: Box, + sse_event: SseEvent, + ) { + if self.enable_event_logging { + let hex_block_hash = HexFmt(block_hash.inner()); + info!("Block Added: {:18}", hex_block_hash); + debug!("Block Added: {}", hex_block_hash); + } + let filter = sse_event.inbound_filter; + handle_database_save_result( + "BlockAdded", + HexFmt(block_hash.inner()).to_string().as_str(), + Ok(()), + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_transaction_accepted( + &self, + transaction_accepted: TransactionAccepted, + sse_event: SseEvent, + ) { + let entity_identifier = transaction_accepted.identifier(); + if self.enable_event_logging { + info!("Transaction Accepted: {:18}", entity_identifier); + debug!("Transaction Accepted: {}", entity_identifier); + } + let filter = sse_event.inbound_filter; + handle_database_save_result( + "TransactionAccepted", + &entity_identifier, + Ok(()), + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_transaction_expired( + &self, + transaction_hash: TransactionHash, + sse_event: SseEvent, + ) { + let entity_identifier = transaction_hash_to_identifier(&transaction_hash); + if self.enable_event_logging { + info!("Transaction Expired: {:18}", entity_identifier); + debug!("Transaction Expired: {}", entity_identifier); + } + let filter = sse_event.inbound_filter; + handle_database_save_result( + "TransactionExpired", + &entity_identifier, + Ok(()), + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_transaction_processed( + &self, + transaction_processed: TransactionProcessed, + sse_event: SseEvent, + ) { + let entity_identifier = transaction_processed.identifier(); + if self.enable_event_logging { + info!("Transaction Processed: {:18}", 
entity_identifier); + debug!("Transaction Processed: {}", entity_identifier); + } + let filter = sse_event.inbound_filter; + let messages_len = transaction_processed.messages().len(); + + if messages_len > 0 { + observe_contract_messages("all", messages_len); + } + handle_database_save_result( + "TransactionProcessed", + &entity_identifier, + Ok(()), + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_fault( + &self, + era_id: EraId, + timestamp: Timestamp, + public_key: PublicKey, + sse_event: SseEvent, + ) { + let filter = sse_event.inbound_filter; + let fault_identifier = format!("{}-{}", era_id.value(), public_key); + let fault = Fault::new(era_id, public_key, timestamp); + warn!(%fault, "Fault reported"); + + handle_database_save_result( + "Fault", + &fault_identifier, + Ok(()), + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_step(&self, step: Step, sse_event: SseEvent) { + let era_id = step.era_id; + let step_identifier = format!("{}", era_id.value()); + if self.enable_event_logging { + info!("Step at era: {}", step_identifier); + } + let filter = sse_event.inbound_filter; + handle_database_save_result( + "Step", + step_identifier.as_str(), + Ok(()), + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_finality_signature( + &self, + finality_signature: FinalitySignature, + sse_event: SseEvent, + ) { + if self.enable_event_logging { + debug!( + "Finality Signature: {} for {}", + finality_signature.signature(), + finality_signature.block_hash() + ); + } + let filter = sse_event.inbound_filter; + handle_database_save_result( + "FinalitySignature", + "", + Ok(()), + &self.outbound_sse_data_sender, + filter, + sse_event.data, + ) + .await; + } + + async fn handle_shutdown(&self, sse_event: SseEvent) { + warn!("Node ({}) is unavailable", sse_event.source.to_string()); + if let Err(error) = self + 
.outbound_sse_data_sender + .send((SseData::Shutdown, Some(sse_event.inbound_filter))) + .await + { + debug!( + "Error when sending to outbound_sse_data_sender. Error: {}", + error + ); + } + } +} diff --git a/sidecar/src/event_stream_server.rs b/event_sidecar/src/event_stream_server.rs similarity index 91% rename from sidecar/src/event_stream_server.rs rename to event_sidecar/src/event_stream_server.rs index 99209c90..8efcab0b 100644 --- a/sidecar/src/event_stream_server.rs +++ b/event_sidecar/src/event_stream_server.rs @@ -50,12 +50,7 @@ use warp::Filter; /// that a new client can retrieve the entire set of buffered events if desired. const ADDITIONAL_PERCENT_FOR_BROADCAST_CHANNEL_SIZE: u32 = 20; -pub type OutboundSender = UnboundedSender<( - Option, - SseData, - Option, - Option, -)>; +pub type OutboundSender = UnboundedSender<(Option, SseData, Option)>; #[derive(Debug)] pub(crate) struct EventStreamServer { @@ -68,7 +63,11 @@ pub(crate) struct EventStreamServer { } impl EventStreamServer { - pub(crate) fn new(config: Config, storage_path: PathBuf) -> Result { + pub(crate) fn new( + config: Config, + storage_path: PathBuf, + enable_legacy_filters: bool, + ) -> Result { let required_address = resolve_address_and_retype(&config.address)?; let event_indexer = EventIndexer::new(storage_path); let (sse_data_sender, sse_data_receiver) = mpsc::unbounded_channel(); @@ -81,6 +80,7 @@ impl EventStreamServer { } = ChannelsAndFilter::new( get_broadcast_channel_size(&config), config.max_concurrent_subscribers, + enable_legacy_filters, ); let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); let (listening_address, server_with_shutdown) = @@ -110,19 +110,14 @@ impl EventStreamServer { } /// Broadcasts the SSE data to all clients connected to the event stream. 
- pub(crate) fn broadcast( - &mut self, - sse_data: SseData, - inbound_filter: Option, - maybe_json_data: Option, - ) { + pub(crate) fn broadcast(&mut self, sse_data: SseData, inbound_filter: Option) { let event_index = match sse_data { SseData::ApiVersion(..) => None, _ => Some(self.event_indexer.next_index()), }; let _ = self .sse_data_sender - .send((event_index, sse_data, inbound_filter, maybe_json_data)); + .send((event_index, sse_data, inbound_filter)); } } diff --git a/sidecar/src/event_stream_server/config.rs b/event_sidecar/src/event_stream_server/config.rs similarity index 100% rename from sidecar/src/event_stream_server/config.rs rename to event_sidecar/src/event_stream_server/config.rs diff --git a/event_sidecar/src/event_stream_server/endpoint.rs b/event_sidecar/src/event_stream_server/endpoint.rs new file mode 100644 index 00000000..a2f84507 --- /dev/null +++ b/event_sidecar/src/event_stream_server/endpoint.rs @@ -0,0 +1,24 @@ +use std::fmt::{Display, Formatter}; + +/// Enum representing all possible endpoints sidecar can have. +#[derive(Hash, Eq, PartialEq, Debug, Clone)] +pub enum Endpoint { + Events, + Main, + Deploys, + Sigs, + Sidecar, +} + +impl Display for Endpoint { + /// This implementation is for test only and created to mimick how Display is implemented for Filter. 
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Endpoint::Events => write!(f, "events"), + Endpoint::Main => write!(f, "events/main"), + Endpoint::Deploys => write!(f, "events/deploys"), + Endpoint::Sigs => write!(f, "events/sigs"), + Endpoint::Sidecar => write!(f, "events/sidecar"), + } + } +} diff --git a/sidecar/src/event_stream_server/event_indexer.rs b/event_sidecar/src/event_stream_server/event_indexer.rs similarity index 95% rename from sidecar/src/event_stream_server/event_indexer.rs rename to event_sidecar/src/event_stream_server/event_indexer.rs index b3f3865d..353462e2 100644 --- a/sidecar/src/event_stream_server/event_indexer.rs +++ b/event_sidecar/src/event_stream_server/event_indexer.rs @@ -13,6 +13,7 @@ pub(super) struct EventIndexer { } impl EventIndexer { + #![allow(clippy::cognitive_complexity)] pub(super) fn new(storage_path: PathBuf) -> Self { fs::create_dir_all(&storage_path).unwrap_or_else(|err| { error!("Failed to create directory for sse cache: {}", err); @@ -32,6 +33,10 @@ impl EventIndexer { Ok(cached_bytes) => { if cached_bytes.len() == bytes.len() { bytes.copy_from_slice(cached_bytes.as_slice()); + debug!( + file = %persistent_cache.display(), + "successfully read sse index from cache file" + ); } else { warn!( file = %persistent_cache.display(), diff --git a/sidecar/src/event_stream_server/http_server.rs b/event_sidecar/src/event_stream_server/http_server.rs similarity index 90% rename from sidecar/src/event_stream_server/http_server.rs rename to event_sidecar/src/event_stream_server/http_server.rs index 58dfda57..3bade158 100644 --- a/sidecar/src/event_stream_server/http_server.rs +++ b/event_sidecar/src/event_stream_server/http_server.rs @@ -1,14 +1,11 @@ -use std::str::FromStr; - use super::{ config::Config, event_indexer::EventIndex, sse_server::{BroadcastChannelMessage, Id, NewSubscriberInfo, ServerSentEvent}, }; -use casper_event_types::{sse_data::SseData, Filter}; +use 
casper_event_types::{sse_data::SseData, Filter, SIDECAR_VERSION}; use casper_types::ProtocolVersion; use futures::{future, Future, FutureExt}; -use once_cell::sync::Lazy; use tokio::{ select, sync::{ @@ -20,15 +17,8 @@ use tokio::{ }; use tracing::{error, info, trace}; use wheelbuf::WheelBuf; -pub type InboundData = (Option, SseData, Option, Option); -pub type OutboundReceiver = - mpsc::UnboundedReceiver<(Option, SseData, Option, Option)>; -pub static SIDECAR_VERSION: Lazy = Lazy::new(|| { - let major: u32 = FromStr::from_str(env!("CARGO_PKG_VERSION_MAJOR")).unwrap(); - let minor: u32 = FromStr::from_str(env!("CARGO_PKG_VERSION_MINOR")).unwrap(); - let patch: u32 = FromStr::from_str(env!("CARGO_PKG_VERSION_PATCH")).unwrap(); - ProtocolVersion::from_parts(major, minor, patch) -}); +pub type InboundData = (Option, SseData, Option); +pub type OutboundReceiver = mpsc::UnboundedReceiver<(Option, SseData, Option)>; /// Run the HTTP server. /// /// * `server_with_shutdown` is the actual server as a future which can be gracefully shut down. @@ -100,6 +90,13 @@ async fn send_api_version_from_global_state( .send(ServerSentEvent::initial_event(protocol_version)) } +async fn send_legacy_comment( + subscriber: &NewSubscriberInfo, +) -> Result<(), SendError> { + subscriber + .initial_events_sender + .send(ServerSentEvent::legacy_comment_event()) +} async fn send_sidecar_version( subscriber: &NewSubscriberInfo, ) -> Result<(), SendError> { @@ -118,13 +115,13 @@ async fn handle_incoming_data( broadcaster: &broadcast::Sender, ) -> Result<(), ()> { match maybe_data { - Some((maybe_event_index, data, inbound_filter, maybe_json_data)) => { + Some((maybe_event_index, data, inbound_filter)) => { // Buffer the data and broadcast it to subscribed clients. 
trace!("Event stream server received {:?}", data); let event = ServerSentEvent { id: maybe_event_index, - data: data.clone(), - json_data: maybe_json_data, + comment: None, + data: Some(data.clone()), inbound_filter, }; match data { @@ -157,6 +154,9 @@ async fn register_new_subscriber( buffer: &WheelBuf, (ProtocolVersion, ServerSentEvent)>, latest_protocol_version: Option, ) { + if subscriber.enable_legacy_filters { + let _ = send_legacy_comment(&subscriber).await; + } let _ = send_sidecar_version(&subscriber).await; let mut observed_events = false; // If the client supplied a "start_from" index, provide the buffered events. diff --git a/sidecar/src/event_stream_server/sse_server.rs b/event_sidecar/src/event_stream_server/sse_server.rs similarity index 60% rename from sidecar/src/event_stream_server/sse_server.rs rename to event_sidecar/src/event_stream_server/sse_server.rs index 15dc64e6..67e8da74 100644 --- a/sidecar/src/event_stream_server/sse_server.rs +++ b/event_sidecar/src/event_stream_server/sse_server.rs @@ -3,12 +3,24 @@ use super::endpoint::Endpoint; #[cfg(feature = "additional-metrics")] use crate::utils::start_metrics_thread; -use casper_event_types::{sse_data::EventFilter, sse_data::SseData, Deploy, Filter as SseFilter}; +use casper_event_types::{ + legacy_sse_data::LegacySseData, + sse_data::{EventFilter, SseData}, + Filter as SseFilter, +}; use casper_types::ProtocolVersion; +#[cfg(test)] +use casper_types::Transaction; use futures::{future, Stream, StreamExt}; use http::StatusCode; use hyper::Body; +#[cfg(test)] +use once_cell::sync::Lazy; +#[cfg(test)] +use regex::Regex; +#[cfg(test)] use serde::Serialize; +#[cfg(test)] use serde_json::Value; use std::{ collections::{HashMap, HashSet}, @@ -35,51 +47,72 @@ use warp::{ /// The URL root path. pub const SSE_API_ROOT_PATH: &str = "events"; -/// The URL path part to subscribe to all events other than `DeployAccepted`s and -/// `FinalitySignature`s. 
+ +/// The URL path part to subscribe to "backwards compatible" 'main' event stream. +/// It will check for events from the nodes firehose and those which can be translated to 1.x format will be translated. pub const SSE_API_MAIN_PATH: &str = "main"; /// The URL path part to subscribe to only `DeployAccepted` events. pub const SSE_API_DEPLOYS_PATH: &str = "deploys"; /// The URL path part to subscribe to only `FinalitySignature` events. pub const SSE_API_SIGNATURES_PATH: &str = "sigs"; + /// The URL path part to subscribe to sidecar specific events. pub const SSE_API_SIDECAR_PATH: &str = "sidecar"; /// The URL query string field name. pub const QUERY_FIELD: &str = "start_from"; +// This notice should go away once we remove the legacy filter endpoints. +pub const LEGACY_ENDPOINT_NOTICE: &str = "This endpoint is NOT a 1 to 1 representation of events coming off of the node. Some events are not transformable to legacy format. Please consult the documentation for details. This endpoint is deprecated and will be removed in a future release. Please migrate to the /events endpoint instead."; + /// The filter associated with `/events` path. -const EVENTS_FILTER: [EventFilter; 5] = [ +const EVENTS_FILTER: [EventFilter; 8] = [ EventFilter::ApiVersion, EventFilter::BlockAdded, - EventFilter::DeployProcessed, + EventFilter::TransactionAccepted, + EventFilter::TransactionProcessed, + EventFilter::TransactionExpired, EventFilter::Fault, EventFilter::FinalitySignature, + EventFilter::Step, ]; - /// The filter associated with `/events/main` path. const MAIN_FILTER: [EventFilter; 6] = [ EventFilter::ApiVersion, EventFilter::BlockAdded, - EventFilter::DeployProcessed, - EventFilter::DeployExpired, + EventFilter::TransactionProcessed, + EventFilter::TransactionExpired, EventFilter::Fault, EventFilter::Step, ]; /// The filter associated with `/events/deploys` path. 
-const DEPLOYS_FILTER: [EventFilter; 2] = [EventFilter::ApiVersion, EventFilter::DeployAccepted]; +const DEPLOYS_FILTER: [EventFilter; 2] = + [EventFilter::ApiVersion, EventFilter::TransactionAccepted]; /// The filter associated with `/events/sigs` path. const SIGNATURES_FILTER: [EventFilter; 2] = [EventFilter::ApiVersion, EventFilter::FinalitySignature]; /// The filter associated with `/events/sidecar` path. const SIDECAR_FILTER: [EventFilter; 1] = [EventFilter::SidecarVersion]; + +#[cfg(test)] +static ENDS_WITH_ID_REGEX: Lazy = Lazy::new(|| Regex::new(r"\nid:\d*$").unwrap()); +#[cfg(test)] +static STARTS_WITH_REGEX: Lazy = Lazy::new(|| Regex::new(r"^data:").unwrap()); + /// The "id" field of the events sent on the event stream to clients. pub type Id = u32; -type UrlProps = (&'static [EventFilter], &'static Endpoint, Option); +pub type IsLegacyFilter = bool; +type UrlProps = ( + &'static [EventFilter], + &'static Endpoint, + Option, + IsLegacyFilter, +); +#[cfg(test)] #[derive(Serialize)] #[serde(rename_all = "PascalCase")] -pub(super) struct DeployAccepted { - pub(super) deploy_accepted: Arc, +pub(super) struct TransactionAccepted { + pub(super) transaction_accepted: Arc, } /// The components of a single SSE. @@ -87,10 +120,13 @@ pub(super) struct DeployAccepted { pub(super) struct ServerSentEvent { /// The ID should only be `None` where the `data` is `SseData::ApiVersion`. pub(super) id: Option, - /// Payload of the event - pub(super) data: SseData, - /// Optional raw input for the edge-case scenario in which the output needs to receive exactly the same text as we got from inbound. - pub(super) json_data: Option, + /// Payload of the event. This generally shouldn't be an Option, but untill + /// we have legacy filter endpoints we need to be prepared to have a event + /// that is a comment and has no data. 
When legacy filter endpoints go away + /// this should be of type SseData + pub(super) data: Option, + /// Comment of the event + pub(super) comment: Option<&'static str>, /// Information which endpoint we got the event from pub(super) inbound_filter: Option, } @@ -100,16 +136,24 @@ impl ServerSentEvent { pub(super) fn initial_event(client_api_version: ProtocolVersion) -> Self { ServerSentEvent { id: None, - data: SseData::ApiVersion(client_api_version), - json_data: None, + comment: None, + data: Some(SseData::ApiVersion(client_api_version)), inbound_filter: None, } } pub(super) fn sidecar_version_event(version: ProtocolVersion) -> Self { ServerSentEvent { id: None, - data: SseData::SidecarVersion(version), - json_data: None, + comment: None, + data: Some(SseData::SidecarVersion(version)), + inbound_filter: None, + } + } + pub(super) fn legacy_comment_event() -> Self { + ServerSentEvent { + id: None, + comment: Some(LEGACY_ENDPOINT_NOTICE), + data: None, inbound_filter: None, } } @@ -129,18 +173,32 @@ pub(super) enum BroadcastChannelMessage { Shutdown, } -fn event_to_warp_event(event: &ServerSentEvent) -> warp::sse::Event { - let maybe_value = event - .json_data - .as_ref() - .map(|el| serde_json::from_str::(el).unwrap()); - match &maybe_value { - Some(json_data) => WarpServerSentEvent::default().json_data(json_data), - None => WarpServerSentEvent::default().json_data(&event.data), - } - .unwrap_or_else(|error| { - warn!(%error, ?event, "failed to jsonify sse event"); - WarpServerSentEvent::default() +fn event_to_warp_event( + event: &ServerSentEvent, + data: &SseData, + is_legacy_filter: bool, + maybe_id: Option, +) -> Option> { + let warp_data = WarpServerSentEvent::default(); + let maybe_event = if is_legacy_filter { + let legacy_data = LegacySseData::from(data); + legacy_data.map(|data| { + warp_data.json_data(&data).unwrap_or_else(|error| { + warn!(%error, ?event, "failed to jsonify sse event"); + WarpServerSentEvent::default() + }) + }) + } else { + 
Some(warp_data.json_data(data).unwrap_or_else(|error| { + warn!(%error, ?event, "failed to jsonify sse event"); + WarpServerSentEvent::default() + })) + }; + maybe_event.map(|mut event| { + if let Some(id) = maybe_id { + event = event.id(id); + } + Ok(event) }) } @@ -151,6 +209,7 @@ pub(super) struct NewSubscriberInfo { /// A channel to send the initial events to the client's handler. This will always send the /// ApiVersion as the first event, and then any buffered events as indicated by `start_from`. pub(super) initial_events_sender: mpsc::UnboundedSender, + pub(super) enable_legacy_filters: bool, } /// Filters the `event`, mapping it to a warp event, or `None` if it should be filtered out. @@ -158,83 +217,82 @@ async fn filter_map_server_sent_event( event: &ServerSentEvent, stream_filter: &Endpoint, event_filter: &[EventFilter], + is_legacy_filter: bool, ) -> Option> { - if !event.data.should_include(event_filter) { + if should_skip_event(event, event_filter, is_legacy_filter) { return None; } - let id = match determine_id(event) { - Some(id) => id, - None => return None, - }; - match &event.data { - &SseData::ApiVersion { .. } | &SseData::SidecarVersion { .. } => { - let warp_event = event_to_warp_event(event); - Some(Ok(warp_event)) - } - &SseData::BlockAdded { .. } - | &SseData::DeployProcessed { .. } - | &SseData::DeployExpired { .. } - | &SseData::Fault { .. } - | &SseData::Step { .. } - | &SseData::FinalitySignature(_) => { - let warp_event = event_to_warp_event(event).id(id); - Some(Ok(warp_event)) - } - SseData::DeployAccepted { deploy } => handle_deploy_accepted(event, deploy, &id), - &SseData::Shutdown => { - if should_send_shutdown(event, stream_filter) { - build_event_for_outbound(event, id) - } else { - None + if let Some(data) = event.data.as_ref() { + let id = match determine_id(event, data) { + Some(id) => id, + None => return None, + }; + match data { + &SseData::ApiVersion { .. } | &SseData::SidecarVersion { .. 
} => { + event_to_warp_event(event, data, is_legacy_filter, None) + } + &SseData::BlockAdded { .. } + | &SseData::TransactionProcessed { .. } + | &SseData::TransactionExpired { .. } + | &SseData::Fault { .. } + | &SseData::Step { .. } + | &SseData::TransactionAccepted(..) + | &SseData::FinalitySignature(_) => { + event_to_warp_event(event, data, is_legacy_filter, Some(id)) + } + &SseData::Shutdown => { + if should_send_shutdown(event, stream_filter) { + build_event_for_outbound(event, data, id) + } else { + None + } } } + } else if let Some(comment) = event.comment { + build_comment_event(comment) + } else { + None + } +} + +fn should_skip_event( + event: &ServerSentEvent, + event_filter: &[EventFilter], + is_legacy_filter: bool, +) -> bool { + if !event + .data + .as_ref() + .map(|d| d.should_include(event_filter)) + .unwrap_or(true) + { + return true; + } + + if !event + .comment + .as_ref() + .map(|_| is_legacy_filter) + .unwrap_or(true) + { + return true; } + false } fn should_send_shutdown(event: &ServerSentEvent, stream_filter: &Endpoint) -> bool { match (&event.inbound_filter, stream_filter) { (None, Endpoint::Sidecar) => true, + (Some(_), _) => true, (None, _) => false, - (Some(SseFilter::Main), Endpoint::Events) => true, //If this filter handles the `/events` endpoint - // then it should also propagate from inbounds `/events/main` - (Some(SseFilter::Events), Endpoint::Main) => true, //If we are connected to a legacy node - // and the client is listening to /events/main we want to get shutdown from that - (Some(a), b) if b.is_corresponding_to(a) => true, - _ => false, } } -fn handle_deploy_accepted( - event: &ServerSentEvent, - deploy: &Arc, - id: &String, -) -> Option> { - let maybe_value = event - .json_data - .as_ref() - .map(|el| serde_json::from_str::(el).unwrap()); - let warp_event = match maybe_value { - Some(json_data) => WarpServerSentEvent::default().json_data(json_data), - None => { - let deploy_accepted = &DeployAccepted { - deploy_accepted: 
deploy.clone(), - }; - WarpServerSentEvent::default().json_data(deploy_accepted) - } - } - .unwrap_or_else(|error| { - warn!(%error, ?event, "failed to jsonify sse event"); - WarpServerSentEvent::default() - }) - .id(id); - Some(Ok(warp_event)) -} - -fn determine_id(event: &ServerSentEvent) -> Option { +fn determine_id(event: &ServerSentEvent, data: &SseData) -> Option { match event.id { Some(id) => { - if matches!(&event.data, &SseData::ApiVersion { .. }) { + if matches!(data, &SseData::ApiVersion { .. }) { error!("ApiVersion should have no event ID"); return None; } @@ -242,7 +300,7 @@ fn determine_id(event: &ServerSentEvent) -> Option { } None => { if !matches!( - &event.data, + data, &SseData::ApiVersion { .. } | &SseData::SidecarVersion { .. } ) { error!("only ApiVersion and SidecarVersion may have no event ID"); @@ -253,17 +311,18 @@ fn determine_id(event: &ServerSentEvent) -> Option { } } +fn build_comment_event(comment: &str) -> Option> { + Some(Ok(WarpServerSentEvent::default().comment(comment))) +} + fn build_event_for_outbound( event: &ServerSentEvent, + data: &SseData, id: String, ) -> Option> { - let maybe_value = event - .json_data - .as_ref() - .map(|el| serde_json::from_str::(el).unwrap()) - .unwrap_or_else(|| serde_json::to_value(&event.data).unwrap()); + let json_value = serde_json::to_value(data).unwrap(); Some(Ok(WarpServerSentEvent::default() - .json_data(&maybe_value) + .json_data(&json_value) .unwrap_or_else(|error| { warn!(%error, ?event, "failed to jsonify sse event"); WarpServerSentEvent::default() @@ -271,24 +330,30 @@ fn build_event_for_outbound( .id(id))) } -pub(super) fn path_to_filter(path_param: &str) -> Option<&'static Endpoint> { +pub(super) fn path_to_filter( + path_param: &str, + enable_legacy_filters: bool, +) -> Option<&'static Endpoint> { match path_param { SSE_API_ROOT_PATH => Some(&Endpoint::Events), - SSE_API_MAIN_PATH => Some(&Endpoint::Main), - SSE_API_DEPLOYS_PATH => Some(&Endpoint::Deploys), - SSE_API_SIGNATURES_PATH => 
Some(&Endpoint::Sigs), + SSE_API_MAIN_PATH if enable_legacy_filters => Some(&Endpoint::Main), + SSE_API_DEPLOYS_PATH if enable_legacy_filters => Some(&Endpoint::Deploys), + SSE_API_SIGNATURES_PATH if enable_legacy_filters => Some(&Endpoint::Sigs), SSE_API_SIDECAR_PATH => Some(&Endpoint::Sidecar), _ => None, } } /// Converts the final URL path element to a slice of `EventFilter`s. -pub(super) fn get_filter(path_param: &str) -> Option<&'static [EventFilter]> { +pub(super) fn get_filter( + path_param: &str, + enable_legacy_filters: bool, +) -> Option<(&'static [EventFilter], bool)> { match path_param { - SSE_API_ROOT_PATH => Some(&EVENTS_FILTER[..]), - SSE_API_MAIN_PATH => Some(&MAIN_FILTER[..]), - SSE_API_DEPLOYS_PATH => Some(&DEPLOYS_FILTER[..]), - SSE_API_SIGNATURES_PATH => Some(&SIGNATURES_FILTER[..]), - SSE_API_SIDECAR_PATH => Some(&SIDECAR_FILTER[..]), + SSE_API_ROOT_PATH => Some((&EVENTS_FILTER[..], false)), + SSE_API_MAIN_PATH if enable_legacy_filters => Some((&MAIN_FILTER[..], true)), + SSE_API_DEPLOYS_PATH if enable_legacy_filters => Some((&DEPLOYS_FILTER[..], true)), + SSE_API_SIGNATURES_PATH if enable_legacy_filters => Some((&SIGNATURES_FILTER[..], true)), + SSE_API_SIDECAR_PATH => Some((&SIDECAR_FILTER[..], false)), _ => None, } } @@ -316,14 +381,25 @@ fn parse_query(query: HashMap) -> Result, Response> { } /// Creates a 404 response with a useful error message in the body. 
-fn create_404() -> Response { - let mut response = Response::new(Body::from(format!( - "invalid path: expected '/{root}/{main}', '/{root}/{deploys}' or '/{root}/{sigs}'\n", - root = SSE_API_ROOT_PATH, - main = SSE_API_MAIN_PATH, - deploys = SSE_API_DEPLOYS_PATH, - sigs = SSE_API_SIGNATURES_PATH - ))); +fn create_404(enable_legacy_filters: bool) -> Response { + let text = if enable_legacy_filters { + format!( + "invalid path: expected '/{root}/{main}', '/{root}/{deploys}' or '/{root}/{sigs} or '/{root}/{sidecar}'\n", + root = SSE_API_ROOT_PATH, + main = SSE_API_MAIN_PATH, + deploys = SSE_API_DEPLOYS_PATH, + sigs = SSE_API_SIGNATURES_PATH, + sidecar = SSE_API_SIDECAR_PATH, + ) + } else { + format!( + "invalid path: expected '/{root}/{main}' or '/{root}/{sidecar}'\n", + root = SSE_API_ROOT_PATH, + main = SSE_API_MAIN_PATH, + sidecar = SSE_API_SIDECAR_PATH, + ) + }; + let mut response = Response::new(Body::from(text)); *response.status_mut() = StatusCode::NOT_FOUND; response } @@ -359,15 +435,17 @@ fn serve_sse_response_handler( cloned_broadcaster: tokio::sync::broadcast::Sender, max_concurrent_subscribers: u32, new_subscriber_info_sender: UnboundedSender, + enable_legacy_filters: bool, #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, ) -> http::Response { if let Some(value) = validate(&cloned_broadcaster, max_concurrent_subscribers) { return value; } - let (event_filter, stream_filter, start_from) = match parse_url_props(maybe_path_param, query) { - Ok(value) => value, - Err(error_response) => return error_response, - }; + let (event_filter, stream_filter, start_from, is_legacy_filter) = + match parse_url_props(maybe_path_param, query, enable_legacy_filters) { + Ok(value) => value, + Err(error_response) => return error_response, + }; // Create a channel for the client's handler to receive the stream of initial events. 
let (initial_events_sender, initial_events_receiver) = mpsc::unbounded_channel(); @@ -377,6 +455,7 @@ fn serve_sse_response_handler( let new_subscriber_info = NewSubscriberInfo { start_from, initial_events_sender, + enable_legacy_filters, }; if new_subscriber_info_sender .send(new_subscriber_info) @@ -393,6 +472,7 @@ fn serve_sse_response_handler( ongoing_events_receiver, stream_filter, event_filter, + is_legacy_filter, #[cfg(feature = "additional-metrics")] metrics_sender, ))) @@ -402,21 +482,20 @@ fn serve_sse_response_handler( fn parse_url_props( maybe_path_param: Option, query: HashMap, + enable_legacy_filters: bool, ) -> Result> { let path_param = maybe_path_param.unwrap_or_else(|| SSE_API_ROOT_PATH.to_string()); - let event_filter = match get_filter(path_param.as_str()) { - Some(filter) => filter, - None => return Err(create_404()), - }; - let stream_filter = match path_to_filter(path_param.as_str()) { + let (event_filter, is_legacy_filter) = + match get_filter(path_param.as_str(), enable_legacy_filters) { + Some((filter, is_legacy_filter)) => (filter, is_legacy_filter), + None => return Err(create_404(enable_legacy_filters)), + }; + let stream_filter = match path_to_filter(path_param.as_str(), enable_legacy_filters) { Some(filter) => filter, - None => return Err(create_404()), - }; - let start_from = match parse_query(query) { - Ok(maybe_id) => maybe_id, - Err(error_response) => return Err(error_response), + None => return Err(create_404(enable_legacy_filters)), }; - Ok((event_filter, stream_filter, start_from)) + let start_from = parse_query(query)?; + Ok((event_filter, stream_filter, start_from, is_legacy_filter)) } fn validate( @@ -437,7 +516,11 @@ fn validate( impl ChannelsAndFilter { /// Creates the message-passing channels required to run the event-stream server and the warp /// filter for the event-stream server. 
- pub(super) fn new(broadcast_channel_size: usize, max_concurrent_subscribers: u32) -> Self { + pub(super) fn new( + broadcast_channel_size: usize, + max_concurrent_subscribers: u32, + enable_legacy_filters: bool, + ) -> Self { // Create a channel to broadcast new events to all subscribed clients' streams. let (event_broadcaster, _) = broadcast::channel(broadcast_channel_size); let cloned_broadcaster = event_broadcaster.clone(); @@ -464,12 +547,15 @@ impl ChannelsAndFilter { cloned_broadcaster.clone(), max_concurrent_subscribers, new_subscriber_info_sender_clone, + enable_legacy_filters, #[cfg(feature = "additional-metrics")] tx.clone(), ) }, ) - .or_else(|_| async move { Ok::<_, Rejection>((create_404(),)) }) + .or_else( + move |_| async move { Ok::<_, Rejection>((create_404(enable_legacy_filters),)) }, + ) .boxed(); ChannelsAndFilter { @@ -499,6 +585,7 @@ fn stream_to_client( ongoing_events: broadcast::Receiver, stream_filter: &'static Endpoint, event_filter: &'static [EventFilter], + is_legacy_filter: bool, #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, ) -> impl Stream> + 'static { // Keep a record of the IDs of the events delivered via the `initial_events` receiver. 
@@ -528,6 +615,9 @@ fn stream_to_client( ongoing_stream, stream_filter, event_filter, + is_legacy_filter, + #[cfg(feature = "additional-metrics")] + metrics_sender, ) } @@ -540,6 +630,8 @@ fn build_combined_events_stream( >, stream_filter: &'static Endpoint, event_filter: &'static [EventFilter], + is_legacy_filter: bool, + #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, ) -> impl Stream> + 'static { UnboundedReceiverStream::new(initial_events) .map(move |event| { @@ -557,10 +649,15 @@ fn build_combined_events_stream( let sender = metrics_sender; match result { Ok(event) => { - let fitlered_data = - filter_map_server_sent_event(&event, stream_filter, event_filter).await; + let fitlered_data = filter_map_server_sent_event( + &event, + stream_filter, + event_filter, + is_legacy_filter, + ) + .await; #[cfg(feature = "additional-metrics")] - if let Some(_) = fitlered_data { + if fitlered_data.is_some() { let _ = sender.clone().send(()).await; } #[allow(clippy::let_and_return)] @@ -596,10 +693,8 @@ fn handle_sse_event( #[cfg(test)] mod tests { use super::*; - use casper_event_types::DeployHash; - use casper_types::testing::TestRng; + use casper_types::{testing::TestRng, TransactionHash}; use rand::Rng; - use regex::Regex; use std::iter; #[cfg(feature = "additional-metrics")] use tokio::sync::mpsc::channel; @@ -611,7 +706,7 @@ mod tests { async fn should_filter_out(event: &ServerSentEvent, filter: &'static [EventFilter]) { assert!( - filter_map_server_sent_event(event, &Endpoint::Main, filter) + filter_map_server_sent_event(event, &Endpoint::Events, filter, false) .await .is_none(), "should filter out {:?} with {:?}", @@ -622,7 +717,7 @@ mod tests { async fn should_not_filter_out(event: &ServerSentEvent, filter: &'static [EventFilter]) { assert!( - filter_map_server_sent_event(event, &Endpoint::Main, filter) + filter_map_server_sent_event(event, &Endpoint::Events, filter, false) .await .is_some(), "should not filter out {:?} with {:?}", @@ -640,99 
+735,122 @@ mod tests { let api_version = ServerSentEvent { id: None, - data: SseData::random_api_version(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_api_version(&mut rng)), inbound_filter: None, }; let block_added = ServerSentEvent { id: Some(rng.gen()), - data: SseData::random_block_added(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_block_added(&mut rng)), inbound_filter: None, }; - let (sse_data, deploy) = SseData::random_deploy_accepted(&mut rng); - let deploy_accepted = ServerSentEvent { + let (sse_data, transaction) = SseData::random_transaction_accepted(&mut rng); + let transaction_accepted = ServerSentEvent { id: Some(rng.gen()), - data: sse_data, - json_data: None, + comment: None, + data: Some(sse_data), inbound_filter: None, }; - let mut deploys = HashMap::new(); - let _ = deploys.insert(*deploy.hash(), deploy); - let deploy_processed = ServerSentEvent { + let mut transactions = HashMap::new(); + let _ = transactions.insert(transaction.hash(), transaction); + let transaction_processed = ServerSentEvent { id: Some(rng.gen()), - data: SseData::random_deploy_processed(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_transaction_processed(&mut rng)), inbound_filter: None, }; - let deploy_expired = ServerSentEvent { + let transaction_expired = ServerSentEvent { id: Some(rng.gen()), - data: SseData::random_deploy_expired(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_transaction_expired(&mut rng)), inbound_filter: None, }; let fault = ServerSentEvent { id: Some(rng.gen()), - data: SseData::random_fault(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_fault(&mut rng)), inbound_filter: None, }; let finality_signature = ServerSentEvent { id: Some(rng.gen()), - data: SseData::random_finality_signature(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_finality_signature(&mut rng)), 
inbound_filter: None, }; let step = ServerSentEvent { id: Some(rng.gen()), - data: SseData::random_step(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_step(&mut rng)), inbound_filter: None, }; let shutdown = ServerSentEvent { id: Some(rng.gen()), - data: SseData::Shutdown, - json_data: None, - inbound_filter: Some(SseFilter::Main), - //For shutdown we need to provide the inbound - //filter because we send shutdowns only to corresponding outbounds to prevent duplicates + comment: None, + data: Some(SseData::Shutdown), + inbound_filter: Some(SseFilter::Events), + }; + let sidecar_api_version = ServerSentEvent { + id: Some(rng.gen()), + comment: None, + data: Some(SseData::random_sidecar_version(&mut rng)), + inbound_filter: None, }; - // `EventFilter::Main` should only filter out `DeployAccepted`s and `FinalitySignature`s. + should_not_filter_out(&api_version, &EVENTS_FILTER[..]).await; + should_not_filter_out(&block_added, &EVENTS_FILTER[..]).await; + should_not_filter_out(&transaction_accepted, &EVENTS_FILTER[..]).await; + should_not_filter_out(&transaction_processed, &EVENTS_FILTER[..]).await; + should_not_filter_out(&transaction_expired, &EVENTS_FILTER[..]).await; + should_not_filter_out(&fault, &EVENTS_FILTER[..]).await; + should_not_filter_out(&step, &EVENTS_FILTER[..]).await; + should_not_filter_out(&shutdown, &EVENTS_FILTER).await; + should_not_filter_out(&api_version, &EVENTS_FILTER[..]).await; + should_not_filter_out(&finality_signature, &EVENTS_FILTER[..]).await; + should_filter_out(&sidecar_api_version, &EVENTS_FILTER[..]).await; + + should_filter_out(&api_version, &SIDECAR_FILTER[..]).await; + should_filter_out(&block_added, &SIDECAR_FILTER[..]).await; + should_filter_out(&transaction_accepted, &SIDECAR_FILTER[..]).await; + should_filter_out(&transaction_processed, &SIDECAR_FILTER[..]).await; + should_filter_out(&transaction_expired, &SIDECAR_FILTER[..]).await; + should_filter_out(&fault, &SIDECAR_FILTER[..]).await; + 
should_filter_out(&step, &SIDECAR_FILTER[..]).await; + should_filter_out(&api_version, &SIDECAR_FILTER[..]).await; + should_filter_out(&finality_signature, &SIDECAR_FILTER[..]).await; + should_not_filter_out(&shutdown, &SIDECAR_FILTER).await; + should_not_filter_out(&sidecar_api_version, &SIDECAR_FILTER[..]).await; + should_not_filter_out(&api_version, &MAIN_FILTER[..]).await; should_not_filter_out(&block_added, &MAIN_FILTER[..]).await; - should_not_filter_out(&deploy_processed, &MAIN_FILTER[..]).await; - should_not_filter_out(&deploy_expired, &MAIN_FILTER[..]).await; + should_not_filter_out(&transaction_processed, &MAIN_FILTER[..]).await; + should_not_filter_out(&transaction_expired, &MAIN_FILTER[..]).await; should_not_filter_out(&fault, &MAIN_FILTER[..]).await; should_not_filter_out(&step, &MAIN_FILTER[..]).await; should_not_filter_out(&shutdown, &MAIN_FILTER).await; - should_filter_out(&deploy_accepted, &MAIN_FILTER[..]).await; + should_filter_out(&transaction_accepted, &MAIN_FILTER[..]).await; should_filter_out(&finality_signature, &MAIN_FILTER[..]).await; - // `EventFilter::DeployAccepted` should filter out everything except `ApiVersion`s and - // `DeployAccepted`s. 
should_not_filter_out(&api_version, &DEPLOYS_FILTER[..]).await; - should_not_filter_out(&deploy_accepted, &DEPLOYS_FILTER[..]).await; + should_not_filter_out(&transaction_accepted, &DEPLOYS_FILTER[..]).await; should_not_filter_out(&shutdown, &DEPLOYS_FILTER[..]).await; should_filter_out(&block_added, &DEPLOYS_FILTER[..]).await; - should_filter_out(&deploy_processed, &DEPLOYS_FILTER[..]).await; - should_filter_out(&deploy_expired, &DEPLOYS_FILTER[..]).await; + should_filter_out(&transaction_processed, &DEPLOYS_FILTER[..]).await; + should_filter_out(&transaction_expired, &DEPLOYS_FILTER[..]).await; should_filter_out(&fault, &DEPLOYS_FILTER[..]).await; should_filter_out(&finality_signature, &DEPLOYS_FILTER[..]).await; should_filter_out(&step, &DEPLOYS_FILTER[..]).await; - // `EventFilter::Signatures` should filter out everything except `ApiVersion`s and - // `FinalitySignature`s. should_not_filter_out(&api_version, &SIGNATURES_FILTER[..]).await; should_not_filter_out(&finality_signature, &SIGNATURES_FILTER[..]).await; should_not_filter_out(&shutdown, &SIGNATURES_FILTER[..]).await; should_filter_out(&block_added, &SIGNATURES_FILTER[..]).await; - should_filter_out(&deploy_accepted, &SIGNATURES_FILTER[..]).await; - should_filter_out(&deploy_processed, &SIGNATURES_FILTER[..]).await; - should_filter_out(&deploy_expired, &SIGNATURES_FILTER[..]).await; + should_filter_out(&transaction_accepted, &SIGNATURES_FILTER[..]).await; + should_filter_out(&transaction_processed, &SIGNATURES_FILTER[..]).await; + should_filter_out(&transaction_expired, &SIGNATURES_FILTER[..]).await; should_filter_out(&fault, &SIGNATURES_FILTER[..]).await; should_filter_out(&step, &SIGNATURES_FILTER[..]).await; } @@ -746,72 +864,74 @@ mod tests { let malformed_api_version = ServerSentEvent { id: Some(rng.gen()), - data: SseData::random_api_version(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_api_version(&mut rng)), inbound_filter: None, }; let malformed_block_added = 
ServerSentEvent { id: None, - data: SseData::random_block_added(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_block_added(&mut rng)), inbound_filter: None, }; - let (sse_data, deploy) = SseData::random_deploy_accepted(&mut rng); - let malformed_deploy_accepted = ServerSentEvent { + let (sse_data, transaction) = SseData::random_transaction_accepted(&mut rng); + let malformed_transaction_accepted = ServerSentEvent { id: None, - data: sse_data, - json_data: None, + comment: None, + data: Some(sse_data), inbound_filter: None, }; - let mut deploys = HashMap::new(); - let _ = deploys.insert(*deploy.hash(), deploy); - let malformed_deploy_processed = ServerSentEvent { + let mut transactions = HashMap::new(); + let _ = transactions.insert(transaction.hash(), transaction); + let malformed_transaction_processed = ServerSentEvent { id: None, - data: SseData::random_deploy_processed(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_transaction_processed(&mut rng)), inbound_filter: None, }; - let malformed_deploy_expired = ServerSentEvent { + let malformed_transaction_expired = ServerSentEvent { id: None, - data: SseData::random_deploy_expired(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_transaction_expired(&mut rng)), inbound_filter: None, }; let malformed_fault = ServerSentEvent { id: None, - data: SseData::random_fault(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_fault(&mut rng)), inbound_filter: None, }; let malformed_finality_signature = ServerSentEvent { id: None, - data: SseData::random_finality_signature(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_finality_signature(&mut rng)), inbound_filter: None, }; let malformed_step = ServerSentEvent { id: None, - data: SseData::random_step(&mut rng), - json_data: None, + comment: None, + data: Some(SseData::random_step(&mut rng)), inbound_filter: None, }; let malformed_shutdown = 
ServerSentEvent { id: None, - data: SseData::Shutdown, - json_data: None, + comment: None, + data: Some(SseData::Shutdown), inbound_filter: None, }; for filter in &[ + &EVENTS_FILTER[..], + &SIDECAR_FILTER[..], &MAIN_FILTER[..], &DEPLOYS_FILTER[..], &SIGNATURES_FILTER[..], ] { should_filter_out(&malformed_api_version, filter).await; should_filter_out(&malformed_block_added, filter).await; - should_filter_out(&malformed_deploy_accepted, filter).await; - should_filter_out(&malformed_deploy_processed, filter).await; - should_filter_out(&malformed_deploy_expired, filter).await; + should_filter_out(&malformed_transaction_accepted, filter).await; + should_filter_out(&malformed_transaction_processed, filter).await; + should_filter_out(&malformed_transaction_expired, filter).await; should_filter_out(&malformed_fault, filter).await; should_filter_out(&malformed_finality_signature, filter).await; should_filter_out(&malformed_step, filter).await; @@ -820,10 +940,10 @@ mod tests { } #[allow(clippy::too_many_lines)] - async fn should_filter_duplicate_events(path_filter: &str) { + async fn should_filter_duplicate_events(path_filter: &str, is_legacy_endpoint: bool) { let mut rng = TestRng::new(); - let mut deploys = HashMap::new(); + let mut transactions = HashMap::new(); let initial_events: Vec = iter::once(ServerSentEvent::initial_event(ProtocolVersion::V1_0_0)) @@ -832,7 +952,7 @@ mod tests { 0, NUM_INITIAL_EVENTS, path_filter, - &mut deploys, + &mut transactions, )) .collect(); @@ -845,7 +965,7 @@ mod tests { *duplicate_count, &initial_events, path_filter, - &mut deploys, + &mut transactions, ); let (initial_events_sender, initial_events_receiver) = mpsc::unbounded_channel(); @@ -865,15 +985,17 @@ mod tests { drop(initial_events_sender); drop(ongoing_events_sender); - let stream_filter = path_to_filter(path_filter).unwrap(); + let stream_filter = path_to_filter(path_filter, true).unwrap(); #[cfg(feature = "additional-metrics")] - let (tx, rx) = channel(1000); + let (tx, _rx) 
= channel(1000); + let (filter, is_legacy_filter) = get_filter(path_filter, true).unwrap(); // Collect the events emitted by `stream_to_client()` - should not contain duplicates. let received_events: Vec> = stream_to_client( initial_events_receiver, ongoing_events_receiver, stream_filter, - get_filter(path_filter).unwrap(), + filter, + is_legacy_filter, #[cfg(feature = "additional-metrics")] tx, ) @@ -898,84 +1020,132 @@ mod tests { { let received_event = received_event.as_ref().unwrap(); - let expected_data = deduplicated_event.data.clone(); + let expected_data = deduplicated_event.data.clone().unwrap(); let mut received_event_str = received_event.to_string().trim().to_string(); - let ends_with_id = Regex::new(r"\nid:\d*$").unwrap(); - let starts_with_data = Regex::new(r"^data:").unwrap(); if let Some(id) = deduplicated_event.id { assert!(received_event_str.ends_with(format!("\nid:{}", id).as_str())); } else { - assert!(!ends_with_id.is_match(received_event_str.as_str())); + assert!(!ENDS_WITH_ID_REGEX.is_match(received_event_str.as_str())); }; - received_event_str = ends_with_id + received_event_str = ENDS_WITH_ID_REGEX .replace_all(received_event_str.as_str(), "") .into_owned(); - received_event_str = starts_with_data + received_event_str = STARTS_WITH_REGEX .replace_all(received_event_str.as_str(), "") .into_owned(); - let received_data = - serde_json::from_str::(received_event_str.as_str()).unwrap(); - let expected_data = serde_json::to_value(&expected_data).unwrap(); - assert_eq!(expected_data, received_data); + if is_legacy_endpoint { + let maybe_legacy = LegacySseData::from(&expected_data); + assert!(maybe_legacy.is_some()); + let input_legacy = maybe_legacy.unwrap(); + let got_legacy = + serde_json::from_str::(received_event_str.as_str()).unwrap(); + assert_eq!(got_legacy, input_legacy); + } else { + let received_data = + serde_json::from_str::(received_event_str.as_str()).unwrap(); + let expected_data = serde_json::to_value(&expected_data).unwrap(); + 
assert_eq!(expected_data, received_data); + } } } } - /// This test checks that main events from the initial stream which are duplicated in the - /// ongoing stream are filtered out. #[tokio::test] async fn should_filter_duplicate_main_events() { - should_filter_duplicate_events(SSE_API_MAIN_PATH).await + should_filter_duplicate_events(SSE_API_MAIN_PATH, true).await } - /// This test checks that deploy-accepted events from the initial stream which are duplicated in /// the ongoing stream are filtered out. #[tokio::test] async fn should_filter_duplicate_deploys_events() { - should_filter_duplicate_events(SSE_API_DEPLOYS_PATH).await + should_filter_duplicate_events(SSE_API_DEPLOYS_PATH, true).await } /// This test checks that signature events from the initial stream which are duplicated in the /// ongoing stream are filtered out. #[tokio::test] async fn should_filter_duplicate_signature_events() { - should_filter_duplicate_events(SSE_API_SIGNATURES_PATH).await + should_filter_duplicate_events(SSE_API_SIGNATURES_PATH, true).await + } + + /// This test checks that main events from the initial stream which are duplicated in the + /// ongoing stream are filtered out. + #[tokio::test] + async fn should_filter_duplicate_firehose_events() { + should_filter_duplicate_events(SSE_API_ROOT_PATH, false).await } - // Returns `count` random SSE events, all of a single variant defined by `path_filter`. The - // events will have sequential IDs starting from `start_id`, and if the path filter - // indicates the events should be deploy-accepted ones, the corresponding random deploys - // will be inserted into `deploys`. + // Returns `count` random SSE events. The events will have sequential IDs starting from `start_id`, and if the path filter + // indicates the events should be transaction-accepted ones, the corresponding random transactions + // will be inserted into `transactions`. 
fn make_random_events( rng: &mut TestRng, start_id: Id, count: usize, path_filter: &str, - deploys: &mut HashMap, + transactions: &mut HashMap, ) -> Vec { (start_id..(start_id + count as u32)) .map(|id| { let data = match path_filter { - SSE_API_MAIN_PATH => SseData::random_block_added(rng), + SSE_API_MAIN_PATH => make_legacy_compliant_random_block(rng), SSE_API_DEPLOYS_PATH => { - let (event, deploy) = SseData::random_deploy_accepted(rng); - assert!(deploys.insert(*deploy.hash(), deploy).is_none()); + let (event, transaction) = make_legacy_compliant_random_transaction(rng); + assert!(transactions + .insert(transaction.hash(), transaction) + .is_none()); event } SSE_API_SIGNATURES_PATH => SseData::random_finality_signature(rng), + SSE_API_ROOT_PATH => { + let discriminator = id % 3; + match discriminator { + 0 => SseData::random_block_added(rng), + 1 => { + let (event, transaction) = + SseData::random_transaction_accepted(rng); + assert!(transactions + .insert(transaction.hash(), transaction) + .is_none()); + event + } + 2 => SseData::random_finality_signature(rng), + _ => unreachable!(), + } + } _ => unreachable!(), }; ServerSentEvent { id: Some(id), - data, - json_data: None, + comment: None, + data: Some(data), inbound_filter: None, } }) .collect() } + fn make_legacy_compliant_random_transaction(rng: &mut TestRng) -> (SseData, Transaction) { + loop { + let (event, transaction) = SseData::random_transaction_accepted(rng); + let legacy = LegacySseData::from(&event); + if legacy.is_some() { + return (event, transaction); + } + } + } + + fn make_legacy_compliant_random_block(rng: &mut TestRng) -> SseData { + loop { + let block = SseData::random_block_added(rng); + let legacy = LegacySseData::from(&block); + if legacy.is_some() { + return block; + } + } + } + // Returns `NUM_ONGOING_EVENTS` random SSE events for the ongoing stream containing // duplicates taken from the end of the initial stream. 
Allows for the full initial stream // to be duplicated except for its first event (the `ApiVersion` one) which has no ID. @@ -984,7 +1154,7 @@ mod tests { duplicate_count: usize, initial_events: &[ServerSentEvent], path_filter: &str, - deploys: &mut HashMap, + transactions: &mut HashMap, ) -> Vec { assert!(duplicate_count < initial_events.len()); let initial_skip_count = initial_events.len() - duplicate_count; @@ -999,7 +1169,7 @@ mod tests { unique_start_id, unique_count, path_filter, - deploys, + transactions, )) .collect() } diff --git a/sidecar/src/event_stream_server/tests.rs b/event_sidecar/src/event_stream_server/tests.rs similarity index 71% rename from sidecar/src/event_stream_server/tests.rs rename to event_sidecar/src/event_stream_server/tests.rs index 3f4c64dd..6726ba54 100644 --- a/sidecar/src/event_stream_server/tests.rs +++ b/event_sidecar/src/event_stream_server/tests.rs @@ -1,19 +1,23 @@ +use self::sse_server::LEGACY_ENDPOINT_NOTICE; + use super::*; +use casper_event_types::legacy_sse_data::LegacySseData; use casper_types::{testing::TestRng, ProtocolVersion}; -use futures::{join, StreamExt}; -use http::StatusCode; +use futures::{join, Stream, StreamExt}; use pretty_assertions::assert_eq; -use reqwest::Response; +use reqwest::{Response, StatusCode}; use serde_json::Value; use sse_server::{ - DeployAccepted, Id, QUERY_FIELD, SSE_API_DEPLOYS_PATH as DEPLOYS_PATH, + Id, TransactionAccepted, QUERY_FIELD, SSE_API_DEPLOYS_PATH as DEPLOYS_PATH, SSE_API_MAIN_PATH as MAIN_PATH, SSE_API_ROOT_PATH as ROOT_PATH, SSE_API_SIGNATURES_PATH as SIGS_PATH, }; use std::{ collections::HashMap, error::Error, - fs, io, iter, str, + fs, io, iter, + pin::Pin, + str, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -192,7 +196,7 @@ impl Drop for ServerStopper { struct TestFixture { storage_dir: TempDir, protocol_version: ProtocolVersion, - events: Vec<(SseData, Option)>, + events: Vec, first_event_id: Id, server_join_handle: Option>, server_stopper: ServerStopper, @@ -207,23 
+211,24 @@ impl TestFixture { fs::create_dir_all(&storage_dir).unwrap(); let protocol_version = ProtocolVersion::from_parts(1, 2, 3); - let mut deploys = HashMap::new(); - let events: Vec<(SseData, Option)> = (0..EVENT_COUNT) + let mut transactions = HashMap::new(); + let events: Vec = (0..EVENT_COUNT) .map(|i| match i % DISTINCT_EVENTS_COUNT { 0 => SseData::random_block_added(rng), 1 => { - let (event, deploy) = SseData::random_deploy_accepted(rng); - assert!(deploys.insert(*deploy.hash(), deploy).is_none()); + let (event, transaction) = SseData::random_transaction_accepted(rng); + assert!(transactions + .insert(transaction.hash(), transaction) + .is_none()); event } - 2 => SseData::random_deploy_processed(rng), - 3 => SseData::random_deploy_expired(rng), + 2 => SseData::random_transaction_processed(rng), + 3 => SseData::random_transaction_expired(rng), 4 => SseData::random_fault(rng), 5 => SseData::random_step(rng), 6 => SseData::random_finality_signature(rng), _ => unreachable!(), }) - .map(|x| (x, None)) .collect(); TestFixture { storage_dir, @@ -267,7 +272,7 @@ impl TestFixture { ..Default::default() }; let mut server = - EventStreamServer::new(config, self.storage_dir.path().to_path_buf()).unwrap(); + EventStreamServer::new(config, self.storage_dir.path().to_path_buf(), true).unwrap(); self.first_event_id = server.event_indexer.current_index(); @@ -284,10 +289,8 @@ impl TestFixture { }; let api_version_event = SseData::ApiVersion(protocol_version); - server.broadcast(api_version_event.clone(), Some(SseFilter::Main), None); - for (id, (event, maybe_json_data)) in - events.iter().cycle().enumerate().take(event_count as usize) - { + server.broadcast(api_version_event.clone(), Some(SseFilter::Events)); + for (id, event) in events.iter().cycle().enumerate().take(event_count as usize) { if server_stopper.should_stop() { debug!("stopping server early"); return; @@ -295,13 +298,7 @@ impl TestFixture { server_behavior .wait_for_clients((id as 
Id).wrapping_add(first_event_id)) .await; - server.broadcast( - event.clone(), - Some(SseFilter::Main), - maybe_json_data - .as_ref() - .map(|el| serde_json::from_str(el.as_str()).unwrap()), - ); + server.broadcast(event.clone(), Some(SseFilter::Events)); server_behavior.sleep_if_required().await; } @@ -356,17 +353,17 @@ impl TestFixture { data: serde_json::to_string(&SseData::ApiVersion(self.protocol_version)).unwrap(), }; let id_filter = build_id_filter(from); - let filter = sse_server::get_filter(final_path_element).unwrap(); + let (filter, _is_legacy_filter) = sse_server::get_filter(final_path_element, true).unwrap(); let events: Vec = iter::once(api_version_event) .chain( self.events .iter() - .filter(|(event, _)| !matches!(event, SseData::ApiVersion(..))) + .filter(|event| !matches!(event, SseData::ApiVersion(..))) .enumerate() .filter_map(|(id, event)| { let id = id as u128 + self.first_event_id as u128; - if event.0.should_include(filter) { - id_filter(id, &event.0) + if event.should_include(filter) { + id_filter(id, event) } else { None } @@ -445,16 +442,13 @@ async fn subscribe( final_event_id: Id, client_id: &str, ) -> Result, reqwest::Error> { - debug!("{} waiting before connecting via {}", client_id, url); timeout(Duration::from_secs(60), barrier.wait()) .await .unwrap(); let response = reqwest::get(url).await?; - debug!("{} waiting after connecting", client_id); timeout(Duration::from_secs(60), barrier.wait()) .await .unwrap(); - debug!("{} finished waiting", client_id); handle_response(response, final_event_id, client_id).await } @@ -476,18 +470,28 @@ async fn subscribe_slow( timeout(Duration::from_secs(60), barrier.wait()) .await .unwrap(); - time::sleep(Duration::from_secs(5)).await; let mut stream = response.bytes_stream(); + let pause_between_events = Duration::from_secs(100) / MAX_EVENT_COUNT; + let mut bytes_buf: Vec = vec![]; while let Some(item) = stream.next().await { // The function is expected to exit here with an `UnexpectedEof` error. 
let bytes = item?; - let chunk = str::from_utf8(bytes.as_ref()).unwrap(); - if chunk.lines().any(|line| line == ":") { - debug!("{} received keepalive: exiting", client_id); - break; + // We fetch bytes untill we get a chunk that can be fully interpreted as utf-8 + let res: Vec = [bytes_buf.as_slice(), bytes.as_ref()].concat(); + match str::from_utf8(res.as_slice()) { + Ok(chunk) => { + if chunk.lines().any(|line| line == ":") { + debug!("{} received keepalive: exiting", client_id); + break; + } + bytes_buf = vec![]; + } + Err(_) => { + bytes_buf = res; + } } time::sleep(pause_between_events).await; } @@ -528,34 +532,62 @@ async fn handle_response( return Ok(Vec::new()); } - // The stream from the server is not always chunked into events, so gather the stream into a - // single `String` until we receive a keepalive. - let mut response_text = String::new(); - let mut stream = response.bytes_stream(); + let stream = response.bytes_stream(); let final_id_line = format!("id:{}", final_event_id); let keepalive = ":"; + + let response_text = fetch_text( + Box::pin(stream), + final_id_line, + keepalive, + client_id, + final_event_id, + ) + .await?; + + Ok(parse_response(response_text, client_id)) +} + +async fn fetch_text( + mut stream: Pin> + Send + 'static>>, + final_id_line: String, + keepalive: &str, + client_id: &str, + final_event_id: u32, +) -> Result { + let mut bytes_buf: Vec = vec![]; + let mut response_text = String::new(); + // The stream from the server is not always chunked into events, so gather the stream into a + // single `String` until we receive a keepalive. Furthermore - a chunk of bytes can even split a utf8 character in half + // which makes it impossible to interpret it as utf8. We need to fetch bytes untill we get a chunk that can be fully interpreted as utf-8 while let Some(item) = stream.next().await { - // If the server crashes or returns an error in the stream, it is caught here as `item` will - // be an `Err`. 
let bytes = item?; - let chunk = str::from_utf8(bytes.as_ref()).unwrap(); - response_text.push_str(chunk); - if let Some(line) = response_text - .lines() - .find(|&line| line == final_id_line || line == keepalive) - { - if line == keepalive { - panic!("{} received keepalive", client_id); + // We fetch bytes untill we get a chunk that can be fully interpreted as utf-8 + let res: Vec = [bytes_buf.as_slice(), bytes.as_ref()].concat(); + match str::from_utf8(res.as_slice()) { + Ok(chunk) => { + response_text.push_str(chunk); + if let Some(line) = response_text + .lines() + .find(|&line| line == final_id_line || line == keepalive) + { + if line == keepalive { + panic!("{} received keepalive", client_id); + } + debug!( + "{} received final event ID {}: exiting", + client_id, final_event_id + ); + return Ok(response_text); + } + bytes_buf = vec![]; + } + Err(_) => { + bytes_buf = res; } - debug!( - "{} received final event ID {}: exiting", - client_id, final_event_id - ); - break; } } - - Ok(parse_response(response_text, client_id)) + Ok(response_text) } /// Iterate the lines of the response body. Each line should be one of @@ -566,18 +598,27 @@ async fn handle_response( /// /// The expected order is: /// * data: (note, no ID line follows this first event) -/// then the following three repeated for as many events as are applicable to that stream: +/// then the following three repeated for as many events as are applicable to that stream: /// * data: /// * id: /// * empty line -/// then finally, repeated keepalive lines until the server is shut down. +/// then finally, repeated keepalive lines until the server is shut down. 
+#[allow(clippy::too_many_lines)] fn parse_response(response_text: String, client_id: &str) -> Vec { let mut received_events = Vec::new(); let mut line_itr = response_text.lines(); + let mut first_line = true; while let Some(data_line) = line_itr.next() { let data = match data_line.strip_prefix("data:") { Some(data_str) => data_str.to_string(), None => { + if first_line { + // In legacy endpoints the first line is a comment containing deprecation notice. When we remove the legacy endpoints we can remove this check. + if data_line.trim() == format!(":{}", LEGACY_ENDPOINT_NOTICE) { + continue; + } + first_line = false; + } if data_line.trim().is_empty() || data_line.trim() == ":" { continue; } else { @@ -622,8 +663,8 @@ fn parse_response(response_text: String, client_id: &str) -> Vec /// * no `?start_from=` query /// * connected before first event /// -/// Expected to receive all main, deploy-accepted or signature events depending on `filter`. -async fn should_serve_events_with_no_query(path: &str) { +/// Expected to receive all main, transaction-accepted or signature events depending on `filter`. +async fn should_serve_events_with_no_query(path: &str, is_legacy_endpoint: bool) { let mut rng = TestRng::new(); let mut fixture = TestFixture::new(&mut rng); @@ -633,34 +674,92 @@ async fn should_serve_events_with_no_query(path: &str) { let url = url(server_address, path, None); let (expected_events, final_id) = fixture.all_filtered_events(path); + let (expected_events, final_id) = + adjust_final_id(is_legacy_endpoint, expected_events, final_id); let received_events = subscribe(&url, barrier, final_id, "client").await.unwrap(); fixture.stop_server().await; + compare_received_events_for_legacy_endpoints( + is_legacy_endpoint, + expected_events, + received_events, + ); +} + +/// In legacy endpoints not all input events will be re-emitted to output. If an input (2.x) event is not translatable +/// to 1.x it will be muffled. 
So we need to adjust the final id to the last event that was 1.x translatable. +fn adjust_final_id( + is_legacy_endpoint: bool, + expected_events: Vec, + final_id: u32, +) -> (Vec, u32) { + let (expected_events, final_id) = if is_legacy_endpoint { + let legacy_compliant_events: Vec = expected_events + .iter() + .filter_map(|event| { + let sse_data = serde_json::from_str::(&event.data).unwrap(); + LegacySseData::from(&sse_data).map(|_| event.clone()) + }) + .collect(); + let id = legacy_compliant_events.last().and_then(|el| el.id).unwrap(); + (legacy_compliant_events, id) + } else { + (expected_events, final_id) + }; + (expected_events, final_id) +} - assert_eq!(received_events, expected_events); +/// In legacy endpoints the node produces 2.x compliant sse events, but the node transforms them into legacy format. +/// So to compare we need to apply the translation logic to input 2.x events. +fn compare_received_events_for_legacy_endpoints( + is_legacy_endpoint: bool, + expected_events: Vec, + received_events: Vec, +) { + if is_legacy_endpoint { + let expected_legacy_events: Vec = expected_events + .iter() + .filter_map(|event| { + let sse_data = serde_json::from_str::(&event.data).unwrap(); + LegacySseData::from(&sse_data) + }) + .collect(); + let received_legacy_events: Vec = received_events + .iter() + .map(|event| serde_json::from_str::(&event.data).unwrap()) + .collect(); + assert_eq!(received_legacy_events, expected_legacy_events); + } else { + assert_eq!(received_events, expected_events); + } } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_serve_main_events_with_no_query() { - should_serve_events_with_no_query(MAIN_PATH).await; + should_serve_events_with_no_query(MAIN_PATH, true).await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_serve_deploy_accepted_events_with_no_query() { - should_serve_events_with_no_query(DEPLOYS_PATH).await; + should_serve_events_with_no_query(DEPLOYS_PATH, true).await; } 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_serve_signature_events_with_no_query() { - should_serve_events_with_no_query(SIGS_PATH).await; + should_serve_events_with_no_query(SIGS_PATH, true).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_serve_firehose_events_with_no_query() { + should_serve_events_with_no_query(ROOT_PATH, false).await; } /// Client setup: /// * `/events/?start_from=25` /// * connected just before event ID 50 /// -/// Expected to receive main, deploy-accepted or signature events (depending on `path`) from ID 25 +/// Expected to receive main, transaction-accepted or signature events (depending on `path`) from ID 25 /// onwards, as events 25 to 49 should still be in the server buffer. -async fn should_serve_events_with_query(path: &str) { +async fn should_serve_events_with_query(path: &str, is_legacy_endpoint: bool) { let mut rng = TestRng::new(); let mut fixture = TestFixture::new(&mut rng); @@ -673,34 +772,45 @@ async fn should_serve_events_with_query(path: &str) { let url = url(server_address, path, Some(start_from_event_id)); let (expected_events, final_id) = fixture.filtered_events(path, start_from_event_id); + let (expected_events, final_id) = + adjust_final_id(is_legacy_endpoint, expected_events, final_id); let received_events = subscribe(&url, barrier, final_id, "client").await.unwrap(); fixture.stop_server().await; - assert_eq!(received_events, expected_events); + compare_received_events_for_legacy_endpoints( + is_legacy_endpoint, + expected_events, + received_events, + ); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_serve_main_events_with_query() { - should_serve_events_with_query(MAIN_PATH).await; + should_serve_events_with_query(MAIN_PATH, true).await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_serve_deploy_accepted_events_with_query() { - should_serve_events_with_query(DEPLOYS_PATH).await; + 
should_serve_events_with_query(DEPLOYS_PATH, true).await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_serve_signature_events_with_query() { - should_serve_events_with_query(SIGS_PATH).await; + should_serve_events_with_query(SIGS_PATH, true).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_serve_firehose_events_with_query() { + should_serve_events_with_query(ROOT_PATH, false).await; } /// Client setup: /// * `/events/?start_from=0` /// * connected just before event ID 75 /// -/// Expected to receive main, deploy-accepted or signature events (depending on `path`) from ID 25 +/// Expected to receive main, transaction-accepted or signature events (depending on `path`) from ID 25 /// onwards, as events 0 to 24 should have been purged from the server buffer. -async fn should_serve_remaining_events_with_query(path: &str) { +async fn should_serve_remaining_events_with_query(path: &str, is_legacy_endpoint: bool) { let mut rng = TestRng::new(); let mut fixture = TestFixture::new(&mut rng); @@ -714,34 +824,45 @@ async fn should_serve_remaining_events_with_query(path: &str) { let url = url(server_address, path, Some(start_from_event_id)); let expected_first_event = connect_at_event_id - BUFFER_LENGTH; let (expected_events, final_id) = fixture.filtered_events(path, expected_first_event); + let (expected_events, final_id) = + adjust_final_id(is_legacy_endpoint, expected_events, final_id); let received_events = subscribe(&url, barrier, final_id, "client").await.unwrap(); fixture.stop_server().await; - assert_eq!(received_events, expected_events); + compare_received_events_for_legacy_endpoints( + is_legacy_endpoint, + expected_events, + received_events, + ); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_serve_remaining_main_events_with_query() { - should_serve_remaining_events_with_query(MAIN_PATH).await; + should_serve_remaining_events_with_query(MAIN_PATH, true).await; } 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_serve_remaining_deploy_accepted_events_with_query() { - should_serve_remaining_events_with_query(DEPLOYS_PATH).await; + should_serve_remaining_events_with_query(DEPLOYS_PATH, true).await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_serve_remaining_signature_events_with_query() { - should_serve_remaining_events_with_query(SIGS_PATH).await; + should_serve_remaining_events_with_query(SIGS_PATH, true).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_serve_remaining_firehose_events_with_query() { + should_serve_remaining_events_with_query(ROOT_PATH, false).await; } /// Client setup: /// * `/events/?start_from=25` /// * connected before first event /// -/// Expected to receive all main, deploy-accepted or signature events (depending on `path`), as +/// Expected to receive all main, transaction-accepted or signature events (depending on `path`), as /// event 25 hasn't been added to the server buffer yet. 
-async fn should_serve_events_with_query_for_future_event(path: &str) { +async fn should_serve_events_with_query_for_future_event(path: &str, is_legacy_endpoint: bool) { let mut rng = TestRng::new(); let mut fixture = TestFixture::new(&mut rng); @@ -751,25 +872,36 @@ async fn should_serve_events_with_query_for_future_event(path: &str) { let url = url(server_address, path, Some(25)); let (expected_events, final_id) = fixture.all_filtered_events(path); + let (expected_events, final_id) = + adjust_final_id(is_legacy_endpoint, expected_events, final_id); let received_events = subscribe(&url, barrier, final_id, "client").await.unwrap(); fixture.stop_server().await; - assert_eq!(received_events, expected_events); + compare_received_events_for_legacy_endpoints( + is_legacy_endpoint, + expected_events, + received_events, + ); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_serve_main_events_with_query_for_future_event() { - should_serve_events_with_query_for_future_event(MAIN_PATH).await; + should_serve_events_with_query_for_future_event(MAIN_PATH, true).await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_serve_deploy_accepted_events_with_query_for_future_event() { - should_serve_events_with_query_for_future_event(DEPLOYS_PATH).await; + should_serve_events_with_query_for_future_event(DEPLOYS_PATH, true).await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_serve_signature_events_with_query_for_future_event() { - should_serve_events_with_query_for_future_event(SIGS_PATH).await; + should_serve_events_with_query_for_future_event(SIGS_PATH, true).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_serve_firehose_events_with_query_for_future_event() { + should_serve_events_with_query_for_future_event(ROOT_PATH, false).await; } /// Checks that when a server is shut down (e.g. 
for a node upgrade), connected clients don't have @@ -782,39 +914,24 @@ async fn server_exit_should_gracefully_shut_down_stream() { // Start the server, waiting for three clients to connect. let mut server_behavior = ServerBehavior::new(); let barrier1 = server_behavior.add_client_sync_before_event(0); - let barrier2 = server_behavior.add_client_sync_before_event(0); - let barrier3 = server_behavior.add_client_sync_before_event(0); let server_address = fixture.run_server(server_behavior).await; - let url1 = url(server_address, MAIN_PATH, None); - let url2 = url(server_address, DEPLOYS_PATH, None); - let url3 = url(server_address, SIGS_PATH, None); + let url1 = url(server_address, ROOT_PATH, None); // Run the three clients, and stop the server after a short delay. - let (received_events1, received_events2, received_events3, _) = join!( - subscribe(&url1, barrier1, EVENT_COUNT, "client 1"), - subscribe(&url2, barrier2, EVENT_COUNT, "client 2"), - subscribe(&url3, barrier3, EVENT_COUNT, "client 3"), - async { - time::sleep(DELAY_BETWEEN_EVENTS * EVENT_COUNT / 2).await; - fixture.stop_server().await - } - ); + let (received_events1, _) = join!(subscribe(&url1, barrier1, EVENT_COUNT, "client 1"), async { + time::sleep(DELAY_BETWEEN_EVENTS * EVENT_COUNT / 2).await; + fixture.stop_server().await + }); // Ensure all clients' streams terminated without error. let received_events1 = received_events1.unwrap(); - let received_events2 = received_events2.unwrap(); - let received_events3 = received_events3.unwrap(); // Ensure all clients received some events... assert!(!received_events1.is_empty()); - assert!(!received_events2.is_empty()); - assert!(!received_events3.is_empty()); // ...but not the full set they would have if the server hadn't stopped early. 
- assert!(received_events1.len() < fixture.all_filtered_events(MAIN_PATH).0.len()); - assert!(received_events2.len() < fixture.all_filtered_events(DEPLOYS_PATH).0.len()); - assert!(received_events3.len() < fixture.all_filtered_events(SIGS_PATH).0.len()); + assert!(received_events1.len() < fixture.all_filtered_events(ROOT_PATH).0.len()); } /// Checks that clients which don't consume the events in a timely manner are forcibly disconnected @@ -829,40 +946,28 @@ async fn lagging_clients_should_be_disconnected() { // at most `MAX_EVENT_COUNT` events, but the clients' futures should return before that, having // been disconnected for lagging. let mut server_behavior = ServerBehavior::new_for_lagging_test(); - let barrier_main = server_behavior.add_client_sync_before_event(0); - let barrier_deploys = server_behavior.add_client_sync_before_event(0); - let barrier_sigs = server_behavior.add_client_sync_before_event(0); + let barrier_events = server_behavior.add_client_sync_before_event(0); let server_address = fixture.run_server(server_behavior).await; - let url_main = url(server_address, MAIN_PATH, None); - let url_deploys = url(server_address, DEPLOYS_PATH, None); - let url_sigs = url(server_address, SIGS_PATH, None); + let url_events = url(server_address, ROOT_PATH, None); // Run the slow clients, then stop the server. - let (result_slow_main, result_slow_deploys, result_slow_sigs) = join!( - subscribe_slow(&url_main, barrier_main, "client 1"), - subscribe_slow(&url_deploys, barrier_deploys, "client 2"), - subscribe_slow(&url_sigs, barrier_sigs, "client 3"), - ); + let result_slow_events = subscribe_slow(&url_events, barrier_events, "client 1").await; fixture.stop_server().await; // Ensure both slow clients' streams terminated with an `UnexpectedEof` error. 
let check_error = |result: Result<(), reqwest::Error>| { let kind = result .unwrap_err() .source() - .expect("reqwest::Error should have source") - .downcast_ref::() - .expect("reqwest::Error's source should be a hyper::Error") + .expect("should have source") .source() - .expect("hyper::Error should have source") + .expect("should have source") .downcast_ref::() - .expect("hyper::Error's source should be a std::io::Error") + .expect("should be a std::io::Error") .kind(); assert!(matches!(kind, io::ErrorKind::UnexpectedEof)); }; - check_error(result_slow_main); - check_error(result_slow_deploys); - check_error(result_slow_sigs); + check_error(result_slow_events); } /// Checks that clients using the correct but wrong path get a helpful error response. @@ -882,30 +987,15 @@ async fn should_handle_bad_url_path() { format!("http://{}/{}?{}=0", server_address, QUERY_FIELD, ROOT_PATH), format!("http://{}/{}/bad", server_address, ROOT_PATH), format!("http://{}/{}/bad?{}=0", server_address, QUERY_FIELD, ROOT_PATH), - format!("http://{}/{}/{}bad", server_address, ROOT_PATH, MAIN_PATH), - format!("http://{}/{}/{}bad?{}=0", server_address, QUERY_FIELD, ROOT_PATH, MAIN_PATH), - format!("http://{}/{}/{}bad", server_address, ROOT_PATH, DEPLOYS_PATH), - format!("http://{}/{}/{}bad?{}=0", server_address, QUERY_FIELD, ROOT_PATH, DEPLOYS_PATH), - format!("http://{}/{}/{}bad", server_address, ROOT_PATH, SIGS_PATH), - format!("http://{}/{}/{}bad?{}=0", server_address, QUERY_FIELD, ROOT_PATH, SIGS_PATH), - format!("http://{}/{}/{}/bad", server_address, ROOT_PATH, MAIN_PATH), - format!("http://{}/{}/{}/bad?{}=0", server_address, QUERY_FIELD, ROOT_PATH, MAIN_PATH), - format!("http://{}/{}/{}/bad", server_address, ROOT_PATH, DEPLOYS_PATH), - format!("http://{}/{}/{}/bad?{}=0", server_address, QUERY_FIELD, ROOT_PATH, DEPLOYS_PATH), - format!("http://{}/{}/{}/bad", server_address, ROOT_PATH, SIGS_PATH), - format!("http://{}/{}/{}/bad?{}=0", server_address, QUERY_FIELD, ROOT_PATH, SIGS_PATH), 
]; - let expected_body = format!( - "invalid path: expected '/{0}/{1}', '/{0}/{2}' or '/{0}/{3}'", - ROOT_PATH, MAIN_PATH, DEPLOYS_PATH, SIGS_PATH - ); + let expected_body = "invalid path: expected '/events/main', '/events/deploys' or '/events/sigs or '/events/sidecar'"; for url in &urls { let response = reqwest::get(url).await.unwrap(); assert_eq!(response.status(), StatusCode::NOT_FOUND, "URL: {}", url); assert_eq!( response.text().await.unwrap().trim(), - &expected_body, + expected_body, "URL: {}", url ); @@ -921,34 +1011,21 @@ async fn start_query_url_test() -> (TestFixture, SocketAddr) { (fixture, server_address) } -fn build_urls(server_address: SocketAddr) -> (String, String, String) { - let main_url = format!("http://{}/{}/{}", server_address, ROOT_PATH, MAIN_PATH); - let deploys_url = format!("http://{}/{}/{}", server_address, ROOT_PATH, DEPLOYS_PATH); - let sigs_url = format!("http://{}/{}/{}", server_address, ROOT_PATH, SIGS_PATH); - (main_url, deploys_url, sigs_url) +fn build_urls(server_address: SocketAddr) -> String { + format!("http://{}/{}", server_address, ROOT_PATH) } /// Checks that clients using the correct but wrong query get a helpful error /// response. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_handle_bad_url_query() { let (mut fixture, server_address) = start_query_url_test().await; - let (main_url, deploys_url, sigs_url) = build_urls(server_address); + let events_url = build_urls(server_address); let urls = [ - format!("{}?not-a-kv-pair", main_url), - format!("{}?not-a-kv-pair", deploys_url), - format!("{}?not-a-kv-pair", sigs_url), - format!("{}?start_fro=0", main_url), - format!("{}?start_fro=0", deploys_url), - format!("{}?start_fro=0", sigs_url), - format!("{}?{}=not-integer", main_url, QUERY_FIELD), - format!("{}?{}=not-integer", deploys_url, QUERY_FIELD), - format!("{}?{}=not-integer", sigs_url, QUERY_FIELD), - format!("{}?{}='0'", main_url, QUERY_FIELD), - format!("{}?{}='0'", deploys_url, QUERY_FIELD), - format!("{}?{}='0'", sigs_url, QUERY_FIELD), - format!("{}?{}=0&extra=1", main_url, QUERY_FIELD), - format!("{}?{}=0&extra=1", deploys_url, QUERY_FIELD), - format!("{}?{}=0&extra=1", sigs_url, QUERY_FIELD), + format!("{}?not-a-kv-pair", events_url), + format!("{}?start_fro=0", events_url), + format!("{}?{}=not-integer", events_url, QUERY_FIELD), + format!("{}?{}='0'", events_url, QUERY_FIELD), + format!("{}?{}=0&extra=1", events_url, QUERY_FIELD), ]; let expected_body = format!( "invalid query: expected single field '{}='", @@ -972,8 +1049,54 @@ async fn should_handle_bad_url_query() { fixture.stop_server().await; } +async fn subscribe_for_comment( + url: &str, + barrier: Arc, + final_event_id: Id, + client_id: &str, +) -> String { + timeout(Duration::from_secs(60), barrier.wait()) + .await + .unwrap(); + let response = reqwest::get(url).await.unwrap(); + timeout(Duration::from_secs(60), barrier.wait()) + .await + .unwrap(); + let stream = response.bytes_stream(); + let final_id_line = format!("id:{}", final_event_id); // This theoretically is not optimal since we don't need to read all the events from the test, + // but it's not easy to determine what id is the first 
one in the stream for legacy tests. + let keepalive = ":"; + fetch_text( + Box::pin(stream), + final_id_line, + keepalive, + client_id, + final_event_id, + ) + .await + .unwrap() +} + +async fn should_get_comment(path: &str) { + let mut rng = TestRng::new(); + let mut fixture = TestFixture::new(&mut rng); + let mut server_behavior = ServerBehavior::new(); + let barrier = server_behavior.add_client_sync_before_event(0); + let server_address = fixture.run_server(server_behavior).await; + + // Consume these and stop the server. + let url = url(server_address, path, None); + let (expected_events, final_id) = fixture.all_filtered_events(path); + let (_expected_events, final_id) = adjust_final_id(true, expected_events, final_id); + let text = subscribe_for_comment(&url, barrier, final_id, "client 1").await; + fixture.stop_server().await; + let expected_start = format!(":{}", LEGACY_ENDPOINT_NOTICE); + assert!(text.as_str().starts_with(expected_start.as_str())); +} + +#[allow(clippy::too_many_lines)] /// Check that a server which restarts continues from the previous numbering of event IDs. -async fn should_persist_event_ids(path: &str) { +async fn should_persist_event_ids(path: &str, is_legacy_endpoint: bool) { let mut rng = TestRng::new(); let mut fixture = TestFixture::new(&mut rng); @@ -985,7 +1108,9 @@ async fn should_persist_event_ids(path: &str) { // Consume these and stop the server. 
let url = url(server_address, path, None); - let (_expected_events, final_id) = fixture.all_filtered_events(path); + let (expected_events, final_id) = fixture.all_filtered_events(path); + let (_expected_events, final_id) = + adjust_final_id(is_legacy_endpoint, expected_events, final_id); let _ = subscribe(&url, barrier, final_id, "client 1") .await .unwrap(); @@ -994,7 +1119,6 @@ async fn should_persist_event_ids(path: &str) { }; assert!(first_run_final_id > 0); - { // Start a new server with a client barrier set for just before event ID 100 + 1 (the extra // event being the `Shutdown`). @@ -1009,32 +1133,52 @@ async fn should_persist_event_ids(path: &str) { // Consume the events and assert their IDs are all >= `first_run_final_id`. let url = url(server_address, path, None); let (expected_events, final_id) = fixture.filtered_events(path, EVENT_COUNT + 1); + let (expected_events, final_id) = + adjust_final_id(is_legacy_endpoint, expected_events, final_id); let received_events = subscribe(&url, barrier, final_id, "client 2") .await .unwrap(); fixture.stop_server().await; - - assert_eq!(received_events, expected_events); assert!(received_events .iter() .skip(1) .all(|event| event.id.unwrap() >= first_run_final_id)); + compare_received_events_for_legacy_endpoints( + is_legacy_endpoint, + expected_events, + received_events, + ); } } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn should_persist_main_event_ids() { - should_persist_event_ids(MAIN_PATH).await; +async fn should_get_comment_on_first_message_in_deploys() { + should_get_comment(DEPLOYS_PATH).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_get_comment_on_first_message_in_sigs() { + should_get_comment(SIGS_PATH).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_get_comment_on_first_message_in_main() { + should_get_comment(MAIN_PATH).await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn 
should_persist_deploy_accepted_event_ids() { - should_persist_event_ids(DEPLOYS_PATH).await; + should_persist_event_ids(DEPLOYS_PATH, true).await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn should_persist_signature_event_ids() { - should_persist_event_ids(SIGS_PATH).await; + should_persist_event_ids(SIGS_PATH, true).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_persist_firehose_event_ids() { + should_persist_event_ids(ROOT_PATH, false).await; } /// Check that a server handles wrapping round past the maximum value for event IDs. @@ -1081,18 +1225,8 @@ async fn should_handle_wrapping_past_max_event_id(path: &str) { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn should_handle_wrapping_past_max_event_id_for_main() { - should_handle_wrapping_past_max_event_id(MAIN_PATH).await; -} - -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn should_handle_wrapping_past_max_event_id_for_deploy_accepted() { - should_handle_wrapping_past_max_event_id(DEPLOYS_PATH).await; -} - -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn should_handle_wrapping_past_max_event_id_for_signatures() { - should_handle_wrapping_past_max_event_id(SIGS_PATH).await; +async fn should_handle_wrapping_past_max_event_id_for_events() { + should_handle_wrapping_past_max_event_id(ROOT_PATH).await; } /// Checks that a server rejects new clients with an HTTP 503 when it already has the specified @@ -1103,78 +1237,43 @@ async fn should_limit_concurrent_subscribers() { let mut rng = TestRng::new(); let mut fixture = TestFixture::new(&mut rng); - // Start the server with `max_concurrent_subscribers == 3`, and set to wait for three clients to + // Start the server with `max_concurrent_subscribers == 4`, and set to wait for three clients to // connect at event 0 and another three at event 1. 
let mut server_behavior = ServerBehavior::new(); - server_behavior.set_max_concurrent_subscribers(3); + server_behavior.set_max_concurrent_subscribers(1); let barrier1 = server_behavior.add_client_sync_before_event(0); - let barrier2 = server_behavior.add_client_sync_before_event(0); - let barrier3 = server_behavior.add_client_sync_before_event(0); - let barrier4 = server_behavior.add_client_sync_before_event(1); let barrier5 = server_behavior.add_client_sync_before_event(1); - let barrier6 = server_behavior.add_client_sync_before_event(1); let server_address = fixture.run_server(server_behavior).await; - let url_main = url(server_address, MAIN_PATH, None); - let url_deploys = url(server_address, DEPLOYS_PATH, None); - let url_sigs = url(server_address, SIGS_PATH, None); + let url_root = url(server_address, ROOT_PATH, None); - let (expected_main_events, final_main_id) = fixture.all_filtered_events(MAIN_PATH); - let (expected_deploys_events, final_deploys_id) = fixture.all_filtered_events(DEPLOYS_PATH); - let (expected_sigs_events, final_sigs_id) = fixture.all_filtered_events(SIGS_PATH); + let (expected_events_root, final_id) = fixture.all_filtered_events(ROOT_PATH); // Run the six clients. - let ( - received_events_main, - received_events_deploys, - received_events_sigs, - empty_events_main, - empty_events_deploys, - empty_events_sigs, - ) = join!( - subscribe(&url_main, barrier1, final_main_id, "client 1"), - subscribe(&url_deploys, barrier2, final_deploys_id, "client 2"), - subscribe(&url_sigs, barrier3, final_sigs_id, "client 3"), - subscribe(&url_main, barrier4, final_main_id, "client 4"), - subscribe(&url_deploys, barrier5, final_deploys_id, "client 5"), - subscribe(&url_sigs, barrier6, final_sigs_id, "client 6"), + let (received_events_1, empty_events_1) = join!( + subscribe(&url_root, barrier1, final_id, "client 1"), + subscribe(&url_root, barrier5, final_id, "client 2"), ); // Check the first three received all expected events. 
- assert_eq!(received_events_main.unwrap(), expected_main_events); - assert_eq!(received_events_deploys.unwrap(), expected_deploys_events); - assert_eq!(received_events_sigs.unwrap(), expected_sigs_events); + assert_eq!(received_events_1.unwrap(), expected_events_root); // Check the second three received no events. - assert!(empty_events_main.unwrap().is_empty()); - assert!(empty_events_deploys.unwrap().is_empty()); - assert!(empty_events_sigs.unwrap().is_empty()); + assert!(empty_events_1.unwrap().is_empty()); // Check that now the first clients have all disconnected, three new clients can connect. Have // them start from event 80 to allow them to actually pull some events off the stream (as the // server has by now stopped creating any new events). let start_id = EVENT_COUNT - 20; - let url_main = url(server_address, MAIN_PATH, Some(start_id)); - let url_deploys = url(server_address, DEPLOYS_PATH, Some(start_id)); - let url_sigs = url(server_address, SIGS_PATH, Some(start_id)); + let url_root = url(server_address, ROOT_PATH, Some(start_id)); - let (expected_main_events, final_main_id) = fixture.filtered_events(MAIN_PATH, start_id); - let (expected_deploys_events, final_deploys_id) = - fixture.filtered_events(DEPLOYS_PATH, start_id); - let (expected_sigs_events, final_sigs_id) = fixture.filtered_events(SIGS_PATH, start_id); + let (expected_root_events, final_root_id) = fixture.filtered_events(ROOT_PATH, start_id); - let (received_events_main, received_events_deploys, received_events_sigs) = join!( - subscribe_no_sync(&url_main, final_main_id, "client 7"), - subscribe_no_sync(&url_deploys, final_deploys_id, "client 8"), - subscribe_no_sync(&url_sigs, final_sigs_id, "client 9"), - ); + let received_events_root = subscribe_no_sync(&url_root, final_root_id, "client 3").await; // Check the last three clients' received events are as expected. 
- assert_eq!(received_events_main.unwrap(), expected_main_events); - assert_eq!(received_events_deploys.unwrap(), expected_deploys_events); - assert_eq!(received_events_sigs.unwrap(), expected_sigs_events); - + assert_eq!(received_events_root.unwrap(), expected_root_events); fixture.stop_server().await; } @@ -1185,10 +1284,12 @@ fn build_id_filter(from: u128) -> FilterLambda { } let data = match event { - SseData::DeployAccepted { deploy } => serde_json::to_string(&DeployAccepted { - deploy_accepted: deploy.clone(), - }) - .unwrap(), + SseData::TransactionAccepted(transaction) => { + serde_json::to_string(&TransactionAccepted { + transaction_accepted: transaction.clone(), + }) + .unwrap() + } _ => serde_json::to_string(event).unwrap(), }; diff --git a/event_sidecar/src/lib.rs b/event_sidecar/src/lib.rs new file mode 100644 index 00000000..70381f3d --- /dev/null +++ b/event_sidecar/src/lib.rs @@ -0,0 +1,527 @@ +#![deny(clippy::complexity)] +#![deny(clippy::cognitive_complexity)] +#![deny(clippy::too_many_lines)] + +extern crate core; +mod admin_server; +mod api_version_manager; +mod database; +mod event_handling_service; +mod event_stream_server; +pub mod rest_server; +mod sql; +#[cfg(test)] +pub(crate) mod testing; +#[cfg(test)] +pub(crate) mod tests; +mod types; +mod utils; +use std::{collections::HashMap, path::PathBuf, process::ExitCode, sync::Arc, time::Duration}; + +use crate::types::config::LegacySseApiTag; +use crate::{ + event_stream_server::{Config as SseConfig, EventStreamServer}, + rest_server::run_server as start_rest_server, + types::sse_events::*, +}; +use anyhow::{Context, Error}; +use api_version_manager::{ApiVersionManager, GuardedApiVersionManager}; +use casper_event_listener::{ + EventListener, EventListenerBuilder, NodeConnectionInterface, SseEvent, +}; +use casper_event_types::{sse_data::SseData, Filter}; +use casper_types::ProtocolVersion; +use event_handling_service::{ + DbSavingEventHandlingService, EventHandlingService, 
NoDbEventHandlingService, +}; +use futures::future::join_all; +use tokio::sync::Mutex; +use tokio::{ + sync::mpsc::{channel as mpsc_channel, Receiver, Sender}, + task::JoinHandle, + time::sleep, +}; +use tracing::{error, info}; +#[cfg(feature = "additional-metrics")] +use utils::start_metrics_thread; + +pub use admin_server::run_server as run_admin_server; +pub use database::DatabaseConfigError; +pub use types::{ + config::{ + AdminApiServerConfig, Connection, RestApiServerConfig, SseEventServerConfig, StorageConfig, + StorageConfigSerdeTarget, + }, + database::{Database, LazyDatabaseWrapper}, +}; + +const DEFAULT_CHANNEL_SIZE: usize = 1000; + +pub async fn run( + config: SseEventServerConfig, + index_storage_folder: String, + maybe_database: Option, + maybe_network_name: Option, +) -> Result { + validate_config(&config)?; + let (event_listeners, sse_data_receivers) = build_event_listeners(&config, maybe_network_name)?; + // This channel allows SseData to be sent from multiple connected nodes to the single EventStreamServer. + let (outbound_sse_data_sender, outbound_sse_data_receiver) = + mpsc_channel(config.outbound_channel_size.unwrap_or(DEFAULT_CHANNEL_SIZE)); + let connection_configs = config.connections.clone(); + + // Task to manage incoming events from all three filters + let listening_task_handle = start_sse_processors( + connection_configs, + event_listeners, + sse_data_receivers, + maybe_database, + outbound_sse_data_sender.clone(), + ); + + let event_broadcasting_handle = start_event_broadcasting( + &config, + index_storage_folder, + outbound_sse_data_receiver, + config + .emulate_legacy_sse_apis + .as_ref() + .map(|v| v.contains(&LegacySseApiTag::V1)) + .unwrap_or(false), + ); + info!(address = %config.event_stream_server.port, "started {} server", "SSE"); + tokio::try_join!( + flatten_handle(event_broadcasting_handle), + flatten_handle(listening_task_handle), + ) + .map(|_| Ok(ExitCode::SUCCESS))? 
+} + +fn start_event_broadcasting( + config: &SseEventServerConfig, + index_storage_folder: String, + mut outbound_sse_data_receiver: Receiver<(SseData, Option)>, + enable_legacy_filters: bool, +) -> JoinHandle> { + let event_stream_server_port = config.event_stream_server.port; + let buffer_length = config.event_stream_server.event_stream_buffer_length; + let max_concurrent_subscribers = config.event_stream_server.max_concurrent_subscribers; + tokio::spawn(async move { + // Create new instance for the Sidecar's Event Stream Server + let mut event_stream_server = EventStreamServer::new( + SseConfig::new( + event_stream_server_port, + Some(buffer_length), + Some(max_concurrent_subscribers), + ), + PathBuf::from(index_storage_folder), + enable_legacy_filters, + ) + .context("Error starting EventStreamServer")?; + while let Some((sse_data, inbound_filter)) = outbound_sse_data_receiver.recv().await { + event_stream_server.broadcast(sse_data, inbound_filter); + } + Err::<(), Error>(Error::msg("Event broadcasting finished")) + }) +} + +fn start_sse_processors( + connection_configs: Vec, + event_listeners: Vec, + sse_data_receivers: Vec>, + maybe_database: Option, + outbound_sse_data_sender: Sender<(SseData, Option)>, +) -> JoinHandle> { + tokio::spawn(async move { + let mut join_handles = Vec::with_capacity(event_listeners.len()); + let api_version_manager = ApiVersionManager::new(); + + for ((mut event_listener, connection_config), sse_data_receiver) in event_listeners + .into_iter() + .zip(connection_configs) + .zip(sse_data_receivers) + { + tokio::spawn(async move { + let res = event_listener.stream_aggregated_events().await; + if let Err(e) = res { + let addr = event_listener.get_node_interface().ip_address.to_string(); + error!("Disconnected from {}. 
Reason: {}", addr, e.to_string()); + } + }); + let join_handle = spawn_sse_processor( + maybe_database.clone(), + sse_data_receiver, + &outbound_sse_data_sender, + connection_config, + &api_version_manager, + ); + join_handles.push(join_handle); + } + + let _ = join_all(join_handles).await; + //Send Shutdown to the sidecar sse endpoint + let _ = outbound_sse_data_sender + .send((SseData::Shutdown, None)) + .await; + // Below sleep is a workaround to allow the above Shutdown to propagate. + // If we don't do this there is a race condition between handling of the message and dropping of the outbound server + // which happens when we leave this function and the `tokio::try_join!` exits due to this. This race condition causes 9 of 10 + // tries to not propagate the Shutdown (ususally drop happens faster than message propagation to outbound). + // Fixing this race condition would require rewriting a lot of code. AFAICT the only drawback to this workaround is that the + // rest server and the sse server will exit 200ms later than it would without it. 
+ sleep(Duration::from_millis(200)).await; + Err::<(), Error>(Error::msg("Connected node(s) are unavailable")) + }) +} + +fn spawn_sse_processor( + maybe_database: Option, + sse_data_receiver: Receiver, + outbound_sse_data_sender: &Sender<(SseData, Option)>, + connection_config: Connection, + api_version_manager: &std::sync::Arc>, +) -> JoinHandle> { + match maybe_database { + Some(Database::SqliteDatabaseWrapper(db)) => { + let event_handling_service = DbSavingEventHandlingService::new( + outbound_sse_data_sender.clone(), + db, + connection_config.enable_logging, + ); + tokio::spawn(sse_processor( + sse_data_receiver, + event_handling_service, + false, + api_version_manager.clone(), + )) + } + Some(Database::PostgreSqlDatabaseWrapper(db)) => { + let event_handling_service = DbSavingEventHandlingService::new( + outbound_sse_data_sender.clone(), + db, + connection_config.enable_logging, + ); + tokio::spawn(sse_processor( + sse_data_receiver, + event_handling_service, + true, + api_version_manager.clone(), + )) + } + None => { + let event_handling_service = NoDbEventHandlingService::new( + outbound_sse_data_sender.clone(), + connection_config.enable_logging, + ); + tokio::spawn(sse_processor( + sse_data_receiver, + event_handling_service, + true, + api_version_manager.clone(), + )) + } + } +} + +pub async fn run_rest_server( + rest_server_config: RestApiServerConfig, + database: Database, +) -> Result { + match database { + Database::SqliteDatabaseWrapper(db) => start_rest_server(rest_server_config, db).await, + Database::PostgreSqlDatabaseWrapper(db) => start_rest_server(rest_server_config, db).await, + } + .map(|_| ExitCode::SUCCESS) +} + +fn build_event_listeners( + config: &SseEventServerConfig, + maybe_network_name: Option, +) -> Result<(Vec, Vec>), Error> { + let mut event_listeners = Vec::with_capacity(config.connections.len()); + let mut sse_data_receivers = Vec::new(); + for connection in &config.connections { + let (inbound_sse_data_sender, 
inbound_sse_data_receiver) = + mpsc_channel(config.inbound_channel_size.unwrap_or(DEFAULT_CHANNEL_SIZE)); + sse_data_receivers.push(inbound_sse_data_receiver); + let event_listener = builder( + connection, + inbound_sse_data_sender, + maybe_network_name.clone(), + )? + .build(); + event_listeners.push(event_listener?); + } + Ok((event_listeners, sse_data_receivers)) +} + +fn builder( + connection: &Connection, + inbound_sse_data_sender: Sender, + maybe_network_name: Option, +) -> Result { + let node_interface = NodeConnectionInterface { + ip_address: connection.ip_address, + sse_port: connection.sse_port, + rest_port: connection.rest_port, + network_name: maybe_network_name, + }; + let event_listener_builder = EventListenerBuilder { + node: node_interface, + max_connection_attempts: connection.max_attempts, + delay_between_attempts: Duration::from_secs( + connection.delay_between_retries_in_seconds as u64, + ), + allow_partial_connection: connection.allow_partial_connection, + sse_event_sender: inbound_sse_data_sender, + connection_timeout: Duration::from_secs( + connection.connection_timeout_in_seconds.unwrap_or(5) as u64, + ), + sleep_between_keep_alive_checks: Duration::from_secs( + connection + .sleep_between_keep_alive_checks_in_seconds + .unwrap_or(60) as u64, + ), + no_message_timeout: Duration::from_secs( + connection.no_message_timeout_in_seconds.unwrap_or(120) as u64, + ), + }; + Ok(event_listener_builder) +} + +fn validate_config(config: &SseEventServerConfig) -> Result<(), Error> { + if config + .connections + .iter() + .any(|connection| connection.max_attempts < 1) + { + return Err(Error::msg( + "Unable to run: max_attempts setting must be above 0 for the sidecar to attempt connection" + )); + } + Ok(()) +} + +async fn flatten_handle(handle: JoinHandle>) -> Result { + match handle.await { + Ok(Ok(result)) => Ok(result), + Ok(Err(err)) => Err(err), + Err(join_err) => Err(Error::from(join_err)), + } +} + +/// Function to handle single event in the 
sse_processor. +/// Returns false if the handling indicated that no other messages should be processed. +/// Returns true otherwise. +#[allow(clippy::too_many_lines)] +async fn handle_single_event( + sse_event: SseEvent, + event_handling_service: EHS, + api_version_manager: GuardedApiVersionManager, +) { + match &sse_event.data { + SseData::SidecarVersion(_) => { + error!("Received SseData::SidecarVersion on inbound SSE from the node which should never happen"); + //Do nothing -> the inbound shouldn't produce this endpoint, it can be only produced by sidecar + //to the outbound + } + SseData::ApiVersion(version) => { + handle_api_version( + version, + api_version_manager, + &event_handling_service, + sse_event.inbound_filter, + ) + .await; + } + SseData::BlockAdded { block, block_hash } => { + event_handling_service + .handle_block_added(*block_hash, block.clone(), sse_event) + .await; + } + SseData::TransactionAccepted(transaction) => { + let transaction_accepted = TransactionAccepted::new(transaction.clone()); + event_handling_service + .handle_transaction_accepted(transaction_accepted, sse_event) + .await; + } + SseData::TransactionExpired { transaction_hash } => { + event_handling_service + .handle_transaction_expired(*transaction_hash, sse_event) + .await; + } + SseData::TransactionProcessed { + transaction_hash, + initiator_addr, + timestamp, + ttl, + block_hash, + execution_result, + messages, + } => { + let transaction_processed = TransactionProcessed::new( + transaction_hash.clone(), + initiator_addr.clone(), + *timestamp, + *ttl, + block_hash.clone(), + execution_result.clone(), + messages.clone(), + ); + event_handling_service + .handle_transaction_processed(transaction_processed, sse_event) + .await; + } + SseData::Fault { + era_id, + timestamp, + public_key, + } => { + event_handling_service + .handle_fault(*era_id, *timestamp, public_key.clone(), sse_event) + .await; + } + SseData::FinalitySignature(fs) => { + let finality_signature = 
FinalitySignature::new(fs.clone()); + event_handling_service + .handle_finality_signature(finality_signature, sse_event) + .await; + } + SseData::Step { + era_id, + execution_effects, + } => { + let step = Step::new(*era_id, execution_effects.clone()); + event_handling_service.handle_step(step, sse_event).await; + } + SseData::Shutdown => event_handling_service.handle_shutdown(sse_event).await, + } +} + +async fn handle_api_version( + version: &ProtocolVersion, + api_version_manager: Arc>, + event_handling_service: &EHS, + inbound_filter: Filter, +) { + let version = *version; + let mut manager_guard = api_version_manager.lock().await; + let changed_newest_version = manager_guard.store_version(version); + if changed_newest_version { + event_handling_service + .handle_api_version(version, inbound_filter) + .await; + } + drop(manager_guard); +} + +async fn sse_processor( + inbound_sse_data_receiver: Receiver, + event_handling_service: EHS, + database_supports_multithreaded_processing: bool, + api_version_manager: GuardedApiVersionManager, +) -> Result<(), Error> { + #[cfg(feature = "additional-metrics")] + let metrics_tx = start_metrics_thread("sse_save".to_string()); + // This task starts the listener pushing events to the sse_data_receiver + if database_supports_multithreaded_processing { + start_multi_threaded_events_consumer( + inbound_sse_data_receiver, + event_handling_service, + api_version_manager, + #[cfg(feature = "additional-metrics")] + metrics_tx, + ) + .await; + } else { + start_single_threaded_events_consumer( + inbound_sse_data_receiver, + event_handling_service, + api_version_manager, + #[cfg(feature = "additional-metrics")] + metrics_tx, + ) + .await; + } + Ok(()) +} + +fn handle_events_in_thread( + mut queue_rx: Receiver, + event_handling_service: EHS, + api_version_manager: GuardedApiVersionManager, + #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, +) { + tokio::spawn(async move { + while let Some(sse_event) = 
queue_rx.recv().await { + handle_single_event( + sse_event, + event_handling_service.clone(), + api_version_manager.clone(), + ) + .await; + #[cfg(feature = "additional-metrics")] + let _ = metrics_sender.send(()).await; + } + }); +} + +fn build_queues(cache_size: usize) -> HashMap, Receiver)> { + let mut map = HashMap::new(); + map.insert(Filter::Events, mpsc_channel(cache_size)); + map +} + +async fn start_multi_threaded_events_consumer< + EHS: EventHandlingService + Clone + Send + Sync + 'static, +>( + mut inbound_sse_data_receiver: Receiver, + event_handling_service: EHS, + api_version_manager: GuardedApiVersionManager, + #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, +) { + let mut senders_and_receivers_map = build_queues(DEFAULT_CHANNEL_SIZE); + let mut senders_map = HashMap::new(); + for (filter, (tx, rx)) in senders_and_receivers_map.drain() { + handle_events_in_thread( + rx, + event_handling_service.clone(), + api_version_manager.clone(), + #[cfg(feature = "additional-metrics")] + metrics_sender.clone(), + ); + senders_map.insert(filter, tx); + } + + while let Some(sse_event) = inbound_sse_data_receiver.recv().await { + if let Some(tx) = senders_map.get(&sse_event.inbound_filter) { + tx.send(sse_event).await.unwrap() + } else { + error!( + "Failed to find an sse handler queue for inbound filter {}", + sse_event.inbound_filter + ); + break; + } + } +} + +async fn start_single_threaded_events_consumer< + EHS: EventHandlingService + Clone + Send + Sync + 'static, +>( + mut inbound_sse_data_receiver: Receiver, + event_handling_service: EHS, + api_version_manager: GuardedApiVersionManager, + #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, +) { + while let Some(sse_event) = inbound_sse_data_receiver.recv().await { + handle_single_event( + sse_event, + event_handling_service.clone(), + api_version_manager.clone(), + ) + .await; + #[cfg(feature = "additional-metrics")] + let _ = metrics_sender.send(()).await; + } +} diff 
--git a/event_sidecar/src/rest_server.rs b/event_sidecar/src/rest_server.rs new file mode 100644 index 00000000..2612677c --- /dev/null +++ b/event_sidecar/src/rest_server.rs @@ -0,0 +1,87 @@ +mod errors; +pub mod filters; +mod handlers; +mod metrics_layer; +mod openapi; +mod status; +#[cfg(test)] +mod tests; + +use anyhow::Error; +use hyper::Server; +use std::{net::TcpListener, time::Duration}; +use tower::{buffer::Buffer, make::Shared, ServiceBuilder}; +use tracing::info; +use warp::Filter; + +use crate::{ + types::{config::RestApiServerConfig, database::DatabaseReader}, + utils::resolve_address, +}; + +use self::metrics_layer::MetricsLayer; + +const BIND_ALL_INTERFACES: &str = "0.0.0.0"; + +pub async fn run_server( + config: RestApiServerConfig, + database: Db, +) -> Result<(), Error> { + let api = filters::combined_filters(database); + let address = format!("{}:{}", BIND_ALL_INTERFACES, config.port); + let socket_address = resolve_address(&address)?; + + let listener = TcpListener::bind(socket_address)?; + + let warp_service = warp::service(api.with(warp::cors().allow_any_origin())); + let tower_service = ServiceBuilder::new() + .concurrency_limit(config.max_concurrent_requests as usize) + .rate_limit( + config.max_requests_per_second as u64, + Duration::from_secs(1), + ) + .layer(MetricsLayer::new(path_abstraction_for_metrics)) + .service(warp_service); + info!(address = %address, "started {} server", "REST API"); + Server::from_tcp(listener)? 
+ .serve(Shared::new(Buffer::new(tower_service, 50))) + .await?; + + Err(Error::msg("REST server shutting down")) +} + +pub(super) fn path_abstraction_for_metrics(path: &str) -> String { + let parts = path + .split('/') + .filter(|el| !el.is_empty()) + .collect::>(); + if parts.is_empty() { + return "unknown".to_string(); + } + if parts[0] == "block" { + return "/block/(...)".to_string(); + } + if parts[0] == "step" { + return "/step/(...)".to_string(); + } + if parts[0] == "faults" { + return "/faults/(...)".to_string(); + } + if parts[0] == "signatures" { + return "/signatures/(...)".to_string(); + } + if parts[0] == "transaction" { + if parts.len() == 3 { + // paths like /transaction/deploy/ + return "/transaction/(...)".to_string(); + } + if parts.len() == 4 { + // paths like /transaction/accepted/deploy/ + return format!("/transaction/{}/(...)", parts[1]); + } + } + if parts[0] == "status" { + return "/status".to_string(); + } + "unknown".to_string() +} diff --git a/sidecar/src/rest_server/errors.rs b/event_sidecar/src/rest_server/errors.rs similarity index 100% rename from sidecar/src/rest_server/errors.rs rename to event_sidecar/src/rest_server/errors.rs diff --git a/sidecar/src/rest_server/filters.rs b/event_sidecar/src/rest_server/filters.rs similarity index 59% rename from sidecar/src/rest_server/filters.rs rename to event_sidecar/src/rest_server/filters.rs index e10435e7..cd5b1ba0 100644 --- a/sidecar/src/rest_server/filters.rs +++ b/event_sidecar/src/rest_server/filters.rs @@ -1,11 +1,39 @@ -use super::{errors::handle_rejection, handlers, openapi::build_open_api_filters}; +use super::{ + errors::handle_rejection, handlers, openapi::build_open_api_filters, status::status_filters, +}; use crate::{ - types::database::DatabaseReader, + types::database::{DatabaseReader, TransactionTypeId}, utils::{root_filter, InvalidPath}, }; -use std::convert::Infallible; +use std::{convert::Infallible, str::FromStr}; use warp::Filter; +pub enum TransactionTypeIdFilter { + 
Deploy, + Version1, +} + +impl From for TransactionTypeId { + fn from(val: TransactionTypeIdFilter) -> Self { + match val { + TransactionTypeIdFilter::Deploy => TransactionTypeId::Deploy, + TransactionTypeIdFilter::Version1 => TransactionTypeId::Version1, + } + } +} + +impl FromStr for TransactionTypeIdFilter { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "deploy" => Ok(TransactionTypeIdFilter::Deploy), + "version1" => Ok(TransactionTypeIdFilter::Version1), + _ => Err(format!("Invalid transaction type id: {}", s)), + } + } +} + /// Helper function to specify available filters. /// Input: the database with data to be filtered. /// Return: the filtered data. @@ -15,12 +43,13 @@ pub(super) fn combined_filters( root_filter() .or(root_and_invalid_path()) .or(block_filters(db.clone())) - .or(deploy_filters(db.clone())) + .or(transaction_filters(db.clone())) .or(step_by_era(db.clone())) .or(faults_by_public_key(db.clone())) .or(faults_by_era(db.clone())) .or(finality_signatures_by_block(db)) .or(build_open_api_filters()) + .or(status_filters()) .recover(handle_rejection) } @@ -46,19 +75,19 @@ fn block_filters( .or(block_by_height(db)) } -/// Helper function to specify available filters for deploy information. +/// Helper function to specify available filters for transaction information. /// Input: the database with data to be filtered. /// Return: the filtered data. -fn deploy_filters( +fn transaction_filters( db: Db, ) -> impl Filter + Clone { - deploy_by_hash(db.clone()) - .or(deploy_accepted_by_hash(db.clone())) - .or(deploy_processed_by_hash(db.clone())) - .or(deploy_expired_by_hash(db)) + transaction_by_hash(db.clone()) + .or(transaction_accepted_by_hash(db.clone())) + .or(transaction_processed_by_hash(db.clone())) + .or(transaction_expired_by_hash(db)) } -/// Return information about the last block added to the linear chain. +/// Returns information about the last block added to the linear chain. 
/// Input: the database with data to be filtered. /// Return: data about the latest block. /// Path URL: block @@ -67,7 +96,7 @@ fn deploy_filters( get, path = "/block", responses( - (status = 200, description = "latest stored block", body = BlockAdded) + (status = 200, description = "latest stored block", body = BlockAddedEnveloped) ) )] pub fn latest_block( @@ -79,7 +108,7 @@ pub fn latest_block( .and_then(handlers::get_latest_block) } -/// Return information about a block given its block hash. +/// Returns information about a block given its block hash. /// Input: the database with data to be filtered. /// Return: data about the block specified. /// Path URL: block/ @@ -91,7 +120,7 @@ pub fn latest_block( ("block_hash" = String, Path, description = "Base64 encoded block hash of requested block") ), responses( - (status = 200, description = "fetch latest stored block", body = BlockAdded) + (status = 200, description = "fetch latest stored block", body = BlockAddedEnveloped) ) )] fn block_by_hash( @@ -103,7 +132,7 @@ fn block_by_hash( .and_then(handlers::get_block_by_hash) } -/// Return information about a block given a specific block height. +/// Returns information about a block given a specific block height. /// Input: the database with data to be filtered. /// Return: data about the block requested. /// Path URL: block/ @@ -115,7 +144,7 @@ fn block_by_hash( ("height" = u32, Path, description = "Height of the requested block") ), responses( - (status = 200, description = "fetch latest stored block", body = BlockAdded) + (status = 200, description = "fetch latest stored block", body = BlockAddedEnveloped) ) )] fn block_by_height( @@ -127,101 +156,101 @@ fn block_by_height( .and_then(handlers::get_block_by_height) } -/// Return an aggregate of the different states for the given deploy. This is a synthetic JSON not emitted by the node. -/// The output differs depending on the deploy's status, which changes over time as the deploy goes through its lifecycle. 
+/// Returns an aggregate of the different states for the given transaction. This is a synthetic JSON not emitted by the node. +/// The output differs depending on the transaction's status, which changes over time as the transaction goes through its lifecycle. /// Input: the database with data to be filtered. -/// Return: data about the deploy specified. -/// Path URL: deploy/ -/// Example: curl http://127.0.0.1:18888/deploy/f01544d37354c5f9b2c4956826d32f8e44198f94fb6752e87f422fe3071ab58a +/// Return: data about the transaction specified. +/// Path URL: transaction// +/// Example: curl http://127.0.0.1:18888/transaction/version1/f01544d37354c5f9b2c4956826d32f8e44198f94fb6752e87f422fe3071ab58a #[utoipa::path( get, - path = "/deploy/{deploy_hash}", + path = "/transaction/{transaction_type}/{transaction_hash}", params( - ("deploy_hash" = String, Path, description = "Base64 encoded deploy hash of requested deploy") + ("transaction_hash" = String, Path, description = "Base64 encoded transaction hash of requested transaction") ), responses( - (status = 200, description = "fetch aggregate data for deploy events", body = DeployAggregate) + (status = 200, description = "fetch aggregate data for transaction events", body = TransactionAggregate) ) )] -fn deploy_by_hash( +fn transaction_by_hash( db: Db, ) -> impl Filter + Clone { - warp::path!("deploy" / String) + warp::path!("transaction" / TransactionTypeIdFilter / String) .and(warp::get()) .and(with_db(db)) - .and_then(handlers::get_deploy_by_hash) + .and_then(handlers::get_transaction_by_identifier) } -/// Return information about an accepted deploy given its deploy hash. +/// Returns information about an accepted transaction given its transaction hash. /// Input: the database with data to be filtered. -/// Return: data about the accepted deploy. 
-/// Path URL: deploy/accepted/ -/// Example: curl http://127.0.0.1:18888/deploy/accepted/f01544d37354c5f9b2c4956826d32f8e44198f94fb6752e87f422fe3071ab58a +/// Return: data about the accepted transaction. +/// Path URL: transaction/accepted// +/// Example: curl http://127.0.0.1:18888/transaction/accepted/version1/f01544d37354c5f9b2c4956826d32f8e44198f94fb6752e87f422fe3071ab58a #[utoipa::path( get, - path = "/deploy/accepted/{deploy_hash}", + path = "/transaction/accepted/{transaction_type}/{transaction_hash}", params( - ("deploy_hash" = String, Path, description = "Base64 encoded deploy hash of requested deploy accepted") + ("transaction_hash" = String, Path, description = "Base64 encoded transaction hash of requested transaction accepted") ), responses( - (status = 200, description = "fetch stored deploy", body = DeployAccepted) + (status = 200, description = "fetch stored transaction", body = TransactionAcceptedEnveloped) ) )] -fn deploy_accepted_by_hash( +fn transaction_accepted_by_hash( db: Db, ) -> impl Filter + Clone { - warp::path!("deploy" / "accepted" / String) + warp::path!("transaction" / "accepted" / TransactionTypeIdFilter / String) .and(warp::get()) .and(with_db(db)) - .and_then(handlers::get_deploy_accepted_by_hash) + .and_then(handlers::get_transaction_accepted_by_hash) } #[utoipa::path( get, - path = "/deploy/expired/{deploy_hash}", + path = "/transaction/expired/{transaction_type}/{transaction_hash}", params( - ("deploy_hash" = String, Path, description = "Base64 encoded deploy hash of requested deploy expired") + ("transaction_hash" = String, Path, description = "Base64 encoded transaction hash of requested transaction expired") ), responses( - (status = 200, description = "fetch stored deploy", body = DeployExpired) + (status = 200, description = "fetch stored transaction", body = TransactionExpiredEnveloped) ) )] -/// Return information about a deploy that expired given its deploy hash. 
+/// Returns information about a transaction that expired given its transaction hash. /// Input: the database with data to be filtered. -/// Return: data about the expired deploy. -/// Path URL: deploy/expired/ -/// Example: curl http://127.0.0.1:18888/deploy/expired/e03544d37354c5f9b2c4956826d32f8e44198f94fb6752e87f422fe3071ab58a -fn deploy_expired_by_hash( +/// Return: data about the expired transaction. +/// Path URL: transaction/expired// +/// Example: curl http://127.0.0.1:18888/transaction/expired/version1/e03544d37354c5f9b2c4956826d32f8e44198f94fb6752e87f422fe3071ab58a +fn transaction_expired_by_hash( db: Db, ) -> impl Filter + Clone { - warp::path!("deploy" / "expired" / String) + warp::path!("transaction" / "expired" / TransactionTypeIdFilter / String) .and(warp::get()) .and(with_db(db)) - .and_then(handlers::get_deploy_expired_by_hash) + .and_then(handlers::get_transaction_expired_by_hash) } #[utoipa::path( get, - path = "/deploy/processed/{deploy_hash}", + path = "/transaction/processed/{transaction_type}/{transaction_hash}", params( - ("deploy_hash" = String, Path, description = "Base64 encoded deploy hash of requested deploy processed") + ("transaction_hash" = String, Path, description = "Base64 encoded transaction hash of requested transaction processed") ), responses( - (status = 200, description = "fetch stored deploy", body = DeployProcessed) + (status = 200, description = "fetch stored transaction", body = TransactionProcessedEnveloped) ) )] -/// Return information about a deploy that was processed given its deploy hash. +/// Returns information about a transaction that was processed given its transaction hash. /// Input: the database with data to be filtered. -/// Return: data about the processed deploy. -/// Path URL: deploy/processed/ -/// Example: curl http://127.0.0.1:18888/deploy/processed/f08944d37354c5f9b2c4956826d32f8e44198f94fb6752e87f422fe3071ab77a -fn deploy_processed_by_hash( +/// Return: data about the processed transaction. 
+/// Path URL: transaction/processed// +/// Example: curl http://127.0.0.1:18888/transaction/processed/version1/f08944d37354c5f9b2c4956826d32f8e44198f94fb6752e87f422fe3071ab77a +fn transaction_processed_by_hash( db: Db, ) -> impl Filter + Clone { - warp::path!("deploy" / "processed" / String) + warp::path!("transaction" / "processed" / TransactionTypeIdFilter / String) .and(warp::get()) .and(with_db(db)) - .and_then(handlers::get_deploy_processed_by_hash) + .and_then(handlers::get_transaction_processed_by_hash) } #[utoipa::path( @@ -231,10 +260,10 @@ fn deploy_processed_by_hash( ("public_key" = String, Path, description = "Base64 encoded validator's public key") ), responses( - (status = 200, description = "faults associated with a validator's public key", body = [Fault]) + (status = 200, description = "faults associated with a validator's public key", body = [FaultEnveloped]) ) )] -/// Return the faults associated with a validator's public key. +/// Returns the faults associated with a validator's public key. /// Input: the database with data to be filtered. /// Return: faults caused by the validator specified. /// Path URL: faults/ @@ -255,10 +284,10 @@ fn faults_by_public_key( ("era" = String, Path, description = "Era identifier") ), responses( - (status = 200, description = "faults associated with an era ", body = [Fault]) + (status = 200, description = "faults associated with an era ", body = [FaultEnveloped]) ) )] -/// Return the faults associated with an era given a valid era identifier. +/// Returns the faults associated with an era given a valid era identifier. /// Input: the database with data to be filtered. /// Return: fault information for a given era. 
/// Path URL: faults/ @@ -279,10 +308,10 @@ fn faults_by_era( ("block_hash" = String, Path, description = "Base64 encoded block hash of requested block") ), responses( - (status = 200, description = "finality signatures in a block", body = [FinalitySignature]) + (status = 200, description = "finality signatures in a block", body = [FinalitySignatureEnveloped]) ) )] -/// Return the finality signatures in a block given its block hash. +/// Returns the finality signatures in a block given its block hash. /// Input: the database with data to be filtered. /// Return: the finality signatures for the block specified. /// Path URL: signatures/ @@ -303,10 +332,10 @@ fn finality_signatures_by_block( ("era_id" = String, Path, description = "Era id") ), responses( - (status = 200, description = "step event emitted at the end of an era", body = Step) + (status = 200, description = "step event emitted at the end of an era", body = StepEnveloped) ) )] -/// Return the step event emitted at the end of an era, given a valid era identifier. +/// Returns the step event emitted at the end of an era, given a valid era identifier. /// Input: the database with data to be filtered. /// Return: the step event for a given era. 
/// Path URL: step/ diff --git a/sidecar/src/rest_server/handlers.rs b/event_sidecar/src/rest_server/handlers.rs similarity index 74% rename from sidecar/src/rest_server/handlers.rs rename to event_sidecar/src/rest_server/handlers.rs index d4e0ce1a..3dee3157 100644 --- a/sidecar/src/rest_server/handlers.rs +++ b/event_sidecar/src/rest_server/handlers.rs @@ -1,16 +1,18 @@ -use super::errors::StorageError; +use super::{errors::StorageError, filters::TransactionTypeIdFilter}; use crate::{ rest_server::errors::InvalidParam, types::database::{DatabaseReadError, DatabaseReader}, utils::Unexpected, }; use anyhow::Error; +use http::Response; +use hyper::Body; use serde::Serialize; use warp::{http::StatusCode, Rejection, Reply}; pub(super) async fn get_latest_block( db: Db, -) -> Result { +) -> Result, Rejection> { let db_result = db.get_latest_block().await; format_or_reject_storage_result(db_result) } @@ -18,7 +20,7 @@ pub(super) async fn get_latest_block( pub(super) async fn get_block_by_hash( hash: String, db: Db, -) -> Result { +) -> Result, Rejection> { check_hash_is_correct_format(&hash)?; let db_result = db.get_block_by_hash(&hash).await; format_or_reject_storage_result(db_result) @@ -27,44 +29,56 @@ pub(super) async fn get_block_by_hash( pub(super) async fn get_block_by_height( height: u64, db: Db, -) -> Result { +) -> Result, Rejection> { let db_result = db.get_block_by_height(height).await; format_or_reject_storage_result(db_result) } -pub(super) async fn get_deploy_by_hash( +pub(super) async fn get_transaction_by_identifier( + transaction_type: TransactionTypeIdFilter, hash: String, db: Db, -) -> Result { +) -> Result, Rejection> { check_hash_is_correct_format(&hash)?; - let db_result = db.get_deploy_aggregate_by_hash(&hash).await; + let db_result = db + .get_transaction_aggregate_by_identifier(&transaction_type.into(), &hash) + .await; format_or_reject_storage_result(db_result) } -pub(super) async fn get_deploy_accepted_by_hash( +pub(super) async fn 
get_transaction_accepted_by_hash( + transaction_type: TransactionTypeIdFilter, hash: String, db: Db, -) -> Result { +) -> Result, Rejection> { check_hash_is_correct_format(&hash)?; - let db_result = db.get_deploy_accepted_by_hash(&hash).await; + let db_result = db + .get_transaction_accepted_by_hash(&transaction_type.into(), &hash) + .await; format_or_reject_storage_result(db_result) } -pub(super) async fn get_deploy_processed_by_hash( +pub(super) async fn get_transaction_processed_by_hash( + transaction_type: TransactionTypeIdFilter, hash: String, db: Db, ) -> Result { check_hash_is_correct_format(&hash)?; - let db_result = db.get_deploy_processed_by_hash(&hash).await; + let db_result = db + .get_transaction_processed_by_hash(&transaction_type.into(), &hash) + .await; format_or_reject_storage_result(db_result) } -pub(super) async fn get_deploy_expired_by_hash( +pub(super) async fn get_transaction_expired_by_hash( + transaction_type: TransactionTypeIdFilter, hash: String, db: Db, ) -> Result { check_hash_is_correct_format(&hash)?; - let db_result = db.get_deploy_expired_by_hash(&hash).await; + let db_result = db + .get_transaction_expired_by_hash(&transaction_type.into(), &hash) + .await; format_or_reject_storage_result(db_result) } @@ -104,7 +118,7 @@ pub(super) async fn get_finality_signatures_by_block( storage_result: Result, -) -> Result +) -> Result, Rejection> where T: Serialize, { diff --git a/event_sidecar/src/rest_server/metrics_layer.rs b/event_sidecar/src/rest_server/metrics_layer.rs new file mode 100644 index 00000000..fdb666e3 --- /dev/null +++ b/event_sidecar/src/rest_server/metrics_layer.rs @@ -0,0 +1,143 @@ +use derive_new::new; +use futures::{ready, Future}; +use http::{Request, Response}; +use hyper::Body; +use metrics::rest_api::{dec_connected_clients, inc_connected_clients, observe_response_time}; +use pin_project::{pin_project, pinned_drop}; +use std::{ + pin::Pin, + sync::atomic::{AtomicBool, Ordering}, + task::{Context, Poll}, + 
time::Instant, +}; +use tower::{Layer, Service}; + +/// Warp layer that records metrics for each request. +/// +/// For now this supports only services that are bound by a Request as input and Response as output. +/// This is due to the fact that we use the Request's `uri()` and Responses `status()` to give context to the observed metrics. +#[derive(new, Debug, Clone)] +pub struct MetricsLayer { + metrics_abstractor: fn(&str) -> String, +} + +impl Layer for MetricsLayer { + type Service = MetricsService; + + fn layer(&self, service: S) -> Self::Service { + MetricsService::new(service, self.metrics_abstractor) + } +} + +#[derive(Debug, Clone)] +pub struct MetricsService { + inner: S, + metrics_abstractor: fn(&str) -> String, +} + +impl MetricsService { + pub fn new(inner: S, metrics_abstractor: fn(&str) -> String) -> Self { + MetricsService { + inner, + metrics_abstractor, + } + } +} + +impl Service> for MetricsService +where + S: Service, Response = Response> + 'static, +{ + type Response = Response; + type Error = S::Error; + type Future = MetricsFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, request: Request) -> Self::Future { + let metrics_abstraction = (self.metrics_abstractor)(request.uri().path()); + let future = self.inner.call(request); + MetricsFuture::new(future, metrics_abstraction) + } +} + +#[pin_project(PinnedDrop)] +pub struct MetricsFuture +where + F: Future, ErrorType>>, +{ + #[pin] + inner: F, + #[pin] + connection_end_observed: AtomicBool, + start: Instant, + metrics_category: String, +} + +#[pinned_drop] +impl PinnedDrop for MetricsFuture +where + T: Future, X>>, +{ + fn drop(self: Pin<&mut Self>) { + self.observe_end(); + } +} + +impl MetricsFuture +where + T: Future, X>>, +{ + pub fn new(inner: T, metrics_category: String) -> Self { + let start = Instant::now(); + inc_connected_clients(); + MetricsFuture { + inner, + connection_end_observed: AtomicBool::new(false), + 
start, + metrics_category, + } + } + + fn observe_end(self: Pin<&mut Self>) { + let metrics_category = self.metrics_category.clone(); + let duration = self.start.elapsed(); + let got = self + .project() + .connection_end_observed + .swap(true, Ordering::Relaxed); + if !got { + dec_connected_clients(); + observe_response_time(&metrics_category, "disconnected", duration); + } + } +} + +impl Future for MetricsFuture +where + F: Future, E>>, +{ + type Output = Result, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let res = ready!(this.inner.poll(cx)); + let got = this.connection_end_observed.swap(true, Ordering::Relaxed); + if !got { + let metrics_category = this.metrics_category; + let duration = this.start.elapsed(); + dec_connected_clients(); + match &res { + Ok(r) => { + let status = r.status(); + let status = status.as_str(); + observe_response_time(metrics_category, status, duration); + } + Err(_) => observe_response_time(metrics_category, "error", duration), + } + } + Poll::Ready(res) + } +} diff --git a/sidecar/src/rest_server/openapi.rs b/event_sidecar/src/rest_server/openapi.rs similarity index 78% rename from sidecar/src/rest_server/openapi.rs rename to event_sidecar/src/rest_server/openapi.rs index b76f0ee7..6d6a6dfa 100644 --- a/sidecar/src/rest_server/openapi.rs +++ b/event_sidecar/src/rest_server/openapi.rs @@ -1,19 +1,17 @@ mod schema_transformation_visitor; -use crate::types::{ - database::DeployAggregate, - sse_events::{BlockAdded, DeployAccepted, DeployExpired, DeployProcessed, Fault, Step}, -}; -use casper_event_types::{ - block::json_compatibility::{ - JsonBlockBody, JsonBlockHeader, JsonEraEnd, JsonEraReport, JsonProof, Reward, - ValidatorWeight, +use crate::{ + database::types::*, + types::{ + database::TransactionAggregate, + sse_events::{ + BlockAdded, Fault, Step, TransactionAccepted, TransactionExpired, TransactionProcessed, + }, }, - deploy::{Approval, DeployHeader}, - BlockHash, Deploy, 
DeployHash, Digest, ExecutableDeployItem, FinalitySignature, JsonBlock, }; use casper_types::{ - ContractHash, ContractPackageHash, ContractVersion, ExecutionEffect, ExecutionResult, - RuntimeArgs, + contract_messages::Messages, + execution::{execution_result_v1::ExecutionEffect, ExecutionResult}, + Block, BlockHash, FinalitySignature, RuntimeArgs, Transaction, }; use http::Uri; use schemars::{schema::SchemaObject, schema_for, visit::Visitor}; @@ -38,10 +36,10 @@ use self::schema_transformation_visitor::SchemaTransformationVisitor; paths(crate::rest_server::filters::latest_block, crate::rest_server::filters::block_by_hash, crate::rest_server::filters::block_by_height, - crate::rest_server::filters::deploy_by_hash, - crate::rest_server::filters::deploy_accepted_by_hash, - crate::rest_server::filters::deploy_expired_by_hash, - crate::rest_server::filters::deploy_processed_by_hash, + crate::rest_server::filters::transaction_by_hash, + crate::rest_server::filters::transaction_accepted_by_hash, + crate::rest_server::filters::transaction_expired_by_hash, + crate::rest_server::filters::transaction_processed_by_hash, crate::rest_server::filters::faults_by_public_key, crate::rest_server::filters::faults_by_era, crate::rest_server::filters::finality_signatures_by_block, @@ -50,7 +48,7 @@ use self::schema_transformation_visitor::SchemaTransformationVisitor; ), components( - schemas(Step, FinalitySignature, Fault, DeployExpired, Deploy, DeployHeader, ExecutableDeployItem, Approval, DeployAggregate, DeployAccepted, DeployProcessed, BlockAdded, JsonBlock, BlockHash, JsonEraEnd, JsonEraReport, JsonBlockBody, JsonBlockHeader, JsonProof, Digest, DeployHash, ValidatorWeight, Reward) + schemas(EnvelopeHeader, BlockAddedEnveloped, TransactionAcceptedEnveloped, TransactionExpiredEnveloped, TransactionProcessedEnveloped, FaultEnveloped, FinalitySignatureEnveloped, StepEnveloped, Step, Fault, TransactionExpired, TransactionAggregate, TransactionAccepted, TransactionProcessed, BlockAdded) 
), tags( (name = "event-sidecar", description = "Event-sidecar rest API") @@ -64,7 +62,7 @@ impl Modify for AuthorsModification { fn modify(&self, openapi: &mut utoipa::openapi::OpenApi) { let mut contact = Contact::new(); contact.name = Some("Sidecar team".to_string()); - contact.url = Some("https://github.com/CasperLabs/event-sidecar".to_string()); + contact.url = Some("https://github.com/casper-network/casper-sidecar".to_string()); openapi.info.contact = Some(contact); } } @@ -89,15 +87,17 @@ pub fn build_open_api_filters( extend_open_api_with_schemars_schemas( &mut components, vec![ - ("ExecutionResult".to_string(), schema_for!(ExecutionResult)), + ("Block".to_string(), schema_for!(Block)), + ("BlockHash".to_string(), schema_for!(BlockHash)), ("RuntimeArgs".to_string(), schema_for!(RuntimeArgs)), - ("ContractHash".to_string(), schema_for!(ContractHash)), ( - "ContractPackageHash".to_string(), - schema_for!(ContractPackageHash), + "FinalitySignature".to_string(), + schema_for!(FinalitySignature), ), - ("ContractVersion".to_string(), schema_for!(ContractVersion)), ("ExecutionEffect".to_string(), schema_for!(ExecutionEffect)), + ("Transaction".to_string(), schema_for!(Transaction)), + ("ExecutionResult".to_string(), schema_for!(ExecutionResult)), + ("Messages".to_string(), schema_for!(Messages)), ], ); doc.components = Some(components); diff --git a/sidecar/src/rest_server/openapi/schema_transformation_visitor.rs b/event_sidecar/src/rest_server/openapi/schema_transformation_visitor.rs similarity index 100% rename from sidecar/src/rest_server/openapi/schema_transformation_visitor.rs rename to event_sidecar/src/rest_server/openapi/schema_transformation_visitor.rs diff --git a/event_sidecar/src/rest_server/status.rs b/event_sidecar/src/rest_server/status.rs new file mode 100644 index 00000000..405414dd --- /dev/null +++ b/event_sidecar/src/rest_server/status.rs @@ -0,0 +1,22 @@ +use casper_event_types::SIDECAR_VERSION; +use casper_types::ProtocolVersion; +use 
http::StatusCode; +use serde::Serialize; +use warp::{reject::Rejection, reply::Reply, Filter}; + +#[derive(Clone, Debug, Serialize)] +struct SidecarStatus { + version: ProtocolVersion, +} + +pub fn status_filters() -> impl Filter + Clone { + warp::path!("status").and(warp::get()).and_then(get_status) +} + +pub(super) async fn get_status() -> Result { + let data = SidecarStatus { + version: *SIDECAR_VERSION, + }; + let json = warp::reply::json(&data); + Ok(warp::reply::with_status(json, StatusCode::OK).into_response()) +} diff --git a/sidecar/src/rest_server/tests.rs b/event_sidecar/src/rest_server/tests.rs similarity index 63% rename from sidecar/src/rest_server/tests.rs rename to event_sidecar/src/rest_server/tests.rs index 9c405104..8235761e 100644 --- a/sidecar/src/rest_server/tests.rs +++ b/event_sidecar/src/rest_server/tests.rs @@ -1,17 +1,17 @@ -use casper_event_types::FinalitySignature as FinSig; -use casper_types::AsymmetricType; +use casper_types::{AsymmetricType, FinalitySignature as FinSig}; use http::StatusCode; use warp::test::request; use super::filters; +use crate::database::types::SseEnvelope; use crate::{ testing::fake_database::FakeDatabase, - types::{database::DeployAggregate, sse_events::*}, + types::{database::TransactionAggregate, sse_events::*}, }; // Path elements const BLOCK: &str = "block"; -const DEPLOY: &str = "deploy"; +const TRANSACTION: &str = "transaction"; const FAULTS: &str = "faults"; const SIGNATURES: &str = "signatures"; const STEP: &str = "step"; @@ -43,9 +43,13 @@ async fn root_should_return_400() { #[tokio::test] async fn root_with_invalid_path_should_return_400() { - should_respond_to_path_with("/not_block_or_deploy".to_string(), StatusCode::BAD_REQUEST).await; should_respond_to_path_with( - "/not_block_or_deploy/extra".to_string(), + "/not_block_or_transaction".to_string(), + StatusCode::BAD_REQUEST, + ) + .await; + should_respond_to_path_with( + "/not_block_or_transaction/extra".to_string(), StatusCode::BAD_REQUEST, ) 
.await; @@ -66,7 +70,8 @@ async fn block_root_should_return_valid_data() { assert!(response.status().is_success()); let body = response.into_body(); - serde_json::from_slice::(&body).expect("Error parsing BlockAdded from response"); + serde_json::from_slice::>(&body) + .expect("Error parsing BlockAdded from response"); } #[tokio::test] @@ -87,10 +92,15 @@ async fn block_by_hash_should_return_valid_data() { assert!(response.status().is_success()); let body = response.into_body(); - let block_added = serde_json::from_slice::(&body) + let block_added = serde_json::from_slice::>(&body) .expect("Error parsing BlockAdded from response"); - assert_eq!(block_added.hex_encoded_hash(), identifiers.block_added_hash); + assert_eq!( + block_added.payload().hex_encoded_hash(), + identifiers.block_added_hash + ); + assert_eq!(block_added.network_name(), "network-1"); + assert_eq!(block_added.api_version(), "2.0.0"); } #[tokio::test] @@ -111,14 +121,19 @@ async fn block_by_height_should_return_valid_data() { assert!(response.status().is_success()); let body = response.into_body(); - let block_added = serde_json::from_slice::(&body) + let block_added = serde_json::from_slice::>(&body) .expect("Error parsing BlockAdded from response"); - assert_eq!(block_added.get_height(), identifiers.block_added_height); + assert_eq!( + block_added.payload().get_height(), + identifiers.block_added_height + ); + assert_eq!(block_added.network_name(), "network-1"); + assert_eq!(block_added.api_version(), "2.0.0"); } #[tokio::test] -async fn deploy_by_hash_should_return_valid_data() { +async fn transaction_by_hash_should_return_valid_data() { let database = FakeDatabase::new(); let identifiers = database @@ -128,24 +143,22 @@ async fn deploy_by_hash_should_return_valid_data() { let api = filters::combined_filters(database); - let request_path = format!("/{}/{}", DEPLOY, identifiers.deploy_accepted_hash); + let (transaction_hash, transaction_type) = identifiers.transaction_accepted_info; + let 
request_path = format!("/{}/{}/{}", TRANSACTION, transaction_type, transaction_hash); let response = request().path(&request_path).reply(&api).await; assert!(response.status().is_success()); let body = response.into_body(); - let deploy_aggregate = serde_json::from_slice::(&body) - .expect("Error parsing AggregateDeployInfo from response"); + let transaction_aggregate = serde_json::from_slice::(&body) + .expect("Error parsing AggregateTransactionInfo from response"); - assert_eq!( - deploy_aggregate.deploy_hash, - identifiers.deploy_accepted_hash - ); + assert_eq!(transaction_aggregate.transaction_hash, transaction_hash); } #[tokio::test] -async fn deploy_accepted_by_hash_should_return_valid_data() { +async fn transaction_accepted_by_hash_should_return_valid_data() { let database = FakeDatabase::new(); let identifiers = database @@ -155,9 +168,10 @@ async fn deploy_accepted_by_hash_should_return_valid_data() { let api = filters::combined_filters(database); + let (transaction_hash, transaction_type) = identifiers.transaction_accepted_info; let request_path = format!( - "/{}/{}/{}", - DEPLOY, ACCEPTED, identifiers.deploy_accepted_hash + "/{}/{}/{}/{}", + TRANSACTION, ACCEPTED, transaction_type, transaction_hash ); let response = request().path(&request_path).reply(&api).await; @@ -165,17 +179,19 @@ async fn deploy_accepted_by_hash_should_return_valid_data() { assert!(response.status().is_success()); let body = response.into_body(); - let deploy_accepted = serde_json::from_slice::(&body) - .expect("Error parsing DeployAccepted from response"); + let transaction_accepted = serde_json::from_slice::>(&body) + .expect("Error parsing TransactionAccepted from response"); assert_eq!( - deploy_accepted.hex_encoded_hash(), - identifiers.deploy_accepted_hash + transaction_accepted.payload().hex_encoded_hash(), + transaction_hash ); + assert_eq!(transaction_accepted.network_name(), "network-1"); + assert_eq!(transaction_accepted.api_version(), "2.0.0"); } #[tokio::test] -async 
fn deploy_processed_by_hash_should_return_valid_data() { +async fn transaction_processed_by_hash_should_return_valid_data() { let database = FakeDatabase::new(); let identifiers = database @@ -184,10 +200,10 @@ async fn deploy_processed_by_hash_should_return_valid_data() { .expect("Error populating FakeDatabase"); let api = filters::combined_filters(database); - + let (transaction_hash, transaction_type) = identifiers.transaction_processed_info; let request_path = format!( - "/{}/{}/{}", - DEPLOY, PROCESSED, identifiers.deploy_processed_hash + "/{}/{}/{}/{}", + TRANSACTION, PROCESSED, transaction_type, transaction_hash ); let response = request().path(&request_path).reply(&api).await; @@ -195,17 +211,19 @@ async fn deploy_processed_by_hash_should_return_valid_data() { assert!(response.status().is_success()); let body = response.into_body(); - let deploy_processed = serde_json::from_slice::(&body) - .expect("Error parsing DeployProcessed from response"); + let transaction_processed = serde_json::from_slice::>(&body) + .expect("Error parsing TransactionProcessed from response"); assert_eq!( - deploy_processed.hex_encoded_hash(), - identifiers.deploy_processed_hash + transaction_processed.payload().hex_encoded_hash(), + transaction_hash ); + assert_eq!(transaction_processed.network_name(), "network-1"); + assert_eq!(transaction_processed.api_version(), "2.0.0"); } #[tokio::test] -async fn deploy_expired_by_hash_should_return_valid_data() { +async fn transaction_expired_by_hash_should_return_valid_data() { let database = FakeDatabase::new(); let identifiers = database @@ -214,10 +232,10 @@ async fn deploy_expired_by_hash_should_return_valid_data() { .expect("Error populating FakeDatabase"); let api = filters::combined_filters(database); - + let (transaction_hash, transaction_type) = identifiers.transaction_expired_info; let request_path = format!( - "/{}/{}/{}", - DEPLOY, EXPIRED, identifiers.deploy_expired_hash + "/{}/{}/{}/{}", + TRANSACTION, EXPIRED, 
transaction_type, transaction_hash ); let response = request().path(&request_path).reply(&api).await; @@ -225,13 +243,15 @@ async fn deploy_expired_by_hash_should_return_valid_data() { assert!(response.status().is_success()); let body = response.into_body(); - let deploy_expired = serde_json::from_slice::(&body) - .expect("Error parsing DeployExpired from response"); + let transaction_expired = serde_json::from_slice::>(&body) + .expect("Error parsing TransactionExpired from response"); assert_eq!( - deploy_expired.hex_encoded_hash(), - identifiers.deploy_expired_hash + transaction_expired.payload().hex_encoded_hash(), + transaction_hash ); + assert_eq!(transaction_expired.network_name(), "network-1"); + assert_eq!(transaction_expired.api_version(), "2.0.0"); } #[tokio::test] @@ -252,9 +272,12 @@ async fn step_by_era_should_return_valid_data() { assert!(response.status().is_success()); let body = response.into_body(); - let step = serde_json::from_slice::(&body).expect("Error parsing Step from response"); + let step = serde_json::from_slice::>(&body) + .expect("Error parsing Step from response"); - assert_eq!(step.era_id.value(), identifiers.step_era_id); + assert_eq!(step.payload().era_id.value(), identifiers.step_era_id); + assert_eq!(step.network_name(), "network-1"); + assert_eq!(step.api_version(), "2.0.0"); } #[tokio::test] @@ -275,10 +298,16 @@ async fn faults_by_public_key_should_return_valid_data() { assert!(response.status().is_success()); let body = response.into_body(); - let faults = - serde_json::from_slice::>(&body).expect("Error parsing Fault from response"); + let faults = serde_json::from_slice::>>(&body) + .expect("Error parsing Fault from response"); - assert_eq!(faults[0].public_key.to_hex(), identifiers.fault_public_key); + let fault = &faults[0]; + assert_eq!( + fault.payload().public_key.to_hex(), + identifiers.fault_public_key + ); + assert_eq!(fault.network_name(), "network-1"); + assert_eq!(fault.api_version(), "2.0.0"); } #[tokio::test] 
@@ -299,10 +328,13 @@ async fn faults_by_era_should_return_valid_data() { assert!(response.status().is_success()); let body = response.into_body(); - let faults = - serde_json::from_slice::>(&body).expect("Error parsing Fault from response"); + let faults = serde_json::from_slice::>>(&body) + .expect("Error parsing Fault from response"); - assert_eq!(faults[0].era_id.value(), identifiers.fault_era_id); + let fault = &faults[0]; + assert_eq!(fault.payload().era_id.value(), identifiers.fault_era_id); + assert_eq!(fault.network_name(), "network-1"); + assert_eq!(fault.api_version(), "2.0.0"); } #[tokio::test] @@ -326,13 +358,15 @@ async fn finality_signatures_by_block_should_return_valid_data() { assert!(response.status().is_success()); let body = response.into_body(); - let finality_signatures = serde_json::from_slice::>(&body) + let finality_signatures = serde_json::from_slice::>>(&body) .expect("Error parsing FinalitySignatures from response"); - + let finality_signature = &finality_signatures[0]; assert_eq!( - hex::encode(finality_signatures[0].block_hash().inner()), + hex::encode(finality_signature.payload().block_hash().inner()), identifiers.finality_signatures_block_hash ); + assert_eq!(finality_signature.api_version(), "2.0.0"); + assert_eq!(finality_signature.network_name(), "network-1"); } #[tokio::test] @@ -350,29 +384,28 @@ async fn block_by_height_of_not_stored_should_return_404() { } #[tokio::test] -async fn deploy_by_hash_of_not_stored_should_return_404() { - let request_path = format!("/{}/{}", DEPLOY, VALID_HASH); +async fn transaction_by_hash_of_not_stored_should_return_404() { + let request_path = format!("/{}/deploy/{}", TRANSACTION, VALID_HASH); should_respond_to_path_with(request_path, StatusCode::NOT_FOUND).await } #[tokio::test] -async fn deploy_accepted_by_hash_of_not_stored_should_return_404() { - let request_path = format!("/{}/{}/{}", DEPLOY, ACCEPTED, VALID_HASH); - +async fn transaction_accepted_by_hash_of_not_stored_should_return_404() { 
+ let request_path = format!("/{}/{}/version1/{}", TRANSACTION, ACCEPTED, VALID_HASH); should_respond_to_path_with(request_path, StatusCode::NOT_FOUND).await } #[tokio::test] -async fn deploy_processed_by_hash_of_not_stored_should_return_404() { - let request_path = format!("/{}/{}/{}", DEPLOY, PROCESSED, VALID_HASH); +async fn transaction_processed_by_hash_of_not_stored_should_return_404() { + let request_path = format!("/{}/{}/deploy/{}", TRANSACTION, PROCESSED, VALID_HASH); should_respond_to_path_with(request_path, StatusCode::NOT_FOUND).await } #[tokio::test] -async fn deploy_expired_by_hash_of_not_stored_should_return_404() { - let request_path = format!("/{}/{}/{}", DEPLOY, EXPIRED, VALID_HASH); +async fn transaction_expired_by_hash_of_not_stored_should_return_404() { + let request_path = format!("/{}/{}/deploy/{}", TRANSACTION, EXPIRED, VALID_HASH); should_respond_to_path_with(request_path, StatusCode::NOT_FOUND).await } @@ -413,29 +446,29 @@ async fn block_by_invalid_hash_should_return_400() { } #[tokio::test] -async fn deploy_by_hash_of_invalid_should_return_400() { - let request_path = format!("/{}/{}", DEPLOY, INVALID_HASH); +async fn transaction_by_hash_of_invalid_should_return_400() { + let request_path = format!("/{}/{}", TRANSACTION, INVALID_HASH); should_respond_to_path_with(request_path, StatusCode::BAD_REQUEST).await } #[tokio::test] -async fn deploy_accepted_by_hash_of_invalid_should_return_400() { - let request_path = format!("/{}/{}/{}", DEPLOY, ACCEPTED, INVALID_HASH); +async fn transaction_accepted_by_hash_of_invalid_should_return_400() { + let request_path = format!("/{}/{}/deploy/{}", TRANSACTION, ACCEPTED, INVALID_HASH); should_respond_to_path_with(request_path, StatusCode::BAD_REQUEST).await } #[tokio::test] -async fn deploy_processed_by_hash_of_invalid_should_return_400() { - let request_path = format!("/{}/{}/{}", DEPLOY, PROCESSED, INVALID_HASH); +async fn transaction_processed_by_hash_of_invalid_should_return_400() { + let 
request_path = format!("/{}/{}/deploy/{}", TRANSACTION, PROCESSED, INVALID_HASH); should_respond_to_path_with(request_path, StatusCode::BAD_REQUEST).await } #[tokio::test] -async fn deploy_expired_by_hash_of_invalid_should_return_400() { - let request_path = format!("/{}/{}/{}", DEPLOY, EXPIRED, INVALID_HASH); +async fn transaction_expired_by_hash_of_invalid_should_return_400() { + let request_path = format!("/{}/{}/deploy/{}", TRANSACTION, EXPIRED, INVALID_HASH); should_respond_to_path_with(request_path, StatusCode::BAD_REQUEST).await } diff --git a/sidecar/src/sql.rs b/event_sidecar/src/sql.rs similarity index 100% rename from sidecar/src/sql.rs rename to event_sidecar/src/sql.rs diff --git a/sidecar/src/sql/tables.rs b/event_sidecar/src/sql/tables.rs similarity index 51% rename from sidecar/src/sql/tables.rs rename to event_sidecar/src/sql/tables.rs index be12e0bb..03d323d6 100644 --- a/sidecar/src/sql/tables.rs +++ b/event_sidecar/src/sql/tables.rs @@ -1,8 +1,4 @@ pub mod block_added; -pub mod deploy_accepted; -pub mod deploy_event; -pub mod deploy_expired; -pub mod deploy_processed; pub mod event_log; pub mod event_type; pub mod fault; @@ -10,3 +6,8 @@ pub mod finality_signature; pub mod migration; pub mod shutdown; pub mod step; +pub mod transaction_accepted; +pub mod transaction_event; +pub mod transaction_expired; +pub mod transaction_processed; +pub mod transaction_type; diff --git a/sidecar/src/sql/tables/block_added.rs b/event_sidecar/src/sql/tables/block_added.rs similarity index 78% rename from sidecar/src/sql/tables/block_added.rs rename to event_sidecar/src/sql/tables/block_added.rs index b7b1be2e..b27ac285 100644 --- a/sidecar/src/sql/tables/block_added.rs +++ b/event_sidecar/src/sql/tables/block_added.rs @@ -76,7 +76,14 @@ pub fn create_insert_stmt( pub fn create_get_by_hash_stmt(block_hash: String) -> SelectStatement { Query::select() .column(BlockAdded::Raw) + .column(EventLog::ApiVersion) + .column(EventLog::NetworkName) .from(BlockAdded::Table) 
+ .left_join( + EventLog::Table, + Expr::col((EventLog::Table, EventLog::EventLogId)) + .equals((BlockAdded::Table, BlockAdded::EventLogId)), + ) .and_where(Expr::col(BlockAdded::BlockHash).eq(block_hash)) .to_owned() } @@ -84,7 +91,14 @@ pub fn create_get_by_hash_stmt(block_hash: String) -> SelectStatement { pub fn create_get_by_height_stmt(height: u64) -> SelectStatement { Query::select() .column(BlockAdded::Raw) + .column(EventLog::ApiVersion) + .column(EventLog::NetworkName) .from(BlockAdded::Table) + .left_join( + EventLog::Table, + Expr::col((EventLog::Table, EventLog::EventLogId)) + .equals((BlockAdded::Table, BlockAdded::EventLogId)), + ) .and_where(Expr::col(BlockAdded::Height).eq(height)) .to_owned() } @@ -96,7 +110,14 @@ pub fn create_get_latest_stmt() -> SelectStatement { .to_owned(); Query::select() .column(BlockAdded::Raw) + .column(EventLog::ApiVersion) + .column(EventLog::NetworkName) .from(BlockAdded::Table) + .left_join( + EventLog::Table, + Expr::col((EventLog::Table, EventLog::EventLogId)) + .equals((BlockAdded::Table, BlockAdded::EventLogId)), + ) .and_where(Expr::col(BlockAdded::Height).in_subquery(select_max)) .to_owned() } diff --git a/sidecar/src/sql/tables/event_log.rs b/event_sidecar/src/sql/tables/event_log.rs similarity index 56% rename from sidecar/src/sql/tables/event_log.rs rename to event_sidecar/src/sql/tables/event_log.rs index cb404d2f..90525d6a 100644 --- a/sidecar/src/sql/tables/event_log.rs +++ b/event_sidecar/src/sql/tables/event_log.rs @@ -5,6 +5,7 @@ use sea_query::{ use super::event_type::EventType; +#[allow(clippy::enum_variant_names)] #[derive(Iden)] pub enum EventLog { Table, @@ -15,21 +16,22 @@ pub enum EventLog { EventKey, InsertedTimestamp, EmittedTimestamp, + ApiVersion, + NetworkName, } #[allow(clippy::too_many_lines)] -pub fn create_table_stmt(is_big_integer_id: bool) -> TableCreateStatement { - let mut binding = ColumnDef::new(EventLog::EventLogId); - let mut event_log_id_col_definition = 
binding.auto_increment().not_null().primary_key(); - if is_big_integer_id { - event_log_id_col_definition = event_log_id_col_definition.big_integer(); - } else { - event_log_id_col_definition = event_log_id_col_definition.integer(); - } +pub fn create_table_stmt() -> TableCreateStatement { Table::create() .table(EventLog::Table) .if_not_exists() - .col(event_log_id_col_definition) + .col( + ColumnDef::new(EventLog::EventLogId) + .big_integer() + .auto_increment() + .not_null() + .primary_key(), + ) .col( ColumnDef::new(EventLog::EventTypeId) .tiny_unsigned() @@ -58,6 +60,8 @@ pub fn create_table_stmt(is_big_integer_id: bool) -> TableCreateStatement { // This can be replaced with better syntax when https://github.com/SeaQL/sea-query/pull/428 merges. .extra("DEFAULT CURRENT_TIMESTAMP".to_string()), ) + .col(ColumnDef::new(EventLog::ApiVersion).string().not_null()) + .col(ColumnDef::new(EventLog::NetworkName).string().not_null()) .foreign_key( ForeignKey::create() .name("FK_event_type_id") @@ -83,6 +87,8 @@ pub fn create_insert_stmt( event_source_address: &str, event_id: u32, event_key: &str, + api_version: &str, + network_name: &str, ) -> SqResult { let insert_stmt = Query::insert() .into_table(EventLog::Table) @@ -91,12 +97,16 @@ pub fn create_insert_stmt( EventLog::EventSourceAddress, EventLog::EventId, EventLog::EventKey, + EventLog::ApiVersion, + EventLog::NetworkName, ]) .values(vec![ event_type_id.into(), event_source_address.into(), event_id.into(), event_key.into(), + api_version.into(), + network_name.into(), ]) .map(|stmt| stmt.returning_col(EventLog::EventLogId).to_owned())?; @@ -109,3 +119,23 @@ pub fn count() -> SelectStatement { .from(EventLog::Table) .to_owned() } + +#[cfg(test)] +mod tests { + use super::*; + use sea_query::{PostgresQueryBuilder, SqliteQueryBuilder}; + + #[test] + fn should_prepare_create_stmt_for_sqlite() { + let expected_sql = r#"CREATE TABLE IF NOT EXISTS "event_log" ( "event_log_id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, 
"event_type_id" integer NOT NULL, "event_source_address" text NOT NULL, "event_id" bigint NOT NULL, "event_key" text NOT NULL, "inserted_timestamp" text NOT NULL DEFAULT CURRENT_TIMESTAMP, "emitted_timestamp" text NOT NULL DEFAULT CURRENT_TIMESTAMP, "api_version" text NOT NULL, "network_name" text NOT NULL, CONSTRAINT "UDX_event_log" UNIQUE ("event_source_address", "event_id", "event_type_id", "event_key"), FOREIGN KEY ("event_type_id") REFERENCES "event_type" ("event_type_id") ON DELETE RESTRICT ON UPDATE RESTRICT )"#; + let stmt = create_table_stmt().to_string(SqliteQueryBuilder); + assert_eq!(stmt.to_string(), expected_sql); + } + + #[test] + fn should_prepare_create_stmt_for_postgres() { + let expected_sql = r#"CREATE TABLE IF NOT EXISTS "event_log" ( "event_log_id" bigserial NOT NULL PRIMARY KEY, "event_type_id" smallint NOT NULL, "event_source_address" varchar NOT NULL, "event_id" bigint NOT NULL, "event_key" varchar NOT NULL, "inserted_timestamp" timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, "emitted_timestamp" timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, "api_version" varchar NOT NULL, "network_name" varchar NOT NULL, CONSTRAINT "UDX_event_log" UNIQUE ("event_source_address", "event_id", "event_type_id", "event_key"), CONSTRAINT "FK_event_type_id" FOREIGN KEY ("event_type_id") REFERENCES "event_type" ("event_type_id") ON DELETE RESTRICT ON UPDATE RESTRICT )"#; + let stmt = create_table_stmt().to_string(PostgresQueryBuilder); + assert_eq!(stmt.to_string(), expected_sql,); + } +} diff --git a/sidecar/src/sql/tables/event_type.rs b/event_sidecar/src/sql/tables/event_type.rs similarity index 75% rename from sidecar/src/sql/tables/event_type.rs rename to event_sidecar/src/sql/tables/event_type.rs index 84dd8e26..838211c0 100644 --- a/sidecar/src/sql/tables/event_type.rs +++ b/event_sidecar/src/sql/tables/event_type.rs @@ -3,6 +3,7 @@ use sea_query::{ TableCreateStatement, }; +#[allow(clippy::enum_variant_names)] #[derive(Iden)] pub(super) enum EventType { 
Table, @@ -12,9 +13,9 @@ pub(super) enum EventType { pub enum EventTypeId { BlockAdded = 1, - DeployAccepted = 2, - DeployExpired = 3, - DeployProcessed = 4, + TransactionAccepted = 2, + TransactionExpired = 3, + TransactionProcessed = 4, Fault = 5, FinalitySignature = 6, Step = 7, @@ -49,16 +50,16 @@ pub fn create_initialise_stmt() -> SqResult { "BlockAdded".into(), ])? .values(vec![ - (EventTypeId::DeployAccepted as u8).into(), - "DeployAccepted".into(), + (EventTypeId::TransactionAccepted as u8).into(), + "TransactionAccepted".into(), ])? .values(vec![ - (EventTypeId::DeployExpired as u8).into(), - "DeployExpired".into(), + (EventTypeId::TransactionExpired as u8).into(), + "TransactionExpired".into(), ])? .values(vec![ - (EventTypeId::DeployProcessed as u8).into(), - "DeployProcessed".into(), + (EventTypeId::TransactionProcessed as u8).into(), + "TransactionProcessed".into(), ])? .values(vec![(EventTypeId::Fault as u8).into(), "Fault".into()])? .values(vec![ @@ -81,7 +82,7 @@ pub fn create_initialise_stmt() -> SqResult { #[test] fn create_initialise_stmt_sql() { use sea_query::SqliteQueryBuilder; - let expected_sql = "INSERT INTO \"event_type\" (\"event_type_id\", \"event_type_name\") VALUES (1, 'BlockAdded'), (2, 'DeployAccepted'), (3, 'DeployExpired'), (4, 'DeployProcessed'), (5, 'Fault'), (6, 'FinalitySignature'), (7, 'Step'), (8, 'Shutdown') ON CONFLICT (\"event_type_id\") DO NOTHING"; + let expected_sql = "INSERT INTO \"event_type\" (\"event_type_id\", \"event_type_name\") VALUES (1, 'BlockAdded'), (2, 'TransactionAccepted'), (3, 'TransactionExpired'), (4, 'TransactionProcessed'), (5, 'Fault'), (6, 'FinalitySignature'), (7, 'Step'), (8, 'Shutdown') ON CONFLICT (\"event_type_id\") DO NOTHING"; let got_sql = create_initialise_stmt() .unwrap() diff --git a/sidecar/src/sql/tables/fault.rs b/event_sidecar/src/sql/tables/fault.rs similarity index 82% rename from sidecar/src/sql/tables/fault.rs rename to event_sidecar/src/sql/tables/fault.rs index 
72e83216..e0081850 100644 --- a/sidecar/src/sql/tables/fault.rs +++ b/event_sidecar/src/sql/tables/fault.rs @@ -69,7 +69,14 @@ pub fn create_insert_stmt( pub fn create_get_faults_by_public_key_stmt(public_key: String) -> SelectStatement { Query::select() .column(Fault::Raw) + .column(EventLog::ApiVersion) + .column(EventLog::NetworkName) .from(Fault::Table) + .left_join( + EventLog::Table, + Expr::col((EventLog::Table, EventLog::EventLogId)) + .equals((Fault::Table, Fault::EventLogId)), + ) .and_where(Expr::col(Fault::PublicKey).eq(public_key)) .to_owned() } @@ -77,7 +84,14 @@ pub fn create_get_faults_by_public_key_stmt(public_key: String) -> SelectStateme pub fn create_get_faults_by_era_stmt(era: u64) -> SelectStatement { Query::select() .column(Fault::Raw) + .column(EventLog::ApiVersion) + .column(EventLog::NetworkName) .from(Fault::Table) + .left_join( + EventLog::Table, + Expr::col((EventLog::Table, EventLog::EventLogId)) + .equals((Fault::Table, Fault::EventLogId)), + ) .and_where(Expr::col(Fault::Era).eq(era)) .to_owned() } diff --git a/sidecar/src/sql/tables/finality_signature.rs b/event_sidecar/src/sql/tables/finality_signature.rs similarity index 89% rename from sidecar/src/sql/tables/finality_signature.rs rename to event_sidecar/src/sql/tables/finality_signature.rs index afab0d34..ddb1fa37 100644 --- a/sidecar/src/sql/tables/finality_signature.rs +++ b/event_sidecar/src/sql/tables/finality_signature.rs @@ -79,7 +79,14 @@ pub fn create_insert_stmt( pub fn create_get_finality_signatures_by_block_stmt(block_hash: String) -> SelectStatement { Query::select() .column(FinalitySignature::Raw) + .column(EventLog::ApiVersion) + .column(EventLog::NetworkName) .from(FinalitySignature::Table) + .left_join( + EventLog::Table, + Expr::col((EventLog::Table, EventLog::EventLogId)) + .equals((FinalitySignature::Table, FinalitySignature::EventLogId)), + ) .and_where(Expr::col(FinalitySignature::BlockHash).eq(block_hash)) .to_owned() } diff --git 
a/sidecar/src/sql/tables/migration.rs b/event_sidecar/src/sql/tables/migration.rs similarity index 100% rename from sidecar/src/sql/tables/migration.rs rename to event_sidecar/src/sql/tables/migration.rs diff --git a/sidecar/src/sql/tables/shutdown.rs b/event_sidecar/src/sql/tables/shutdown.rs similarity index 98% rename from sidecar/src/sql/tables/shutdown.rs rename to event_sidecar/src/sql/tables/shutdown.rs index 547bf542..7057c3d6 100644 --- a/sidecar/src/sql/tables/shutdown.rs +++ b/event_sidecar/src/sql/tables/shutdown.rs @@ -5,6 +5,7 @@ use sea_query::{ use super::event_log::EventLog; +#[allow(clippy::enum_variant_names)] #[derive(Iden)] pub(crate) enum Shutdown { #[iden = "Shutdown"] diff --git a/sidecar/src/sql/tables/step.rs b/event_sidecar/src/sql/tables/step.rs similarity index 87% rename from sidecar/src/sql/tables/step.rs rename to event_sidecar/src/sql/tables/step.rs index c92c24b9..1056a4a3 100644 --- a/sidecar/src/sql/tables/step.rs +++ b/event_sidecar/src/sql/tables/step.rs @@ -51,7 +51,14 @@ pub fn create_insert_stmt(era: u64, raw: String, event_log_id: u64) -> SqResult< pub fn create_get_by_era_stmt(era: u64) -> SelectStatement { Query::select() .column(Step::Raw) + .column(EventLog::ApiVersion) + .column(EventLog::NetworkName) .from(Step::Table) + .left_join( + EventLog::Table, + Expr::col((EventLog::Table, EventLog::EventLogId)) + .equals((Step::Table, Step::EventLogId)), + ) .and_where(Expr::col(Step::Era).eq(era)) .to_owned() } diff --git a/event_sidecar/src/sql/tables/transaction_accepted.rs b/event_sidecar/src/sql/tables/transaction_accepted.rs new file mode 100644 index 00000000..ebeec227 --- /dev/null +++ b/event_sidecar/src/sql/tables/transaction_accepted.rs @@ -0,0 +1,113 @@ +use sea_query::{ + error::Result as SqResult, ColumnDef, Expr, ForeignKey, ForeignKeyAction, Iden, Index, + InsertStatement, Query, SelectStatement, Table, TableCreateStatement, +}; + +use super::{event_log::EventLog, transaction_type::TransactionType}; + 
+#[derive(Iden)] +pub(super) enum TransactionAccepted { + #[iden = "TransactionAccepted"] + Table, + TransactionHash, + TransactionTypeId, + Raw, + EventLogId, +} + +pub fn create_table_stmt() -> TableCreateStatement { + Table::create() + .table(TransactionAccepted::Table) + .if_not_exists() + .col( + ColumnDef::new(TransactionAccepted::TransactionHash) + .string() + .not_null(), + ) + .col( + ColumnDef::new(TransactionAccepted::TransactionTypeId) + .tiny_unsigned() + .not_null(), + ) + .col(ColumnDef::new(TransactionAccepted::Raw).text().not_null()) + .col( + ColumnDef::new(TransactionAccepted::EventLogId) + .big_unsigned() + .not_null(), + ) + .index(&mut primary_key()) + .foreign_key(&mut event_log_fk()) + .foreign_key(&mut transaction_type_fk()) + .to_owned() +} + +fn transaction_type_fk() -> sea_query::ForeignKeyCreateStatement { + ForeignKey::create() + .name("FK_transaction_type_id") + .from( + TransactionAccepted::Table, + TransactionAccepted::TransactionTypeId, + ) + .to(TransactionType::Table, TransactionType::TransactionTypeId) + .on_delete(ForeignKeyAction::Restrict) + .on_update(ForeignKeyAction::Restrict) + .to_owned() +} + +fn event_log_fk() -> sea_query::ForeignKeyCreateStatement { + ForeignKey::create() + .name("FK_event_log_id") + .from(TransactionAccepted::Table, TransactionAccepted::EventLogId) + .to(EventLog::Table, EventLog::EventLogId) + .on_delete(ForeignKeyAction::Restrict) + .on_update(ForeignKeyAction::Restrict) + .to_owned() +} + +fn primary_key() -> sea_query::IndexCreateStatement { + Index::create() + .name("PDX_TransactionAccepted") + .col(TransactionAccepted::TransactionTypeId) + .col(TransactionAccepted::TransactionHash) + .primary() + .to_owned() +} + +pub fn create_insert_stmt( + transaction_type: u8, + transaction_hash: String, + raw: String, + event_log_id: u64, +) -> SqResult { + Query::insert() + .into_table(TransactionAccepted::Table) + .columns([ + TransactionAccepted::TransactionTypeId, + 
TransactionAccepted::TransactionHash, + TransactionAccepted::Raw, + TransactionAccepted::EventLogId, + ]) + .values(vec![ + transaction_type.into(), + transaction_hash.into(), + raw.into(), + event_log_id.into(), + ]) + .map(|stmt| stmt.to_owned()) +} + +pub fn create_get_by_hash_stmt(transaction_type: u8, transaction_hash: String) -> SelectStatement { + Query::select() + .column(TransactionAccepted::Raw) + .column(EventLog::ApiVersion) + .column(EventLog::NetworkName) + .from(TransactionAccepted::Table) + .left_join( + EventLog::Table, + Expr::col((EventLog::Table, EventLog::EventLogId)) + .equals((TransactionAccepted::Table, TransactionAccepted::EventLogId)), + ) + .and_where(Expr::col(TransactionAccepted::TransactionTypeId).eq(transaction_type)) + .and_where(Expr::col(TransactionAccepted::TransactionHash).eq(transaction_hash)) + .to_owned() +} diff --git a/event_sidecar/src/sql/tables/transaction_event.rs b/event_sidecar/src/sql/tables/transaction_event.rs new file mode 100644 index 00000000..68ca4d5f --- /dev/null +++ b/event_sidecar/src/sql/tables/transaction_event.rs @@ -0,0 +1,91 @@ +use sea_query::{ + error::Result as SqResult, ColumnDef, ForeignKey, ForeignKeyAction, Iden, Index, + InsertStatement, Query, Table, TableCreateStatement, +}; + +use super::{event_log::EventLog, transaction_type::TransactionType}; + +#[derive(Iden)] +pub(super) enum TransactionEvent { + Table, + EventLogId, + TransactionTypeId, + TransactionHash, +} + +pub fn create_table_stmt() -> TableCreateStatement { + Table::create() + .table(TransactionEvent::Table) + .if_not_exists() + .col( + ColumnDef::new(TransactionEvent::EventLogId) + .big_unsigned() + .not_null(), + ) + .col( + ColumnDef::new(TransactionEvent::TransactionHash) + .string() + .not_null(), + ) + .col( + ColumnDef::new(TransactionEvent::TransactionTypeId) + .tiny_unsigned() + .not_null(), + ) + .index(&mut primary_key()) + .foreign_key(&mut event_log_fk()) + .foreign_key(&mut transaction_type_fk()) + .to_owned() +} + 
+fn transaction_type_fk() -> sea_query::ForeignKeyCreateStatement { + ForeignKey::create() + .name("FK_transaction_type_id") + .from(TransactionEvent::Table, TransactionEvent::TransactionTypeId) + .to(TransactionType::Table, TransactionType::TransactionTypeId) + .on_delete(ForeignKeyAction::Restrict) + .on_update(ForeignKeyAction::Restrict) + .to_owned() +} + +fn event_log_fk() -> sea_query::ForeignKeyCreateStatement { + ForeignKey::create() + .name("FK_event_log_id") + .from(TransactionEvent::Table, TransactionEvent::EventLogId) + .to(EventLog::Table, EventLog::EventLogId) + .on_delete(ForeignKeyAction::Restrict) + .on_update(ForeignKeyAction::Restrict) + .to_owned() +} + +fn primary_key() -> sea_query::IndexCreateStatement { + Index::create() + .primary() + .name("PDX_TransactionEvent") + .col(TransactionEvent::TransactionHash) + .col(TransactionEvent::TransactionTypeId) + .col(TransactionEvent::EventLogId) + .to_owned() +} + +pub fn create_insert_stmt( + event_log_id: u64, + transaction_type: u8, + transaction_hash: String, +) -> SqResult { + let insert_stmt = Query::insert() + .into_table(TransactionEvent::Table) + .columns([ + TransactionEvent::TransactionTypeId, + TransactionEvent::EventLogId, + TransactionEvent::TransactionHash, + ]) + .values(vec![ + transaction_type.into(), + event_log_id.into(), + transaction_hash.into(), + ])? 
+ .to_owned(); + + Ok(insert_stmt) +} diff --git a/event_sidecar/src/sql/tables/transaction_expired.rs b/event_sidecar/src/sql/tables/transaction_expired.rs new file mode 100644 index 00000000..736ded04 --- /dev/null +++ b/event_sidecar/src/sql/tables/transaction_expired.rs @@ -0,0 +1,112 @@ +use sea_query::{ + error::Result as SqResult, ColumnDef, Expr, ForeignKey, ForeignKeyAction, Iden, Index, + InsertStatement, Query, SelectStatement, Table, TableCreateStatement, +}; + +use super::{event_log::EventLog, transaction_type::TransactionType}; + +#[derive(Iden)] +pub(super) enum TransactionExpired { + #[iden = "TransactionExpired"] + Table, + TransactionTypeId, + TransactionHash, + Raw, + EventLogId, +} + +pub fn create_table_stmt() -> TableCreateStatement { + Table::create() + .table(TransactionExpired::Table) + .if_not_exists() + .col( + ColumnDef::new(TransactionExpired::TransactionHash) + .string() + .not_null(), + ) + .col( + ColumnDef::new(TransactionExpired::TransactionTypeId) + .tiny_unsigned() + .not_null(), + ) + .col(ColumnDef::new(TransactionExpired::Raw).text().not_null()) + .col( + ColumnDef::new(TransactionExpired::EventLogId) + .big_unsigned() + .not_null(), + ) + .index(&mut primary_key()) + .foreign_key(&mut event_log_fk()) + .foreign_key(&mut transaction_type_fk()) + .to_owned() +} + +fn transaction_type_fk() -> sea_query::ForeignKeyCreateStatement { + ForeignKey::create() + .name("FK_transaction_type_id") + .from( + TransactionExpired::Table, + TransactionExpired::TransactionTypeId, + ) + .to(TransactionType::Table, TransactionType::TransactionTypeId) + .on_delete(ForeignKeyAction::Restrict) + .on_update(ForeignKeyAction::Restrict) + .to_owned() +} + +fn event_log_fk() -> sea_query::ForeignKeyCreateStatement { + ForeignKey::create() + .name("FK_event_log_id") + .from(TransactionExpired::Table, TransactionExpired::EventLogId) + .to(EventLog::Table, EventLog::EventLogId) + .on_delete(ForeignKeyAction::Restrict) + 
.on_update(ForeignKeyAction::Restrict) + .to_owned() +} + +fn primary_key() -> sea_query::IndexCreateStatement { + Index::create() + .primary() + .name("PDX_TransactionExpired") + .col(TransactionExpired::TransactionHash) + .to_owned() +} + +pub fn create_insert_stmt( + transaction_type: u8, + transaction_hash: String, + event_log_id: u64, + raw: String, +) -> SqResult { + Query::insert() + .into_table(TransactionExpired::Table) + .columns([ + TransactionExpired::TransactionTypeId, + TransactionExpired::TransactionHash, + TransactionExpired::EventLogId, + TransactionExpired::Raw, + ]) + .values(vec![ + transaction_type.into(), + transaction_hash.into(), + event_log_id.into(), + raw.into(), + ]) + .map(|stmt| stmt.to_owned()) +} + +pub fn create_get_by_hash_stmt(transaction_type: u8, transaction_hash: String) -> SelectStatement { + Query::select() + .column(TransactionExpired::Raw) + .column(EventLog::ApiVersion) + .column(EventLog::NetworkName) + .from(TransactionExpired::Table) + .left_join( + EventLog::Table, + Expr::col((EventLog::Table, EventLog::EventLogId)) + .equals((TransactionExpired::Table, TransactionExpired::EventLogId)), + ) + .and_where(Expr::col(TransactionExpired::TransactionTypeId).eq(transaction_type)) + .and_where(Expr::col(TransactionExpired::TransactionHash).eq(transaction_hash)) + .to_owned() +} diff --git a/event_sidecar/src/sql/tables/transaction_processed.rs b/event_sidecar/src/sql/tables/transaction_processed.rs new file mode 100644 index 00000000..4f6af799 --- /dev/null +++ b/event_sidecar/src/sql/tables/transaction_processed.rs @@ -0,0 +1,118 @@ +use sea_query::{ + error::Result as SqResult, ColumnDef, Expr, ForeignKey, ForeignKeyAction, Iden, Index, + InsertStatement, Query, SelectStatement, Table, TableCreateStatement, +}; + +use super::{event_log::EventLog, transaction_type::TransactionType}; + +#[derive(Iden)] +pub enum TransactionProcessed { + #[iden = "TransactionProcessed"] + Table, + TransactionHash, + TransactionTypeId, + Raw, + 
EventLogId, +} + +pub fn create_table_stmt() -> TableCreateStatement { + Table::create() + .table(TransactionProcessed::Table) + .if_not_exists() + .col( + ColumnDef::new(TransactionProcessed::TransactionHash) + .string() + .not_null(), + ) + .col( + ColumnDef::new(TransactionProcessed::TransactionTypeId) + .tiny_unsigned() + .not_null(), + ) + .col(ColumnDef::new(TransactionProcessed::Raw).text().not_null()) + .col( + ColumnDef::new(TransactionProcessed::EventLogId) + .big_unsigned() + .not_null(), + ) + .index(&mut primary_key()) + .foreign_key(&mut event_log_fk()) + .foreign_key(&mut transaction_type_fk()) + .to_owned() +} + +fn transaction_type_fk() -> sea_query::ForeignKeyCreateStatement { + ForeignKey::create() + .name("FK_transaction_type_id") + .from( + TransactionProcessed::Table, + TransactionProcessed::TransactionTypeId, + ) + .to(TransactionType::Table, TransactionType::TransactionTypeId) + .on_delete(ForeignKeyAction::Restrict) + .on_update(ForeignKeyAction::Restrict) + .to_owned() +} + +fn event_log_fk() -> sea_query::ForeignKeyCreateStatement { + ForeignKey::create() + .name("FK_event_log_id") + .from( + TransactionProcessed::Table, + TransactionProcessed::EventLogId, + ) + .to(EventLog::Table, EventLog::EventLogId) + .on_delete(ForeignKeyAction::Restrict) + .on_update(ForeignKeyAction::Restrict) + .to_owned() +} + +fn primary_key() -> sea_query::IndexCreateStatement { + Index::create() + .name("PDX_TransactionProcessed") + .col(TransactionProcessed::TransactionHash) + .col(TransactionProcessed::TransactionTypeId) + .primary() + .to_owned() +} + +pub fn create_insert_stmt( + transaction_type: u8, + transaction_hash: String, + raw: String, + event_log_id: u64, +) -> SqResult { + Query::insert() + .into_table(TransactionProcessed::Table) + .columns([ + TransactionProcessed::TransactionTypeId, + TransactionProcessed::TransactionHash, + TransactionProcessed::Raw, + TransactionProcessed::EventLogId, + ]) + .values(vec![ + transaction_type.into(), + 
transaction_hash.into(), + raw.into(), + event_log_id.into(), + ]) + .map(|stmt| stmt.to_owned()) +} + +pub fn create_get_by_hash_stmt(transaction_type: u8, transaction_hash: String) -> SelectStatement { + Query::select() + .column(TransactionProcessed::Raw) + .column(EventLog::ApiVersion) + .column(EventLog::NetworkName) + .from(TransactionProcessed::Table) + .left_join( + EventLog::Table, + Expr::col((EventLog::Table, EventLog::EventLogId)).equals(( + TransactionProcessed::Table, + TransactionProcessed::EventLogId, + )), + ) + .and_where(Expr::col(TransactionProcessed::TransactionTypeId).eq(transaction_type)) + .and_where(Expr::col(TransactionProcessed::TransactionHash).eq(transaction_hash)) + .to_owned() +} diff --git a/event_sidecar/src/sql/tables/transaction_type.rs b/event_sidecar/src/sql/tables/transaction_type.rs new file mode 100644 index 00000000..e5c7ef23 --- /dev/null +++ b/event_sidecar/src/sql/tables/transaction_type.rs @@ -0,0 +1,61 @@ +use sea_query::{ + error::Result as SqResult, ColumnDef, Iden, InsertStatement, OnConflict, Query, Table, + TableCreateStatement, +}; + +#[derive(Clone)] +pub enum TransactionTypeId { + Deploy = 0, + Version1 = 1, +} + +#[allow(clippy::enum_variant_names)] +#[derive(Iden)] +pub(super) enum TransactionType { + #[iden = "TransactionType"] + Table, + TransactionTypeId, + TransactionTypeName, +} + +pub fn create_table_stmt() -> TableCreateStatement { + Table::create() + .table(TransactionType::Table) + .if_not_exists() + .col( + ColumnDef::new(TransactionType::TransactionTypeId) + .integer() + .not_null() + .primary_key(), + ) + .col( + ColumnDef::new(TransactionType::TransactionTypeName) + .string() + .not_null() + .unique_key(), + ) + .to_owned() +} + +pub fn create_initialise_stmt() -> SqResult { + Ok(Query::insert() + .into_table(TransactionType::Table) + .columns([ + TransactionType::TransactionTypeId, + TransactionType::TransactionTypeName, + ]) + .values(vec![ + (TransactionTypeId::Deploy as u8).into(), + 
"Deploy".into(), + ])? + .values(vec![ + (TransactionTypeId::Version1 as u8).into(), + "Version1".into(), + ])? + .on_conflict( + OnConflict::column(TransactionType::TransactionTypeId) + .do_nothing() + .to_owned(), + ) + .to_owned()) +} diff --git a/sidecar/src/testing.rs b/event_sidecar/src/testing.rs similarity index 100% rename from sidecar/src/testing.rs rename to event_sidecar/src/testing.rs diff --git a/event_sidecar/src/testing/fake_database.rs b/event_sidecar/src/testing/fake_database.rs new file mode 100644 index 00000000..417651c0 --- /dev/null +++ b/event_sidecar/src/testing/fake_database.rs @@ -0,0 +1,657 @@ +use crate::database::types::SseEnvelope; +use crate::types::{ + database::{ + DatabaseReadError, DatabaseReader, DatabaseWriteError, DatabaseWriter, Migration, + TransactionAggregate, TransactionTypeId, + }, + sse_events::*, +}; +use async_trait::async_trait; +use casper_types::{testing::TestRng, AsymmetricType, FinalitySignature as FinSig}; +use rand::Rng; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, + time::{SystemTime, UNIX_EPOCH}, +}; + +#[derive(Clone)] +pub struct FakeDatabase { + data: Arc>>, +} + +impl FakeDatabase { + pub(crate) fn new() -> Self { + Self { + data: Arc::new(Mutex::new(HashMap::new())), + } + } + + /// Creates random SSE event data and saves them, returning the identifiers for each record. 
+ #[allow(clippy::too_many_lines)] + pub(crate) async fn populate_with_events( + &self, + ) -> Result { + let mut rng = TestRng::new(); + let block_added = BlockAdded::random(&mut rng); + let transaction_accepted = TransactionAccepted::random(&mut rng); + let transaction_processed = TransactionProcessed::random(&mut rng, None); + let transaction_expired = TransactionExpired::random(&mut rng, None); + let fault = Fault::random(&mut rng); + let finality_signature = FinalitySignature::random(&mut rng); + let step = Step::random(&mut rng); + + let test_stored_keys = IdentifiersForStoredEvents { + block_added_hash: block_added.hex_encoded_hash(), + block_added_height: block_added.get_height(), + transaction_accepted_info: ( + transaction_accepted.hex_encoded_hash(), + transaction_accepted.api_transaction_type_id(), + ), + transaction_processed_info: ( + transaction_processed.hex_encoded_hash(), + transaction_processed.api_transaction_type_id(), + ), + transaction_expired_info: ( + transaction_expired.hex_encoded_hash(), + transaction_expired.api_transaction_type_id(), + ), + fault_era_id: fault.era_id.value(), + fault_public_key: fault.public_key.to_hex(), + finality_signatures_block_hash: finality_signature.hex_encoded_block_hash(), + step_era_id: step.era_id.value(), + }; + + self.save_block_added_with_event_log_data(block_added, &mut rng) + .await?; + self.save_transaction_accepted_with_event_log_data(transaction_accepted, &mut rng) + .await?; + self.save_transaction_processed_with_event_log_data(transaction_processed, &mut rng) + .await?; + self.save_transaction_expired_with_event_log_data(transaction_expired, &mut rng) + .await?; + self.save_fault_with_event_log_data(fault, &mut rng).await?; + self.save_finality_signature_with_event_log_data(finality_signature, &mut rng) + .await?; + self.save_step_with_event_log_data(step, rng).await?; + + Ok(test_stored_keys) + } + + async fn save_step_with_event_log_data( + &self, + step: Step, + mut rng: TestRng, + ) -> 
Result<(), DatabaseWriteError> { + self.save_step( + step, + rng.gen(), + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await?; + Ok(()) + } + + async fn save_finality_signature_with_event_log_data( + &self, + finality_signature: FinalitySignature, + rng: &mut TestRng, + ) -> Result<(), DatabaseWriteError> { + self.save_finality_signature( + finality_signature, + rng.gen(), + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await?; + Ok(()) + } + + async fn save_fault_with_event_log_data( + &self, + fault: Fault, + rng: &mut TestRng, + ) -> Result<(), DatabaseWriteError> { + self.save_fault( + fault, + rng.gen(), + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await?; + Ok(()) + } + + async fn save_transaction_expired_with_event_log_data( + &self, + transaction_expired: TransactionExpired, + rng: &mut TestRng, + ) -> Result<(), DatabaseWriteError> { + self.save_transaction_expired( + transaction_expired, + rng.gen(), + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await?; + Ok(()) + } + + async fn save_transaction_processed_with_event_log_data( + &self, + transaction_processed: TransactionProcessed, + rng: &mut TestRng, + ) -> Result<(), DatabaseWriteError> { + self.save_transaction_processed( + transaction_processed, + rng.gen(), + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await?; + Ok(()) + } + + async fn save_transaction_accepted_with_event_log_data( + &self, + transaction_accepted: TransactionAccepted, + rng: &mut TestRng, + ) -> Result<(), DatabaseWriteError> { + self.save_transaction_accepted( + transaction_accepted, + rng.gen(), + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await?; + Ok(()) + } + + async fn save_block_added_with_event_log_data( + &self, + block_added: BlockAdded, + rng: &mut TestRng, + ) -> Result<(), DatabaseWriteError> { + 
self.save_block_added( + block_added, + rng.gen(), + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) + .await?; + Ok(()) + } +} + +#[async_trait] +impl DatabaseWriter for FakeDatabase { + #[allow(unused)] + async fn save_block_added( + &self, + block_added: BlockAdded, + event_id: u32, + event_source_address: String, + api_version: String, + network_name: String, + ) -> Result { + let mut data = self.data.lock().expect("Error acquiring lock on data"); + + let identifier_hash = block_added.hex_encoded_hash(); + let identifier_height = block_added.get_height().to_string(); + let stringified_event = + serde_json::to_string(&block_added).expect("Error serialising event data"); + + // For the sake of keeping the test fixture simple, I'm saving the event twice, one record for each identifier. + + data.insert(identifier_hash, stringified_event.clone()); + + data.insert(identifier_height, stringified_event); + + Ok(0) + } + + #[allow(unused)] + async fn save_transaction_accepted( + &self, + transaction_accepted: TransactionAccepted, + event_id: u32, + event_source_address: String, + api_version: String, + network_name: String, + ) -> Result { + let mut data = self.data.lock().expect("Error acquiring lock on data"); + + let hash = transaction_accepted.hex_encoded_hash(); + // This is suffixed to allow storage of each transaction state event without overwriting. 
+ let identifier = format!("{}-accepted", hash); + let stringified_event = + serde_json::to_string(&transaction_accepted).expect("Error serialising event data"); + + data.insert(identifier, stringified_event); + + Ok(0) + } + + #[allow(unused)] + async fn save_transaction_processed( + &self, + transaction_processed: TransactionProcessed, + event_id: u32, + event_source_address: String, + api_version: String, + network_name: String, + ) -> Result { + let mut data = self.data.lock().expect("Error acquiring lock on data"); + + let hash = transaction_processed.hex_encoded_hash(); + // This is suffixed to allow storage of each transaction state event without overwriting. + let identifier = format!("{}-processed", hash); + let stringified_event = + serde_json::to_string(&transaction_processed).expect("Error serialising event data"); + + data.insert(identifier, stringified_event); + + Ok(0) + } + + #[allow(unused)] + async fn save_transaction_expired( + &self, + transaction_expired: TransactionExpired, + event_id: u32, + event_source_address: String, + api_version: String, + network_name: String, + ) -> Result { + let mut data = self.data.lock().expect("Error acquiring lock on data"); + + let hash = transaction_expired.hex_encoded_hash(); + // This is suffixed to allow storage of each transaction state event without overwriting. 
+ let identifier = format!("{}-expired", hash); + let stringified_event = + serde_json::to_string(&transaction_expired).expect("Error serialising event data"); + + data.insert(identifier, stringified_event); + + Ok(0) + } + + #[allow(unused)] + async fn save_fault( + &self, + fault: Fault, + event_id: u32, + event_source_address: String, + api_version: String, + network_name: String, + ) -> Result { + let mut data = self.data.lock().expect("Error acquiring lock on data"); + + let identifier_era = fault.era_id.value().to_string(); + let identifier_public_key = fault.public_key.to_hex(); + + let stringified_event = + serde_json::to_string(&fault).expect("Error serialising event data"); + + // For the sake of keeping the test fixture simple, I'm saving the event twice, one record for each identifier. + + data.insert(identifier_era, stringified_event.clone()); + + data.insert(identifier_public_key, stringified_event); + + Ok(0) + } + + #[allow(unused)] + async fn save_finality_signature( + &self, + finality_signature: FinalitySignature, + event_id: u32, + event_source_address: String, + api_version: String, + network_name: String, + ) -> Result { + let mut data = self.data.lock().expect("Error acquiring lock on data"); + + let identifier = finality_signature.hex_encoded_block_hash(); + let stringified_event = + serde_json::to_string(&finality_signature).expect("Error serialising event data"); + + data.insert(identifier, stringified_event); + + Ok(0) + } + + #[allow(unused)] + async fn save_step( + &self, + step: Step, + event_id: u32, + event_source_address: String, + api_version: String, + network_name: String, + ) -> Result { + let mut data = self.data.lock().expect("Error acquiring lock on data"); + + let identifier = step.era_id.value().to_string(); + let stringified_event = serde_json::to_string(&step).expect("Error serialising event data"); + + data.insert(identifier, stringified_event); + + Ok(0) + } + + #[allow(unused)] + async fn save_shutdown( + &self, + 
event_id: u32, + event_source_address: String, + api_version: String, + network_name: String, + ) -> Result { + let mut data = self.data.lock().expect("Error acquiring lock on data"); + let unix_timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs(); + let event_key = format!("{}-{}", event_source_address, unix_timestamp); + let stringified_event = serde_json::to_string("{}").expect("Error serialising event data"); + + data.insert(event_key, stringified_event); + Ok(0) + } + + async fn execute_migration(&self, _migration: Migration) -> Result<(), DatabaseWriteError> { + //Nothing to do here + Ok(()) + } +} + +#[async_trait] +impl DatabaseReader for FakeDatabase { + async fn get_latest_block(&self) -> Result, DatabaseReadError> { + let mut test_rng = TestRng::new(); + + let block_added = BlockAdded::random(&mut test_rng); + + Ok(SseEnvelope::new( + block_added, + "2.0.0".to_string(), + "network-1".to_string(), + )) + } + + async fn get_block_by_height( + &self, + height: u64, + ) -> Result, DatabaseReadError> { + let data = self.data.lock().expect("Error acquiring lock on data"); + + return if let Some(event) = data.get(&height.to_string()) { + let entity = serde_json::from_str::(event) + .map_err(DatabaseReadError::Serialisation)?; + Ok(SseEnvelope::new( + entity, + "2.0.0".to_string(), + "network-1".to_string(), + )) + } else { + Err(DatabaseReadError::NotFound) + }; + } + + async fn get_block_by_hash( + &self, + hash: &str, + ) -> Result, DatabaseReadError> { + let data = self.data.lock().expect("Error acquiring lock on data"); + + return if let Some(event) = data.get(hash) { + let entity = serde_json::from_str::(event) + .map_err(DatabaseReadError::Serialisation)?; + Ok(SseEnvelope::new( + entity, + "2.0.0".to_string(), + "network-1".to_string(), + )) + } else { + Err(DatabaseReadError::NotFound) + }; + } + + #[allow(clippy::too_many_lines)] + async fn get_transaction_aggregate_by_identifier( + &self, + 
_transaction_type: &TransactionTypeId, + hash: &str, + ) -> Result { + let data = self.data.lock().expect("Error acquiring lock on data"); + + let accepted_key = format!("{}-accepted", hash); + let processed_key = format!("{}-processed", hash); + let expired_key = format!("{}-expired", hash); + + return if let Some(accepted) = data.get(&accepted_key) { + let transaction_accepted = serde_json::from_str::(accepted) + .map_err(DatabaseReadError::Serialisation)?; + + if let Some(processed) = data.get(&processed_key) { + let transaction_processed = serde_json::from_str::(processed) + .map_err(DatabaseReadError::Serialisation)?; + + Ok(TransactionAggregate { + transaction_hash: hash.to_string(), + transaction_accepted: Some(SseEnvelope::new( + transaction_accepted, + "2.0.0".to_string(), + "network-1".to_string(), + )), + transaction_processed: Some(SseEnvelope::new( + transaction_processed, + "2.0.0".to_string(), + "network-1".to_string(), + )), + transaction_expired: false, + }) + } else if data.get(&expired_key).is_some() { + let transaction_expired = match data.get(&expired_key) { + None => None, + Some(raw) => Some( + serde_json::from_str::(raw) + .map_err(DatabaseReadError::Serialisation)?, + ), + }; + Ok(TransactionAggregate { + transaction_hash: hash.to_string(), + transaction_accepted: Some(SseEnvelope::new( + transaction_accepted, + "2.0.0".to_string(), + "network-1".to_string(), + )), + transaction_processed: None, + transaction_expired: transaction_expired.is_some(), + }) + } else { + Ok(TransactionAggregate { + transaction_hash: hash.to_string(), + transaction_accepted: Some(SseEnvelope::new( + transaction_accepted, + "2.0.0".to_string(), + "network-1".to_string(), + )), + transaction_processed: None, + transaction_expired: false, + }) + } + } else { + Err(DatabaseReadError::NotFound) + }; + } + + async fn get_transaction_accepted_by_hash( + &self, + _transaction_type: &TransactionTypeId, + hash: &str, + ) -> Result, DatabaseReadError> { + let identifier = 
format!("{}-accepted", hash); + + let data = self.data.lock().expect("Error acquiring lock on data"); + + return if let Some(event) = data.get(&identifier) { + let entity = serde_json::from_str::(event) + .map_err(DatabaseReadError::Serialisation)?; + Ok(SseEnvelope::new( + entity, + "2.0.0".to_string(), + "network-1".to_string(), + )) + } else { + Err(DatabaseReadError::NotFound) + }; + } + + async fn get_transaction_processed_by_hash( + &self, + _transaction_type: &TransactionTypeId, + hash: &str, + ) -> Result, DatabaseReadError> { + let identifier = format!("{}-processed", hash); + + let data = self.data.lock().expect("Error acquiring lock on data"); + + return if let Some(event) = data.get(&identifier) { + let entity = serde_json::from_str::(event) + .map_err(DatabaseReadError::Serialisation)?; + Ok(SseEnvelope::new( + entity, + "2.0.0".to_string(), + "network-1".to_string(), + )) + } else { + Err(DatabaseReadError::NotFound) + }; + } + + async fn get_transaction_expired_by_hash( + &self, + _transaction_type: &TransactionTypeId, + hash: &str, + ) -> Result, DatabaseReadError> { + let identifier = format!("{}-expired", hash); + + let data = self.data.lock().expect("Error acquiring lock on data"); + + return if let Some(event) = data.get(&identifier) { + let entity = serde_json::from_str::(event) + .map_err(DatabaseReadError::Serialisation)?; + Ok(SseEnvelope::new( + entity, + "2.0.0".to_string(), + "network-1".to_string(), + )) + } else { + Err(DatabaseReadError::NotFound) + }; + } + + async fn get_faults_by_public_key( + &self, + public_key: &str, + ) -> Result>, DatabaseReadError> { + let data = self.data.lock().expect("Error acquiring lock on data"); + + return if let Some(event) = data.get(public_key) { + let fault = + serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation)?; + Ok(vec![SseEnvelope::new( + fault, + "2.0.0".to_string(), + "network-1".to_string(), + )]) + } else { + Err(DatabaseReadError::NotFound) + }; + } + + async fn 
get_faults_by_era( + &self, + era: u64, + ) -> Result>, DatabaseReadError> { + let data = self.data.lock().expect("Error acquiring lock on data"); + + return if let Some(event) = data.get(&era.to_string()) { + let fault = + serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation)?; + Ok(vec![SseEnvelope::new( + fault, + "2.0.0".to_string(), + "network-1".to_string(), + )]) + } else { + Err(DatabaseReadError::NotFound) + }; + } + + async fn get_finality_signatures_by_block( + &self, + block_hash: &str, + ) -> Result>, DatabaseReadError> { + let data = self.data.lock().expect("Error acquiring lock on data"); + + return if let Some(event) = data.get(block_hash) { + let finality_signature = + serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation)?; + Ok(vec![SseEnvelope::new( + finality_signature, + "2.0.0".to_string(), + "network-1".to_string(), + )]) + } else { + Err(DatabaseReadError::NotFound) + }; + } + + async fn get_step_by_era(&self, era: u64) -> Result, DatabaseReadError> { + let data = self.data.lock().expect("Error acquiring lock on data"); + + return if let Some(event) = data.get(&era.to_string()) { + let entity = + serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation)?; + Ok(SseEnvelope::new( + entity, + "2.0.0".to_string(), + "network-1".to_string(), + )) + } else { + Err(DatabaseReadError::NotFound) + }; + } + + async fn get_number_of_events(&self) -> Result { + Ok(0) + } + + async fn get_newest_migration_version(&self) -> Result, DatabaseReadError> { + Ok(None) + } +} + +pub struct IdentifiersForStoredEvents { + pub block_added_hash: String, + pub block_added_height: u64, + pub transaction_accepted_info: (String, TransactionTypeId), + pub transaction_processed_info: (String, TransactionTypeId), + pub transaction_expired_info: (String, TransactionTypeId), + pub fault_public_key: String, + pub fault_era_id: u64, + pub finality_signatures_block_hash: String, + pub step_era_id: u64, +} diff --git 
a/sidecar/src/testing/fake_event_stream.rs b/event_sidecar/src/testing/fake_event_stream.rs similarity index 80% rename from sidecar/src/testing/fake_event_stream.rs rename to event_sidecar/src/testing/fake_event_stream.rs index 8cf96063..f3a303ef 100644 --- a/sidecar/src/testing/fake_event_stream.rs +++ b/event_sidecar/src/testing/fake_event_stream.rs @@ -20,17 +20,17 @@ use crate::{ utils::tests::display_duration, }; use casper_event_types::{sse_data::SseData, Filter as SseFilter}; -use casper_types::{testing::TestRng, ProtocolVersion}; +use casper_types::{testing::TestRng, ProtocolVersion, Transaction}; use warp::{path::end, Filter}; const TIME_BETWEEN_BLOCKS: Duration = Duration::from_secs(30); const BLOCKS_IN_ERA: u64 = 4; const NUMBER_OF_VALIDATORS: u16 = 100; -const NUMBER_OF_DEPLOYS_PER_BLOCK: u16 = 20; -const API_VERSION: ProtocolVersion = ProtocolVersion::from_parts(1, 5, 2); +const NUMBER_OF_TRANSACTIONS_PER_BLOCK: u16 = 20; +const API_VERSION: ProtocolVersion = ProtocolVersion::from_parts(2, 0, 0); type FrequencyOfStepEvents = u8; -type NumberOfDeployEventsInBurst = u64; +type NumberOftransactionEventsInBurst = u64; #[derive(Clone)] pub enum Bound { @@ -61,7 +61,7 @@ pub struct Restart { pub enum Scenario { Realistic(GenericScenarioSettings), LoadTestingStep(GenericScenarioSettings, FrequencyOfStepEvents), - LoadTestingDeploy(GenericScenarioSettings, NumberOfDeployEventsInBurst), + LoadTestingTransaction(GenericScenarioSettings, NumberOftransactionEventsInBurst), Spam(Bound), } @@ -72,8 +72,8 @@ impl Display for Scenario { Scenario::LoadTestingStep(_, _) => { write!(f, "Load Testing [Step]") } - Scenario::LoadTestingDeploy(_, _) => { - write!(f, "Load Testing [Deploy]") + Scenario::LoadTestingTransaction(_, _) => { + write!(f, "Load Testing [transaction]") } Scenario::Spam(_) => { write!(f, "Spam") @@ -112,8 +112,8 @@ async fn execute_scenario( ) .await } - Scenario::LoadTestingDeploy(settings, num_in_burst) => { - do_load_testing_deploy( + 
Scenario::LoadTestingTransaction(settings, num_in_burst) => { + do_load_testing_transaction( test_rng, events_sender, events_receiver, @@ -174,8 +174,9 @@ fn build_event_stream_server( println!("{} :: Started", log_details); let temp_dir = TempDir::new().expect("Error creating temporary directory"); - let event_stream_server = EventStreamServer::new(ess_config, temp_dir.path().to_path_buf()) - .expect("Error spinning up Event Stream Server"); + let event_stream_server = + EventStreamServer::new(ess_config, temp_dir.path().to_path_buf(), true) + .expect("Error spinning up Event Stream Server"); (event_stream_server, log_details) } @@ -187,13 +188,13 @@ async fn do_spam_testing( bound: Bound, ) -> TestRng { let scenario_task = tokio::spawn(async move { - spam_deploy(&mut test_rng, events_sender.clone(), bound).await; + spam_transaction(&mut test_rng, events_sender.clone(), bound).await; test_rng }); let broadcasting_task = tokio::spawn(async move { while let Some(event) = events_receiver.recv().await { - event_stream_server.broadcast(event, Some(SseFilter::Main), None); + event_stream_server.broadcast(event, Some(SseFilter::Events)); } }); @@ -201,7 +202,7 @@ async fn do_spam_testing( test_rng.expect("Should have returned TestRng for re-use") } -async fn do_load_testing_deploy( +async fn do_load_testing_transaction( mut test_rng: TestRng, events_sender: Sender, mut events_receiver: Receiver, @@ -210,7 +211,7 @@ async fn do_load_testing_deploy( num_in_burst: u64, ) -> TestRng { let scenario_task = tokio::spawn(async move { - load_testing_deploy( + load_testing_transaction( &mut test_rng, events_sender.clone(), settings.initial_phase, @@ -226,17 +227,17 @@ async fn do_load_testing_deploy( events_sender .send(SseData::Shutdown) .await - .expect("Scenario::LoadTestingDeploy failed sending shutdown message!"); + .expect("Scenario::LoadTestingtransaction failed sending shutdown message!"); tokio::time::sleep(delay_before_restart).await; - load_testing_deploy(&mut test_rng, 
events_sender, final_phase, num_in_burst).await; + load_testing_transaction(&mut test_rng, events_sender, final_phase, num_in_burst).await; } test_rng }); let broadcasting_task = tokio::spawn(async move { while let Some(event) = events_receiver.recv().await { - event_stream_server.broadcast(event, Some(SseFilter::Main), None); + event_stream_server.broadcast(event, Some(SseFilter::Events)); } }); @@ -278,7 +279,7 @@ async fn do_load_testing_step( }); let broadcasting_task = tokio::spawn(async move { while let Some(event) = events_receiver.recv().await { - event_stream_server.broadcast(event, Some(SseFilter::Main), None); + event_stream_server.broadcast(event, Some(SseFilter::Events)); } }); let (test_rng, _) = tokio::join!(scenario_task, broadcasting_task); @@ -313,7 +314,7 @@ async fn handle_realistic_scenario( }); let broadcasting_task = tokio::spawn(async move { while let Some(event) = events_receiver.recv().await { - event_stream_server.broadcast(event, Some(SseFilter::Main), None); + event_stream_server.broadcast(event, Some(SseFilter::Events)); } }); let (test_rng, _) = tokio::join!(scenario_task, broadcasting_task); @@ -349,7 +350,7 @@ async fn realistic_event_streaming( type RealisticScenarioData = ( Vec, - Vec<(SseData, casper_event_types::Deploy)>, + Vec<(SseData, Transaction)>, Vec, Vec, Vec, @@ -360,22 +361,25 @@ type RealisticScenarioData = ( fn prepare_data(test_rng: &mut TestRng, loops_in_duration: u64) -> RealisticScenarioData { let finality_signatures_per_loop = NUMBER_OF_VALIDATORS as u64; let total_finality_signature_events = finality_signatures_per_loop * loops_in_duration; - let deploy_events_per_loop = NUMBER_OF_DEPLOYS_PER_BLOCK as u64; - let total_deploy_events = deploy_events_per_loop * loops_in_duration; + let transaction_events_per_loop = NUMBER_OF_TRANSACTIONS_PER_BLOCK as u64; + let total_transaction_events = transaction_events_per_loop * loops_in_duration; let total_block_added_events = loops_in_duration; let total_step_events = 
loops_in_duration / BLOCKS_IN_ERA; let block_added_events = iter::repeat_with(|| SseData::random_block_added(test_rng)) .take(plus_twenty_percent(total_block_added_events) as usize) .collect_vec(); - let deploy_accepted_events = iter::repeat_with(|| SseData::random_deploy_accepted(test_rng)) - .take(plus_twenty_percent(total_deploy_events) as usize) - .collect_vec(); - let deploy_expired_events = iter::repeat_with(|| SseData::random_deploy_expired(test_rng)) - .take((loops_in_duration / 2 + 1) as usize) - .collect_vec(); - let deploy_processed_events = iter::repeat_with(|| SseData::random_deploy_processed(test_rng)) - .take(plus_twenty_percent(total_deploy_events) as usize) - .collect_vec(); + let transaction_accepted_events = + iter::repeat_with(|| SseData::random_transaction_accepted(test_rng)) + .take(plus_twenty_percent(total_transaction_events) as usize) + .collect_vec(); + let transaction_expired_events = + iter::repeat_with(|| SseData::random_transaction_expired(test_rng)) + .take((loops_in_duration / 2 + 1) as usize) + .collect_vec(); + let transaction_processed_events = + iter::repeat_with(|| SseData::random_transaction_processed(test_rng)) + .take(plus_twenty_percent(total_transaction_events) as usize) + .collect_vec(); let fault_events = iter::repeat_with(|| SseData::random_fault(test_rng)) .take((loops_in_duration / 2 + 1) as usize) .collect_vec(); @@ -388,9 +392,9 @@ fn prepare_data(test_rng: &mut TestRng, loops_in_duration: u64) -> RealisticScen .collect_vec(); ( block_added_events, - deploy_accepted_events, - deploy_expired_events, - deploy_processed_events, + transaction_accepted_events, + transaction_expired_events, + transaction_processed_events, fault_events, finality_signature_events, step_events, @@ -407,9 +411,9 @@ async fn do_stream( ) { let ( mut block_added_events, - mut deploy_accepted_events, - mut deploy_expired_events, - mut deploy_processed_events, + mut transaction_accepted_events, + mut transaction_expired_events, + mut 
transaction_processed_events, mut fault_events, mut finality_signature_events, mut step_events, @@ -428,17 +432,17 @@ async fn do_stream( emit_events( &events_sender, &mut finality_signature_events, - &mut deploy_processed_events, + &mut transaction_processed_events, &mut block_added_events, - &mut deploy_accepted_events, + &mut transaction_accepted_events, ) .await; } if era_counter % 2 == 0 { events_sender - .send(deploy_expired_events.pop().unwrap()) + .send(transaction_expired_events.pop().unwrap()) .await - .expect("Failed sending deploy_expired_event"); + .expect("Failed sending transaction_expired_event"); } else { events_sender .send(fault_events.pop().unwrap()) @@ -453,14 +457,14 @@ async fn do_stream( async fn emit_events( events_sender: &Sender, finality_signature_events: &mut Vec, - deploy_processed_events: &mut Vec, + transaction_processed_events: &mut Vec, block_added_events: &mut Vec, - deploy_accepted_events: &mut Vec<(SseData, casper_event_types::Deploy)>, + transaction_accepted_events: &mut Vec<(SseData, casper_types::Transaction)>, ) { emit_sig_events(events_sender, finality_signature_events).await; - emit_deploy_processed_events(events_sender, deploy_processed_events).await; + emit_transaction_processed_events(events_sender, transaction_processed_events).await; emit_block_added_events(events_sender, block_added_events).await; - emit_deploy_accepted_events(events_sender, deploy_accepted_events).await; + emit_transaction_accepted_events(events_sender, transaction_accepted_events).await; } async fn emit_block_added_events( @@ -473,15 +477,15 @@ async fn emit_block_added_events( .expect("Failed sending block_added_event"); } -async fn emit_deploy_accepted_events( +async fn emit_transaction_accepted_events( events_sender: &Sender, - deploy_accepted_events: &mut Vec<(SseData, casper_event_types::Deploy)>, + transaction_accepted_events: &mut Vec<(SseData, casper_types::Transaction)>, ) { - for _ in 0..NUMBER_OF_DEPLOYS_PER_BLOCK { + for _ in 
0..NUMBER_OF_TRANSACTIONS_PER_BLOCK { events_sender - .send(deploy_accepted_events.pop().unwrap().0) + .send(transaction_accepted_events.pop().unwrap().0) .await - .expect("Failed sending deploy_accepted_event"); + .expect("Failed sending transaction_accepted_event"); } } @@ -492,15 +496,15 @@ async fn emit_step(events_sender: &Sender, step_events: &mut Vec, - deploy_processed_events: &mut Vec, + transaction_processed_events: &mut Vec, ) { - for _ in 0..NUMBER_OF_DEPLOYS_PER_BLOCK { + for _ in 0..NUMBER_OF_TRANSACTIONS_PER_BLOCK { events_sender - .send(deploy_processed_events.pop().unwrap()) + .send(transaction_processed_events.pop().unwrap()) .await - .expect("Failed sending deploy_processed_events"); + .expect("Failed sending transaction_processed_events"); } } @@ -540,7 +544,7 @@ async fn load_testing_step( } } -async fn spam_deploy(test_rng: &mut TestRng, events_sender: Sender, bound: Bound) { +async fn spam_transaction(test_rng: &mut TestRng, events_sender: Sender, bound: Bound) { let start_time = Instant::now(); events_sender .send(SseData::ApiVersion(API_VERSION)) @@ -551,16 +555,16 @@ async fn spam_deploy(test_rng: &mut TestRng, events_sender: Sender, bou while start_time.elapsed() < duration { for _ in 0..100 { events_sender - .send(SseData::random_deploy_accepted(test_rng).0) + .send(SseData::random_transaction_accepted(test_rng).0) .await - .expect("failed sending random_deploy_accepted"); + .expect("failed sending random_transaction_accepted"); } } } } } -async fn load_testing_deploy( +async fn load_testing_transaction( test_rng: &mut TestRng, events_sender: Sender, bound: Bound, @@ -577,16 +581,16 @@ async fn load_testing_deploy( while start_time.elapsed() < duration { for _ in 0..burst_size { events_sender - .send(SseData::random_deploy_accepted(test_rng).0) + .send(SseData::random_transaction_accepted(test_rng).0) .await - .expect("failed sending random_deploy_accepted"); + .expect("failed sending random_transaction_accepted"); } 
tokio::time::sleep(Duration::from_millis(500)).await; for _ in 0..burst_size { events_sender - .send(SseData::random_deploy_processed(test_rng)) + .send(SseData::random_transaction_processed(test_rng)) .await - .expect("failed sending random_deploy_processed"); + .expect("failed sending random_transaction_processed"); } } } @@ -594,19 +598,22 @@ async fn load_testing_deploy( } pub async fn setup_mock_build_version_server(port: u16) -> (Sender<()>, Receiver<()>) { - setup_mock_build_version_server_with_version(port, "1.5.2".to_string()).await + setup_mock_build_version_server_with_version(port, "2.0.0".to_string(), "network-1".to_string()) + .await } pub async fn setup_mock_build_version_server_with_version( port: u16, version: String, + network_name: String, ) -> (Sender<()>, Receiver<()>) { let (shutdown_tx, mut shutdown_rx) = mpsc_channel(10); let (after_shutdown_tx, after_shutdown_rx) = mpsc_channel(10); let api = warp::path!("status") .and(warp::get()) .map(move || { - let result = json!({ "build_version": version.clone() }); + let result = + json!({ "build_version": version.clone(), "chainspec_name": network_name.clone()}); warp::reply::json(&result) }) .and(end()); diff --git a/sidecar/src/testing/mock_node.rs b/event_sidecar/src/testing/mock_node.rs similarity index 85% rename from sidecar/src/testing/mock_node.rs rename to event_sidecar/src/testing/mock_node.rs index 8f8c3fef..4965d784 100644 --- a/sidecar/src/testing/mock_node.rs +++ b/event_sidecar/src/testing/mock_node.rs @@ -4,7 +4,7 @@ pub mod tests { use crate::testing::fake_event_stream::setup_mock_build_version_server_with_version; use crate::testing::raw_sse_events_utils::tests::{ - example_data_1_5_3, simple_sse_server, sse_server_example_data, EventsWithIds, + example_data_2_0_1, simple_sse_server, sse_server_example_data, EventsWithIds, }; use crate::testing::testing_config::get_port; use futures::join; @@ -13,6 +13,7 @@ pub mod tests { pub struct MockNodeBuilder { pub version: String, + pub 
network_name: String, pub data_of_node: EventsWithIds, pub cache_of_node: Option, pub sse_port: Option, @@ -20,13 +21,14 @@ pub mod tests { } impl MockNodeBuilder { - pub fn build_example_1_5_3_node( + pub fn build_example_2_0_1_node( node_port_for_sse_connection: u16, node_port_for_rest_connection: u16, ) -> MockNode { MockNodeBuilder { - version: "1.5.3".to_string(), - data_of_node: example_data_1_5_3(), + version: "2.0.1".to_string(), + network_name: "network1".to_string(), + data_of_node: example_data_2_0_1(), cache_of_node: None, sse_port: Some(node_port_for_sse_connection), rest_port: Some(node_port_for_rest_connection), @@ -34,24 +36,43 @@ pub mod tests { .build() } - pub fn build_example_1_5_2_node( + pub fn build_example_2_0_0_node( node_port_for_sse_connection: u16, node_port_for_rest_connection: u16, ) -> MockNode { Self::build_example_node_with_version( Some(node_port_for_sse_connection), Some(node_port_for_rest_connection), - "1.5.2", + "2.0.0", + "network1", ) } + pub fn build_example_2_0_0_node_with_data( + node_port_for_sse_connection: u16, + node_port_for_rest_connection: u16, + data: EventsWithIds, + ) -> MockNode { + MockNodeBuilder { + version: "2.0.0".to_string(), + network_name: "network-1".to_string(), + data_of_node: data, + cache_of_node: None, + sse_port: Some(node_port_for_sse_connection), + rest_port: Some(node_port_for_rest_connection), + } + .build() + } + pub fn build_example_node_with_version( node_port_for_sse_connection: Option, node_port_for_rest_connection: Option, version: &str, + network_name: &str, ) -> MockNode { MockNodeBuilder { version: version.to_string(), + network_name: network_name.to_string(), data_of_node: sse_server_example_data(version), cache_of_node: None, sse_port: node_port_for_sse_connection, @@ -69,12 +90,14 @@ pub mod tests { cache_of_node, sse_port, rest_port, + self.network_name.clone(), ) } } pub struct MockNode { version: String, + network_name: String, data_of_node: EventsWithIds, cache_of_node: 
EventsWithIds, sse_port: u16, @@ -108,9 +131,11 @@ pub mod tests { cache_of_node: EventsWithIds, sse_port: u16, rest_port: u16, + network_name: String, ) -> MockNode { MockNode { version, + network_name, data_of_node, cache_of_node, sse_port, @@ -126,6 +151,7 @@ pub mod tests { let data_of_node_clone = self.data_of_node.clone(); let cache_of_node = self.cache_of_node.clone(); let version = self.version.clone(); + let network_name = self.network_name.clone(); let sse_port = self.sse_port; let rest_port = self.rest_port; @@ -136,7 +162,7 @@ pub mod tests { }); //Spin up rest server let rest_server_join = tokio::spawn(async move { - setup_mock_build_version_server_with_version(rest_port, version).await + setup_mock_build_version_server_with_version(rest_port, version, network_name).await }); //Get handles to stop the above servers, store them in the structure let sse_and_rest_joins = join!(sse_server_join, rest_server_join); diff --git a/sidecar/src/testing/raw_sse_events_utils.rs b/event_sidecar/src/testing/raw_sse_events_utils.rs similarity index 70% rename from sidecar/src/testing/raw_sse_events_utils.rs rename to event_sidecar/src/testing/raw_sse_events_utils.rs index 7d07cd6f..20b8fc5f 100644 --- a/sidecar/src/testing/raw_sse_events_utils.rs +++ b/event_sidecar/src/testing/raw_sse_events_utils.rs @@ -10,23 +10,23 @@ pub(crate) mod tests { pub type EventsWithIds = Vec<(Option, String)>; - pub fn example_data_1_5_3() -> EventsWithIds { + pub fn example_data_2_0_1() -> EventsWithIds { vec![ - (None, "{\"ApiVersion\":\"1.5.3\"}".to_string()), + (None, "{\"ApiVersion\":\"2.0.1\"}".to_string()), ( Some("0".to_string()), - example_block_added_1_5_2(BLOCK_HASH_3, "3"), + example_block_added_2_0_0(BLOCK_HASH_3, 3u64), ), ] } - pub fn sse_server_shutdown_1_5_2_data() -> EventsWithIds { + pub fn sse_server_shutdown_2_0_0_data() -> EventsWithIds { vec![ - (None, "{\"ApiVersion\":\"1.5.2\"}".to_string()), + (None, "{\"ApiVersion\":\"2.0.0\"}".to_string()), 
(Some("0".to_string()), shutdown()), ( Some("1".to_string()), - example_block_added_1_5_2(BLOCK_HASH_1, "1"), + example_block_added_2_0_0(BLOCK_HASH_1, 1u64), ), ] } @@ -38,7 +38,7 @@ pub(crate) mod tests { ) -> (EventsWithIds, TestRng) { let (blocks_added, rng) = generate_random_blocks_added(number_of_block_added_messages, start_index, rng); - let data = vec![(None, "{\"ApiVersion\":\"1.5.2\"}".to_string())]; + let data = vec![(None, "{\"ApiVersion\":\"2.0.0\"}".to_string())]; let mut data: EventsWithIds = data.into_iter().chain(blocks_added).collect(); let shutdown_index: u32 = start_index + 31; data.push((Some(shutdown_index.to_string()), shutdown())); @@ -50,41 +50,51 @@ pub(crate) mod tests { (None, format!("{{\"ApiVersion\":\"{version}\"}}")), ( Some("1".to_string()), - example_block_added_1_5_2(BLOCK_HASH_2, "2"), + example_block_added_2_0_0(BLOCK_HASH_2, 2u64), ), ] } - pub fn sse_server_example_1_5_2_data() -> EventsWithIds { + pub fn sse_server_example_2_0_0_data() -> EventsWithIds { vec![ - (None, "{\"ApiVersion\":\"1.5.2\"}".to_string()), + (None, "{\"ApiVersion\":\"2.0.0\"}".to_string()), ( Some("1".to_string()), - example_block_added_1_5_2(BLOCK_HASH_2, "2"), + example_block_added_2_0_0(BLOCK_HASH_2, 2u64), ), ] } - pub fn sse_server_example_1_5_2_data_second() -> EventsWithIds { + pub fn sse_server_sigs_2_0_0_data() -> EventsWithIds { vec![ - (None, "{\"ApiVersion\":\"1.5.2\"}".to_string()), + (None, "{\"ApiVersion\":\"2.0.0\"}".to_string()), + ( + Some("1".to_string()), + example_finality_signature_2_0_0(BLOCK_HASH_2), + ), + ] + } + + pub fn sse_server_example_2_0_0_data_second() -> EventsWithIds { + vec![ + (None, "{\"ApiVersion\":\"2.0.0\"}".to_string()), ( Some("3".to_string()), - example_block_added_1_5_2(BLOCK_HASH_3, "3"), + example_block_added_2_0_0(BLOCK_HASH_3, 3u64), ), ] } - pub fn sse_server_example_1_5_2_data_third() -> EventsWithIds { + pub fn sse_server_example_2_0_0_data_third() -> EventsWithIds { vec![ - (None, 
"{\"ApiVersion\":\"1.5.2\"}".to_string()), + (None, "{\"ApiVersion\":\"2.0.0\"}".to_string()), ( Some("1".to_string()), - example_block_added_1_5_2(BLOCK_HASH_3, "3"), + example_block_added_2_0_0(BLOCK_HASH_3, 3u64), ), ( Some("1".to_string()), - example_block_added_1_5_2(BLOCK_HASH_4, "4"), + example_block_added_2_0_0(BLOCK_HASH_4, 4u64), ), ] } @@ -125,7 +135,7 @@ pub(crate) mod tests { if let SseData::BlockAdded { block_hash, .. } = block_added { let encoded_hash = HexFmt(block_hash.inner()).to_string(); let block_added_raw = - example_block_added_1_5_2(encoded_hash.as_str(), index.as_str()); + example_block_added_2_0_0(encoded_hash.as_str(), (i + start_index) as u64); blocks_added.push((Some(index), block_added_raw)); } else { panic!("random_block_added didn't return SseData::BlockAdded"); diff --git a/sidecar/src/testing/shared.rs b/event_sidecar/src/testing/shared.rs similarity index 71% rename from sidecar/src/testing/shared.rs rename to event_sidecar/src/testing/shared.rs index cf535ebd..e25ce367 100644 --- a/sidecar/src/testing/shared.rs +++ b/event_sidecar/src/testing/shared.rs @@ -6,9 +6,9 @@ pub(crate) enum EventType { ApiVersion, SidecarVersion, BlockAdded, - DeployAccepted, - DeployExpired, - DeployProcessed, + TransactionAccepted, + TransactionExpired, + TransactionProcessed, Fault, FinalitySignature, Step, @@ -21,9 +21,9 @@ impl From for EventType { SseData::ApiVersion(_) => EventType::ApiVersion, SseData::SidecarVersion(_) => EventType::SidecarVersion, SseData::BlockAdded { .. } => EventType::BlockAdded, - SseData::DeployAccepted { .. } => EventType::DeployAccepted, - SseData::DeployProcessed { .. } => EventType::DeployProcessed, - SseData::DeployExpired { .. } => EventType::DeployExpired, + SseData::TransactionAccepted { .. } => EventType::TransactionAccepted, + SseData::TransactionProcessed { .. } => EventType::TransactionProcessed, + SseData::TransactionExpired { .. } => EventType::TransactionExpired, SseData::Fault { .. 
} => EventType::Fault, SseData::FinalitySignature(_) => EventType::FinalitySignature, SseData::Step { .. } => EventType::Step, @@ -38,9 +38,9 @@ impl Display for EventType { EventType::ApiVersion => "ApiVersion", EventType::SidecarVersion => "SidecarVersion", EventType::BlockAdded => "BlockAdded", - EventType::DeployAccepted => "DeployAccepted", - EventType::DeployExpired => "DeployExpired", - EventType::DeployProcessed => "DeployProcessed", + EventType::TransactionAccepted => "TransactionAccepted", + EventType::TransactionExpired => "TransactionExpired", + EventType::TransactionProcessed => "TransactionProcessed", EventType::Fault => "Fault", EventType::FinalitySignature => "FinalitySignature", EventType::Step => "Step", diff --git a/sidecar/src/testing/simple_sse_server.rs b/event_sidecar/src/testing/simple_sse_server.rs similarity index 98% rename from sidecar/src/testing/simple_sse_server.rs rename to event_sidecar/src/testing/simple_sse_server.rs index c455696e..2bf86cbb 100644 --- a/sidecar/src/testing/simple_sse_server.rs +++ b/event_sidecar/src/testing/simple_sse_server.rs @@ -28,10 +28,6 @@ pub(crate) mod tests { pub routes: HashMap, CacheAndData>, } - #[derive(Debug)] - struct Nope; - impl warp::reject::Reject for Nope {} - type ShutdownCallbacks = Arc, String)>>>>>; impl SimpleSseServer { diff --git a/sidecar/src/testing/test_clock.rs b/event_sidecar/src/testing/test_clock.rs similarity index 100% rename from sidecar/src/testing/test_clock.rs rename to event_sidecar/src/testing/test_clock.rs diff --git a/sidecar/src/testing/testing_config.rs b/event_sidecar/src/testing/testing_config.rs similarity index 65% rename from sidecar/src/testing/testing_config.rs rename to event_sidecar/src/testing/testing_config.rs index 2cd870da..90e97c13 100644 --- a/sidecar/src/testing/testing_config.rs +++ b/event_sidecar/src/testing/testing_config.rs @@ -1,13 +1,20 @@ #[cfg(test)] use portpicker::Port; -use std::sync::{Arc, Mutex}; +use std::{ + net::{IpAddr, Ipv4Addr}, + 
sync::{Arc, Mutex}, +}; use tempfile::TempDir; -use crate::types::config::{Config, Connection, StorageConfig}; +use crate::types::config::{Connection, RestApiServerConfig, SseEventServerConfig, StorageConfig}; /// A basic wrapper with helper methods for constructing and tweaking [Config]s for use in tests. +#[derive(Clone)] pub struct TestingConfig { - pub(crate) config: Config, + pub(crate) event_server_config: SseEventServerConfig, + pub(crate) storage_config: StorageConfig, + pub(crate) rest_api_server_config: RestApiServerConfig, + pub(crate) network_name: Option, } #[cfg(test)] @@ -36,12 +43,21 @@ pub fn get_port() -> u16 { /// - `storage_path` is set to the path of the [TempDir] provided. /// - `node_connection_port` is set dynamically to a free port. /// - The outbound server (REST & SSE) ports are set dynamically to free ports. +/// - If `enable_db_storage` is set to false, the database storage is disabled. #[cfg(test)] -pub(crate) fn prepare_config(temp_storage: &TempDir) -> TestingConfig { +pub(crate) fn prepare_config( + temp_storage: &TempDir, + enable_db_storage: bool, + maybe_network_name: Option, +) -> TestingConfig { let path_to_temp_storage = temp_storage.path().to_string_lossy().to_string(); let mut testing_config = TestingConfig::default(); - testing_config.set_storage_path(path_to_temp_storage); + if !enable_db_storage { + testing_config.storage_config.clear_db_storage(); + } + testing_config.set_storage_folder(path_to_temp_storage); + testing_config.set_network_name(maybe_network_name); testing_config.allocate_available_ports(); testing_config @@ -50,31 +66,45 @@ pub(crate) fn prepare_config(temp_storage: &TempDir) -> TestingConfig { impl TestingConfig { /// Creates a Default instance of TestingConfig which contains a Default instance of [Config] pub(crate) fn default() -> Self { - let config = Config::default(); - - Self { config } + let event_server_config = SseEventServerConfig::default(); + let storage_config = StorageConfig::default(); + 
let rest_api_server_config = RestApiServerConfig::default(); + Self { + event_server_config, + storage_config, + rest_api_server_config, + network_name: None, + } } /// Specify where test storage (database, sse cache) should be located. /// By default it is set to `/target/test_storage` however it is recommended to overwrite this with a `TempDir` path for testing purposes. - pub(crate) fn set_storage_path(&mut self, path: String) { - self.config.storage.set_storage_path(path); + pub(crate) fn set_storage_folder(&mut self, path: String) { + self.storage_config.set_storage_folder(path.clone()); + } + + pub(crate) fn get_storage_folder(&self) -> String { + self.storage_config.storage_folder.clone() } pub(crate) fn set_storage(&mut self, storage: StorageConfig) { - self.config.storage = storage; + self.storage_config = storage; + } + + pub(crate) fn set_network_name(&mut self, maybe_network_name: Option) { + self.network_name = maybe_network_name; } pub(crate) fn add_connection( &mut self, - ip_address: Option, + ip_address: Option, sse_port: Option, rest_port: Option, ) -> Port { let random_port_for_sse = get_port(); let random_port_for_rest = get_port(); let connection = Connection { - ip_address: ip_address.unwrap_or_else(|| "127.0.0.1".to_string()), + ip_address: ip_address.unwrap_or_else(|| IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))), sse_port: sse_port.unwrap_or(random_port_for_sse), rest_port: rest_port.unwrap_or(random_port_for_rest), max_attempts: 2, @@ -85,7 +115,7 @@ impl TestingConfig { sleep_between_keep_alive_checks_in_seconds: Some(100), no_message_timeout_in_seconds: Some(100), }; - self.config.connections.push(connection); + self.event_server_config.connections.push(connection); random_port_for_sse } @@ -95,7 +125,7 @@ impl TestingConfig { port_of_node: u16, allow_partial_connection: bool, ) { - for connection in &mut self.config.connections { + for connection in &mut self.event_server_config.connections { if connection.sse_port == port_of_node { 
connection.allow_partial_connection = allow_partial_connection; break; @@ -112,7 +142,7 @@ impl TestingConfig { max_attempts: usize, delay_between_retries_in_seconds: usize, ) { - for connection in &mut self.config.connections { + for connection in &mut self.event_server_config.connections { if connection.sse_port == port_of_node { connection.max_attempts = max_attempts; connection.delay_between_retries_in_seconds = delay_between_retries_in_seconds; @@ -129,22 +159,21 @@ impl TestingConfig { pub(crate) fn allocate_available_ports(&mut self) { let rest_server_port = get_port(); let sse_server_port = get_port(); - self.config.rest_server.port = rest_server_port; - self.config.event_stream_server.port = sse_server_port; + self.rest_api_server_config.port = rest_server_port; + self.event_server_config.event_stream_server.port = sse_server_port; } /// Returns the inner [Config] - pub(crate) fn inner(&self) -> Config { - self.config.clone() - } - - /// Returns the port that the sidecar REST server is bound to. - pub(crate) fn rest_server_port(&self) -> u16 { - self.config.rest_server.port + pub(crate) fn inner(&self) -> SseEventServerConfig { + self.event_server_config.clone() } /// Returns the port that the sidecar SSE server is bound to. 
pub(crate) fn event_stream_server_port(&self) -> u16 { - self.config.event_stream_server.port + self.event_server_config.event_stream_server.port + } + + pub(crate) fn has_db_configured(&self) -> bool { + self.storage_config.is_enabled() } } diff --git a/event_sidecar/src/tests.rs b/event_sidecar/src/tests.rs new file mode 100644 index 00000000..a9722ec7 --- /dev/null +++ b/event_sidecar/src/tests.rs @@ -0,0 +1,54 @@ +use crate::rest_server::path_abstraction_for_metrics; + +pub mod integration_tests; +pub mod integration_tests_version_switch; +pub mod performance_tests; + +#[test] +fn path_abstraction_for_metrics_should_handle_endpoints() { + test_single_nested_path("block"); + test_single_nested_path("step"); + test_single_nested_path("faults"); + test_single_nested_path("signatures"); + expect_output("/transaction/deploy/123", "/transaction/(...)"); + expect_output( + "/transaction/accepted/deploy/123", + "/transaction/accepted/(...)", + ); + expect_output( + "/transaction/accepted/version1/123", + "/transaction/accepted/(...)", + ); + expect_output( + "/transaction/processed/deploy/123", + "/transaction/processed/(...)", + ); + expect_output( + "/transaction/processed/version1/123", + "/transaction/processed/(...)", + ); + expect_output( + "/transaction/expired/deploy/123", + "/transaction/expired/(...)", + ); + expect_output( + "/transaction/expired/version1/123", + "/transaction/expired/(...)", + ); + expect_output("/xyz", "unknown"); + expect_output("/", "unknown"); + expect_output("", "unknown"); +} + +fn test_single_nested_path(part: &str) { + let expected_output = &format!("/{}/(...)", part); + expect_output(part, expected_output); + expect_output(&format!("/{part}"), expected_output); + expect_output(&format!("/{part}/"), expected_output); + expect_output(&format!("/{part}/abc/def/ghi"), expected_output); + expect_output(&format!("{part}/abc/def/ghi"), expected_output); +} + +fn expect_output(input: &str, output: &str) { + 
assert_eq!(path_abstraction_for_metrics(input), output); +} diff --git a/sidecar/src/tests/integration_tests.rs b/event_sidecar/src/tests/integration_tests.rs similarity index 65% rename from sidecar/src/tests/integration_tests.rs rename to event_sidecar/src/tests/integration_tests.rs index 1653226b..feabf53e 100644 --- a/sidecar/src/tests/integration_tests.rs +++ b/event_sidecar/src/tests/integration_tests.rs @@ -1,36 +1,36 @@ use bytes::Bytes; -use casper_event_types::sse_data::{test_support::*, SseData}; +use casper_event_types::{legacy_sse_data::LegacySseData, sse_data::test_support::*}; use casper_types::testing::TestRng; use core::time; use eventsource_stream::{Event, EventStream, Eventsource}; use futures::Stream; use futures_util::StreamExt; -use http::StatusCode; +use reqwest::StatusCode; use std::{fmt::Debug, time::Duration}; use tempfile::{tempdir, TempDir}; use tokio::{sync::mpsc, time::sleep}; use crate::{ - database::sqlite_database::SqliteDatabase, + database::{sqlite_database::SqliteDatabase, types::SseEnvelope}, run, testing::{ mock_node::tests::{MockNode, MockNodeBuilder}, raw_sse_events_utils::tests::{ - random_n_block_added, sse_server_example_1_5_2_data, - sse_server_example_1_5_2_data_second, sse_server_example_1_5_2_data_third, - sse_server_shutdown_1_5_2_data, EventsWithIds, + random_n_block_added, sse_server_example_2_0_0_data, + sse_server_example_2_0_0_data_second, sse_server_example_2_0_0_data_third, + sse_server_shutdown_2_0_0_data, sse_server_sigs_2_0_0_data, EventsWithIds, }, - shared::EventType, testing_config::{prepare_config, TestingConfig}, }, types::{ - database::DatabaseWriter, + database::{Database, DatabaseWriter}, sse_events::{BlockAdded, Fault}, }, utils::tests::{ - any_string_contains, build_test_config, build_test_config_with_retries, - build_test_config_without_connections, start_nodes_and_wait, start_sidecar, - stop_nodes_and_wait, wait_for_n_messages, + any_string_contains, build_test_config, 
build_test_config_with_network_name, + build_test_config_with_retries, build_test_config_without_connections, + build_test_config_without_db_storage, start_nodes_and_wait, start_sidecar, + start_sidecar_with_rest_api, stop_nodes_and_wait, wait_for_n_messages, }, }; @@ -38,15 +38,23 @@ use crate::{ async fn should_not_allow_zero_max_attempts() { let temp_storage_dir = tempdir().expect("Should have created a temporary storage directory"); - let mut testing_config = prepare_config(&temp_storage_dir); + let mut testing_config = prepare_config(&temp_storage_dir, true, None); let sse_port_for_node = testing_config.add_connection(None, None, None); testing_config.set_retries_for_node(sse_port_for_node, 0, 0); - - let shutdown_error = run(testing_config.inner()) + let sqlite_database = SqliteDatabase::new_from_config(&testing_config.storage_config) .await - .expect_err("Sidecar should return an Err on shutdown"); + .expect("database should start"); + let storage_folder = testing_config.get_storage_folder(); + let shutdown_error = run( + testing_config.inner(), + storage_folder, + Some(Database::SqliteDatabaseWrapper(sqlite_database)), + None, + ) + .await + .expect_err("Sidecar should return an Err on shutdown"); assert_eq!( shutdown_error.to_string(), @@ -64,16 +72,16 @@ async fn given_sidecar_when_only_node_shuts_down_then_shut_down() { event_stream_server_port, ) = build_test_config(); - //MockNode::new should only have /events/main and /events sse endpoints, + //MockNode::new should only have /events and /events sse endpoints, // simulating a situation when a node doesn't expose all endpoints. 
- let mut node_mock = MockNodeBuilder::build_example_1_5_2_node( + let mut node_mock = MockNodeBuilder::build_example_2_0_0_node( node_port_for_sse_connection, node_port_for_rest_connection, ); start_nodes_and_wait(vec![&mut node_mock]).await; - let sidecar_join = start_sidecar(testing_config.inner()).await; + let sidecar_join = start_sidecar(testing_config).await; let (_, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; wait_for_n_messages(1, receiver, Duration::from_secs(30)).await; stop_nodes_and_wait(vec![&mut node_mock]).await; @@ -97,12 +105,68 @@ async fn should_allow_client_connection_to_sse() { node_port_for_rest_connection, event_stream_server_port, ) = build_test_config(); - let mut node_mock = MockNodeBuilder::build_example_1_5_2_node( + let mut node_mock = MockNodeBuilder::build_example_2_0_0_node( + node_port_for_sse_connection, + node_port_for_rest_connection, + ); + start_nodes_and_wait(vec![&mut node_mock]).await; + start_sidecar(testing_config).await; + let (join_handle, receiver) = + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; + wait_for_n_messages(1, receiver, Duration::from_secs(30)).await; + stop_nodes_and_wait(vec![&mut node_mock]).await; + + let events_received = tokio::join!(join_handle).0.unwrap(); + assert_eq!(events_received.len(), 2); + assert!( + events_received[0].contains("\"ApiVersion\""), + "First event should be ApiVersion" + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn should_allow_client_connection_to_sse_without_db_storage() { + let ( + testing_config, + _temp_storage_dir, + node_port_for_sse_connection, + node_port_for_rest_connection, + event_stream_server_port, + ) = build_test_config_without_db_storage(); + let mut node_mock = MockNodeBuilder::build_example_2_0_0_node( node_port_for_sse_connection, 
node_port_for_rest_connection, ); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; + let (join_handle, receiver) = + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; + wait_for_n_messages(1, receiver, Duration::from_secs(30)).await; + stop_nodes_and_wait(vec![&mut node_mock]).await; + + let events_received = tokio::join!(join_handle).0.unwrap(); + assert_eq!(events_received.len(), 2); + assert!( + events_received[0].contains("\"ApiVersion\""), + "First event should be ApiVersion" + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn should_translate_events_on_main_endpoint() { + let ( + testing_config, + _temp_storage_dir, + node_port_for_sse_connection, + node_port_for_rest_connection, + event_stream_server_port, + ) = build_test_config(); + let mut node_mock = MockNodeBuilder::build_example_2_0_0_node( + node_port_for_sse_connection, + node_port_for_rest_connection, + ); + start_nodes_and_wait(vec![&mut node_mock]).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; wait_for_n_messages(1, receiver, Duration::from_secs(30)).await; @@ -114,6 +178,47 @@ async fn should_allow_client_connection_to_sse() { events_received[0].contains("\"ApiVersion\""), "First event should be ApiVersion" ); + let legacy_block_added = serde_json::from_str::(&events_received[1]) + .expect("Should have parsed legacy BlockAdded from string"); + assert!(matches!( + legacy_block_added, + LegacySseData::BlockAdded { .. 
} + )); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn should_translate_events_on_sigs_endpoint() { + let ( + testing_config, + _temp_storage_dir, + node_port_for_sse_connection, + node_port_for_rest_connection, + event_stream_server_port, + ) = build_test_config(); + let mut node_mock = MockNodeBuilder::build_example_2_0_0_node_with_data( + node_port_for_sse_connection, + node_port_for_rest_connection, + sse_server_sigs_2_0_0_data(), + ); + start_nodes_and_wait(vec![&mut node_mock]).await; + start_sidecar(testing_config).await; + let (join_handle, receiver) = + fetch_data_from_endpoint("/events/sigs?start_from=0", event_stream_server_port).await; + wait_for_n_messages(1, receiver, Duration::from_secs(30)).await; + stop_nodes_and_wait(vec![&mut node_mock]).await; + + let events_received = tokio::join!(join_handle).0.unwrap(); + assert_eq!(events_received.len(), 2); + assert!( + events_received[0].contains("\"ApiVersion\""), + "First event should be ApiVersion" + ); + let legacy_finality_signature = serde_json::from_str::(&events_received[1]) + .expect("Should have parsed legacy FinalitySignature from string"); + assert!(matches!( + legacy_finality_signature, + LegacySseData::FinalitySignature { .. 
} + )); } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] @@ -125,15 +230,15 @@ async fn should_respond_to_rest_query() { node_port_for_rest_connection, event_stream_server_port, ) = build_test_config(); - let sidecar_rest_server_port = testing_config.rest_server_port(); - let mut node_mock = MockNodeBuilder::build_example_1_5_2_node( + let sidecar_rest_server_port = testing_config.rest_api_server_config.port; + let mut node_mock = MockNodeBuilder::build_example_2_0_0_node( node_port_for_sse_connection, node_port_for_rest_connection, ); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar_with_rest_api(testing_config).await; let (_, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; wait_for_n_messages(1, receiver, Duration::from_secs(30)).await; let block_request_url = format!("http://127.0.0.1:{}/block", sidecar_rest_server_port); @@ -149,7 +254,7 @@ async fn should_respond_to_rest_query() { .bytes() .await .expect("Should have got bytes from response"); - serde_json::from_slice::(&response_bytes) + serde_json::from_slice::>(&response_bytes) .expect("Should have parsed BlockAdded from bytes"); } @@ -162,14 +267,14 @@ async fn should_allow_partial_connection_on_one_filter() { node_port_for_rest_connection, event_stream_server_port, ) = build_test_config(); - let mut node_mock = MockNodeBuilder::build_example_1_5_2_node( + let mut node_mock = MockNodeBuilder::build_example_2_0_0_node( node_port_for_sse_connection, node_port_for_rest_connection, ); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", 
event_stream_server_port).await; wait_for_n_messages(1, receiver, Duration::from_secs(30)).await; stop_nodes_and_wait(vec![&mut node_mock]).await; @@ -177,19 +282,6 @@ async fn should_allow_partial_connection_on_one_filter() { assert!(!events_received.is_empty()); } -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn should_allow_partial_connection_on_two_filters() { - let received_event_types = partial_connection_test(true).await; - assert_eq!(received_event_types.len(), 1) -} - -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn should_disallow_partial_connection_on_one_filter() { - let received_event_types = partial_connection_test(false).await; - //There should only be ApiVersion - assert!(received_event_types.is_empty()) -} - #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn should_fail_to_reconnect() { let test_rng = TestRng::new(); @@ -199,10 +291,11 @@ async fn should_fail_to_reconnect() { node_port_for_sse_connection, node_port_for_rest_connection, event_stream_server_port, - ) = build_test_config_with_retries(2, 2); + ) = build_test_config_with_retries(2, 2, true, None); let (data_of_node, test_rng) = random_n_block_added(30, 0, test_rng); let mut node_mock = MockNodeBuilder { - version: "1.5.2".to_string(), + version: "2.0.0".to_string(), + network_name: "network-1".to_string(), data_of_node, cache_of_node: None, sse_port: Some(node_port_for_sse_connection), @@ -210,9 +303,9 @@ async fn should_fail_to_reconnect() { } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; wait_for_n_messages(31, receiver, Duration::from_secs(120)).await; stop_nodes_and_wait(vec![&mut node_mock]).await; @@ -220,7 +313,8 @@ 
async fn should_fail_to_reconnect() { let (data_of_node, _) = random_n_block_added(30, 31, test_rng); let mut node_mock = MockNodeBuilder { - version: "1.5.2".to_string(), + version: "2.0.0".to_string(), + network_name: "network-1".to_string(), data_of_node, cache_of_node: None, sse_port: Some(node_port_for_sse_connection), @@ -246,10 +340,11 @@ async fn should_reconnect() { node_port_for_sse_connection, node_port_for_rest_connection, event_stream_server_port, - ) = build_test_config_with_retries(10, 1); + ) = build_test_config_with_retries(10, 1, true, None); let (data_of_node, test_rng) = random_n_block_added(30, 0, test_rng); let mut node_mock = MockNodeBuilder { - version: "1.5.2".to_string(), + version: "2.0.0".to_string(), + network_name: "network-1".to_string(), data_of_node, cache_of_node: None, sse_port: Some(node_port_for_sse_connection), @@ -257,15 +352,16 @@ async fn should_reconnect() { } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; let receiver = wait_for_n_messages(31, receiver, Duration::from_secs(120)).await; stop_nodes_and_wait(vec![&mut node_mock]).await; let (data_of_node, _) = random_n_block_added(30, 31, test_rng); let mut node_mock = MockNodeBuilder { - version: "1.5.2".to_string(), + version: "2.0.0".to_string(), + network_name: "network-1".to_string(), data_of_node, cache_of_node: None, sse_port: Some(node_port_for_sse_connection), @@ -292,29 +388,30 @@ async fn shutdown_should_be_passed_through() { event_stream_server_port, ) = build_test_config(); let mut node_mock = MockNodeBuilder { - version: "1.5.2".to_string(), - data_of_node: sse_server_shutdown_1_5_2_data(), + version: "2.0.0".to_string(), + network_name: "network-1".to_string(), + 
data_of_node: sse_server_shutdown_2_0_0_data(), cache_of_node: None, sse_port: Some(node_port_for_sse_connection), rest_port: Some(node_port_for_rest_connection), } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; wait_for_n_messages(2, receiver, Duration::from_secs(120)).await; stop_nodes_and_wait(vec![&mut node_mock]).await; let events_received = tokio::join!(join_handle).0.unwrap(); assert_eq!(events_received.len(), 3); - assert!(events_received.get(0).unwrap().contains("\"1.5.2\"")); + assert!(events_received.first().unwrap().contains("\"2.0.0\"")); assert!(events_received.get(1).unwrap().contains("\"Shutdown\"")); assert!(events_received.get(2).unwrap().contains("\"BlockAdded\"")); } #[tokio::test(flavor = "multi_thread", worker_threads = 5)] -async fn connecting_to_node_prior_to_1_5_2_should_fail() { +async fn connecting_to_node_prior_to_2_0_0_should_fail() { let ( testing_config, _temp_storage_dir, @@ -323,17 +420,61 @@ async fn connecting_to_node_prior_to_1_5_2_should_fail() { event_stream_server_port, ) = build_test_config(); let mut node_mock = MockNodeBuilder { - version: "1.5.1".to_string(), - data_of_node: sse_server_shutdown_1_5_2_data(), + version: "1.9.9".to_string(), + network_name: "network-1".to_string(), + data_of_node: sse_server_shutdown_2_0_0_data(), + cache_of_node: None, + sse_port: Some(node_port_for_sse_connection), + rest_port: Some(node_port_for_rest_connection), + } + .build(); + start_nodes_and_wait(vec![&mut node_mock]).await; + let sidecar_join = start_sidecar(testing_config).await; + let (join_handle, _) = fetch_data_from_endpoint_with_panic_flag( + "/events?start_from=0", + event_stream_server_port, + false, + ) + .await; + 
sleep(Duration::from_secs(10)).await; //Give some time for sidecar to read data from node (which it actually shouldn't do in this scenario) + stop_nodes_and_wait(vec![&mut node_mock]).await; + + let events_received = tokio::join!(join_handle).0.unwrap(); + assert_eq!(events_received.len(), 0); + + let shutdown_err = sidecar_join + .await + .unwrap() + .expect_err("Sidecar should return an Err message on shutdown"); + + assert_eq!( + shutdown_err.to_string(), + "Connected node(s) are unavailable" + ) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 5)] +async fn connecting_to_node_with_wrong_network_name_should_fail() { + let ( + testing_config, + _temp_storage_dir, + node_port_for_sse_connection, + node_port_for_rest_connection, + event_stream_server_port, + ) = build_test_config_with_network_name("network-1"); + let mut node_mock = MockNodeBuilder { + version: "2.0.0".to_string(), + network_name: "not-network-1".to_string(), + data_of_node: sse_server_shutdown_2_0_0_data(), cache_of_node: None, sse_port: Some(node_port_for_sse_connection), rest_port: Some(node_port_for_rest_connection), } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + let sidecar_join = start_sidecar(testing_config).await; let (join_handle, _) = fetch_data_from_endpoint_with_panic_flag( - "/events/main?start_from=0", + "/events?start_from=0", event_stream_server_port, false, ) @@ -343,6 +484,16 @@ async fn connecting_to_node_prior_to_1_5_2_should_fail() { let events_received = tokio::join!(join_handle).0.unwrap(); assert_eq!(events_received.len(), 0); + + let shutdown_err = sidecar_join + .await + .unwrap() + .expect_err("Sidecar should return an Err message on shutdown"); + + assert_eq!( + shutdown_err.to_string(), + "Connected node(s) are unavailable" + ) } #[tokio::test(flavor = "multi_thread", worker_threads = 5)] @@ -355,23 +506,25 @@ async fn shutdown_should_be_passed_through_when_versions_change() { 
event_stream_server_port, ) = build_test_config(); let mut node_mock = MockNodeBuilder { - version: "1.5.2".to_string(), - data_of_node: sse_server_shutdown_1_5_2_data(), + version: "2.0.0".to_string(), + network_name: "network-1".to_string(), + data_of_node: sse_server_shutdown_2_0_0_data(), cache_of_node: None, sse_port: Some(node_port_for_sse_connection), rest_port: Some(node_port_for_rest_connection), } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; let receiver = wait_for_n_messages(3, receiver, Duration::from_secs(120)).await; stop_nodes_and_wait(vec![&mut node_mock]).await; let mut node_mock = MockNodeBuilder::build_example_node_with_version( Some(node_port_for_sse_connection), Some(node_port_for_rest_connection), - "1.5.3", + "2.0.1", + "network-1", ); start_nodes_and_wait(vec![&mut node_mock]).await; wait_for_n_messages(2, receiver, Duration::from_secs(120)).await; @@ -379,10 +532,10 @@ async fn shutdown_should_be_passed_through_when_versions_change() { let events_received = tokio::join!(join_handle).0.unwrap(); assert_eq!(events_received.len(), 5); - assert!(events_received.get(0).unwrap().contains("\"1.5.2\"")); + assert!(events_received.first().unwrap().contains("\"2.0.0\"")); assert!(events_received.get(1).unwrap().contains("\"Shutdown\"")); assert!(events_received.get(2).unwrap().contains("\"BlockAdded\"")); - assert!(events_received.get(3).unwrap().contains("\"1.5.3\"")); + assert!(events_received.get(3).unwrap().contains("\"2.0.1\"")); assert!(events_received.get(4).unwrap().contains("\"BlockAdded\"")); } @@ -395,12 +548,12 @@ async fn should_produce_shutdown_to_sidecar_endpoint() { node_port_for_rest_connection, event_stream_server_port, ) = 
build_test_config(); - let mut node_mock = MockNodeBuilder::build_example_1_5_2_node( + let mut node_mock = MockNodeBuilder::build_example_2_0_0_node( node_port_for_sse_connection, node_port_for_rest_connection, ); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = fetch_data_from_endpoint("/events/sidecar", event_stream_server_port).await; stop_nodes_and_wait(vec![&mut node_mock]).await; @@ -409,7 +562,7 @@ async fn should_produce_shutdown_to_sidecar_endpoint() { let events_received = tokio::join!(join_handle).0.unwrap(); assert_eq!(events_received.len(), 2); assert!(events_received - .get(0) + .first() .unwrap() .contains("\"SidecarVersion\"")); assert!(events_received.get(1).unwrap().contains("\"Shutdown\"")); @@ -426,25 +579,26 @@ async fn sidecar_should_use_start_from_if_database_is_empty() { ) = build_test_config(); let data_of_node = vec![( Some("2".to_string()), - example_block_added_1_5_2(BLOCK_HASH_3, "3"), + example_block_added_2_0_0(BLOCK_HASH_3, 3u64), )]; let mut node_mock = MockNodeBuilder { - version: "1.5.2".to_string(), + version: "2.0.0".to_string(), + network_name: "network-1".to_string(), data_of_node, - cache_of_node: Some(sse_server_example_1_5_2_data()), + cache_of_node: Some(sse_server_example_2_0_0_data()), sse_port: Some(node_port_for_sse_connection), rest_port: Some(node_port_for_rest_connection), } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; wait_for_n_messages(2, receiver, Duration::from_secs(120)).await; stop_nodes_and_wait(vec![&mut node_mock]).await; let events_received = tokio::join!(join_handle).0.unwrap(); 
assert_eq!(events_received.len(), 3); - assert!(events_received.get(0).unwrap().contains("\"1.5.2\"")); + assert!(events_received.first().unwrap().contains("\"2.0.0\"")); assert!(events_received.get(1).unwrap().contains("\"BlockAdded\"")); assert!(events_received.get(2).unwrap().contains("\"BlockAdded\"")); } @@ -460,31 +614,38 @@ async fn sidecar_should_use_start_from_if_database_is_not_empty() { event_stream_server_port, ) = build_test_config(); //Prepopulating database - let sqlite_database = SqliteDatabase::new_from_config(&testing_config.config.storage) + let sqlite_database = SqliteDatabase::new_from_config(&testing_config.storage_config) .await .expect("database should start"); sqlite_database - .save_fault(Fault::random(&mut rng), 0, "127.0.0.1".to_string()) + .save_fault( + Fault::random(&mut rng), + 0, + "127.0.0.1".to_string(), + "1.1.1".to_string(), + "network-1".to_string(), + ) .await .unwrap(); let mut node_mock = MockNodeBuilder { - version: "1.5.2".to_string(), - data_of_node: sse_server_example_1_5_2_data_second(), - cache_of_node: Some(sse_server_example_1_5_2_data()), + version: "2.0.0".to_string(), + network_name: "network-1".to_string(), + data_of_node: sse_server_example_2_0_0_data_second(), + cache_of_node: Some(sse_server_example_2_0_0_data()), sse_port: Some(node_port_for_sse_connection), rest_port: Some(node_port_for_rest_connection), } .build(); start_nodes_and_wait(vec![&mut node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; wait_for_n_messages(1, receiver, Duration::from_secs(120)).await; stop_nodes_and_wait(vec![&mut node_mock]).await; let events_received = tokio::join!(join_handle).0.unwrap(); assert_eq!(events_received.len(), 3); - 
assert!(events_received.get(0).unwrap().contains("\"1.5.2\"")); + assert!(events_received.first().unwrap().contains("\"2.0.0\"")); assert!(events_received.get(1).unwrap().contains("\"BlockAdded\"")); assert!(events_received.get(2).unwrap().contains("\"BlockAdded\"")); } @@ -492,13 +653,13 @@ async fn sidecar_should_use_start_from_if_database_is_not_empty() { #[tokio::test(flavor = "multi_thread", worker_threads = 8)] async fn sidecar_should_connect_to_multiple_nodes() { let (sse_port_1, rest_port_1, mut mock_node_1) = - build_1_5_2(sse_server_example_1_5_2_data()).await; + build_2_0_0(sse_server_example_2_0_0_data()).await; mock_node_1.start().await; let (sse_port_2, rest_port_2, mut mock_node_2) = - build_1_5_2(sse_server_example_1_5_2_data_second()).await; + build_2_0_0(sse_server_example_2_0_0_data_second()).await; mock_node_2.start().await; let (sse_port_3, rest_port_3, mut mock_node_3) = - build_1_5_2(sse_server_example_1_5_2_data_third()).await; + build_2_0_0(sse_server_example_2_0_0_data_third()).await; mock_node_3.start().await; let (testing_config, event_stream_server_port, _temp_storage_dir) = build_testing_config_based_on_ports(vec![ @@ -506,16 +667,16 @@ async fn sidecar_should_connect_to_multiple_nodes() { (sse_port_2, rest_port_2), (sse_port_3, rest_port_3), ]); - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; wait_for_n_messages(4, receiver, Duration::from_secs(120)).await; stop_nodes_and_wait(vec![&mut mock_node_1, &mut mock_node_2, &mut mock_node_3]).await; let events_received = tokio::join!(join_handle).0.unwrap(); let length = events_received.len(); assert_eq!(length, 4); - assert!(events_received.get(0).unwrap().contains("\"1.5.2\"")); + assert!(events_received.first().unwrap().contains("\"2.0.0\"")); 
assert!(any_string_contains( &events_received, format!("\"{BLOCK_HASH_2}\"") @@ -533,18 +694,18 @@ async fn sidecar_should_connect_to_multiple_nodes() { #[tokio::test(flavor = "multi_thread", worker_threads = 8)] async fn sidecar_should_not_downgrade_api_version_when_new_nodes_disconnect() { let (sse_port_1, rest_port_1, mut mock_node_1) = - build_1_5_2(sse_server_example_1_5_2_data()).await; + build_2_0_0(sse_server_example_2_0_0_data()).await; mock_node_1.start().await; let (sse_port_2, rest_port_2, mut mock_node_2) = - build_1_5_2(sse_server_example_1_5_2_data_second()).await; + build_2_0_0(sse_server_example_2_0_0_data_second()).await; let (testing_config, event_stream_server_port, _temp_storage_dir) = build_testing_config_based_on_ports(vec![ (sse_port_1, rest_port_1), (sse_port_2, rest_port_2), ]); - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; let receiver = wait_for_n_messages(2, receiver, Duration::from_secs(120)).await; mock_node_1.stop().await; mock_node_2.start().await; @@ -553,7 +714,7 @@ async fn sidecar_should_not_downgrade_api_version_when_new_nodes_disconnect() { let events_received = tokio::join!(join_handle).0.unwrap(); let length = events_received.len(); assert_eq!(length, 3); - assert!(events_received.get(0).unwrap().contains("\"1.5.2\"")); + assert!(events_received.first().unwrap().contains("\"2.0.0\"")); assert!(any_string_contains( &events_received, format!("\"{BLOCK_HASH_2}\"") @@ -567,24 +728,24 @@ async fn sidecar_should_not_downgrade_api_version_when_new_nodes_disconnect() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn sidecar_should_report_only_one_api_version_if_there_was_no_update() { let (sse_port_1, rest_port_1, mut mock_node_1) = - 
build_1_5_2(sse_server_example_1_5_2_data()).await; + build_2_0_0(sse_server_example_2_0_0_data()).await; let (sse_port_2, rest_port_2, mut mock_node_2) = - build_1_5_2(sse_server_example_1_5_2_data_second()).await; + build_2_0_0(sse_server_example_2_0_0_data_second()).await; start_nodes_and_wait(vec![&mut mock_node_1, &mut mock_node_2]).await; let (testing_config, event_stream_server_port, _temp_storage_dir) = build_testing_config_based_on_ports(vec![ (sse_port_1, rest_port_1), (sse_port_2, rest_port_2), ]); - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; wait_for_n_messages(3, receiver, Duration::from_secs(120)).await; stop_nodes_and_wait(vec![&mut mock_node_1, &mut mock_node_2]).await; let events_received = tokio::join!(join_handle).0.unwrap(); let length = events_received.len(); assert_eq!(length, 3); - assert!(events_received.get(0).unwrap().contains("\"1.5.2\"")); + assert!(events_received.first().unwrap().contains("\"2.0.0\"")); assert!(any_string_contains( &events_received, format!("\"{BLOCK_HASH_2}\"") @@ -598,9 +759,9 @@ async fn sidecar_should_report_only_one_api_version_if_there_was_no_update() { #[tokio::test(flavor = "multi_thread", worker_threads = 8)] async fn sidecar_should_connect_to_multiple_nodes_even_if_some_of_them_dont_respond() { let (sse_port_1, rest_port_1, mut mock_node_1) = - build_1_5_2(sse_server_example_1_5_2_data()).await; + build_2_0_0(sse_server_example_2_0_0_data()).await; let (sse_port_2, rest_port_2, mut mock_node_2) = - build_1_5_2(sse_server_example_1_5_2_data_second()).await; + build_2_0_0(sse_server_example_2_0_0_data_second()).await; start_nodes_and_wait(vec![&mut mock_node_1, &mut mock_node_2]).await; let (testing_config, event_stream_server_port, _temp_storage_dir) = 
build_testing_config_based_on_ports(vec![ @@ -608,16 +769,16 @@ async fn sidecar_should_connect_to_multiple_nodes_even_if_some_of_them_dont_resp (sse_port_2, rest_port_2), (8888, 9999), //Ports which should be not occupied ]); - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config).await; let (join_handle, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; wait_for_n_messages(3, receiver, Duration::from_secs(120)).await; stop_nodes_and_wait(vec![&mut mock_node_1, &mut mock_node_2]).await; let events_received = tokio::join!(join_handle).0.unwrap(); let length = events_received.len(); assert_eq!(length, 3); - assert!(events_received.get(0).unwrap().contains("\"1.5.2\"")); + assert!(events_received.first().unwrap().contains("\"2.0.0\"")); assert!(any_string_contains( &events_received, format!("\"{BLOCK_HASH_2}\"") @@ -628,34 +789,6 @@ async fn sidecar_should_connect_to_multiple_nodes_even_if_some_of_them_dont_resp )); } -async fn partial_connection_test(allow_partial_connection: bool) -> Vec { - // Prepare the mock node, by the "default" config it should have only the /events and /events/main endpoints - let (sse_port, rest_port, mut node_mock) = build_1_5_2(sse_server_example_1_5_2_data()).await; - // Setup config for the sidecar - // - Set the sidecar to reattempt connection only once after a 2 second delay. - // - Allow partial based on the value passed to the function. - let (mut testing_config, event_stream_server_port, _temp_storage_dir) = - build_testing_config_based_on_ports(vec![(sse_port, rest_port)]); - testing_config.set_allow_partial_connection_for_node(sse_port, allow_partial_connection); - // Start the mock node - start_nodes_and_wait(vec![&mut node_mock]).await; - - // Run the Sidecar in another task with the prepared config. 
- start_sidecar(testing_config.inner()).await; - let (join_handle, receiver) = - fetch_data_from_endpoint("/events/main?start_from=0", event_stream_server_port).await; - let _ = wait_for_n_messages(1, receiver, Duration::from_secs(60)).await; - //We need to stop the mock node, otherwise `join_handle` might never finish (it listens to events until sidecar stops) - stop_nodes_and_wait(vec![&mut node_mock]).await; - let events_received = tokio::join!(join_handle).0.unwrap(); - let collected: Vec = events_received - .iter() - .map(|raw_events| serde_json::from_str::(raw_events).unwrap().into()) - .filter(|t: &EventType| *t != EventType::ApiVersion) - .collect(); - collected -} - pub async fn try_connect_to_single_stream( url: &str, ) -> Option> + Sized>> { @@ -725,9 +858,10 @@ pub async fn fetch_data_from_endpoint_with_panic_flag( (join, receiver) } -pub async fn build_1_5_2(data_of_node: EventsWithIds) -> (u16, u16, MockNode) { +pub async fn build_2_0_0(data_of_node: EventsWithIds) -> (u16, u16, MockNode) { let node_mock = MockNodeBuilder { - version: "1.5.2".to_string(), + version: "2.0.0".to_string(), + network_name: "network-1".to_string(), data_of_node, cache_of_node: None, sse_port: None, @@ -745,7 +879,7 @@ pub fn build_testing_config_based_on_ports( ports_of_nodes: Vec<(u16, u16)>, ) -> (TestingConfig, u16, TempDir) { let (mut testing_config, temp_storage_dir, event_stream_server_port) = - build_test_config_without_connections(); + build_test_config_without_connections(true, None); for (sse_port, rest_port) in ports_of_nodes { testing_config.add_connection(None, Some(sse_port), Some(rest_port)); testing_config.set_retries_for_node(sse_port, 5, 2); diff --git a/sidecar/src/tests/integration_tests_version_switch.rs b/event_sidecar/src/tests/integration_tests_version_switch.rs similarity index 58% rename from sidecar/src/tests/integration_tests_version_switch.rs rename to event_sidecar/src/tests/integration_tests_version_switch.rs index 4ca37433..129ebbca 100644 
--- a/sidecar/src/tests/integration_tests_version_switch.rs +++ b/event_sidecar/src/tests/integration_tests_version_switch.rs @@ -13,18 +13,17 @@ pub mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn should_successfully_switch_api_versions() { - let mut node_mock = MockNodeBuilder::build_example_node_with_version(None, None, "1.5.2"); + let mut node_mock = + MockNodeBuilder::build_example_node_with_version(None, None, "2.0.0", "network-1"); let properties = prepare_one_node_and_start(&mut node_mock).await; - let (join_handle, receiver) = fetch_data_from_endpoint( - "/events/main?start_from=0", - properties.event_stream_server_port, - ) - .await; + let (join_handle, receiver) = + fetch_data_from_endpoint("/events?start_from=0", properties.event_stream_server_port) + .await; let receiver = wait_for_n_messages(1, receiver, Duration::from_secs(120)).await; stop_nodes_and_wait(vec![&mut node_mock]).await; - //At this point node 1.5.2 should be gone, set up 1.5.3 below - let mut node_mock = MockNodeBuilder::build_example_1_5_3_node( + //At this point node 2.0.0 should be gone, set up 2.0.1 below + let mut node_mock = MockNodeBuilder::build_example_2_0_1_node( properties.node_port_for_sse_connection, properties.node_port_for_rest_connection, ); @@ -34,13 +33,13 @@ pub mod tests { let events_received = tokio::join!(join_handle).0.unwrap(); assert_eq!(events_received.len(), 4); - assert!(events_received.get(0).unwrap().contains("\"1.5.2\"")); - //block hash for 1.5.2 - let block_entry_1_5_2 = events_received.get(1).unwrap(); - assert!(block_entry_1_5_2.contains(format!("\"{BLOCK_HASH_2}\"").as_str())); - assert!(events_received.get(2).unwrap().contains("\"1.5.3\"")); - //block hash for 1.5.3 - let block_entry_1_5_3 = events_received.get(3).unwrap(); - assert!(block_entry_1_5_3.contains(format!("\"{BLOCK_HASH_3}\"").as_str())); + assert!(events_received.first().unwrap().contains("\"2.0.0\"")); + //block hash for 2.0.0 + let block_entry_2_0_0 = 
events_received.get(1).unwrap(); + assert!(block_entry_2_0_0.contains(format!("\"{BLOCK_HASH_2}\"").as_str())); + assert!(events_received.get(2).unwrap().contains("\"2.0.1\"")); + //block hash for 2.0.1 + let block_entry_2_0_1 = events_received.get(3).unwrap(); + assert!(block_entry_2_0_1.contains(format!("\"{BLOCK_HASH_3}\"").as_str())); } } diff --git a/sidecar/src/tests/performance_tests.rs b/event_sidecar/src/tests/performance_tests.rs similarity index 89% rename from sidecar/src/tests/performance_tests.rs rename to event_sidecar/src/tests/performance_tests.rs index bc3d3f0f..8d11de36 100644 --- a/sidecar/src/tests/performance_tests.rs +++ b/event_sidecar/src/tests/performance_tests.rs @@ -1,10 +1,11 @@ use crate::{ database::postgresql_database::PostgreSqlDatabase, - run, testing::fake_event_stream::Bound, tests::integration_tests::fetch_data_from_endpoint, types::database::DatabaseReader, - utils::tests::{build_postgres_based_test_config, wait_for_n_messages}, + utils::tests::{ + build_postgres_based_test_config, unpack_test_config_and_run, wait_for_n_messages, + }, }; use crate::{ event_stream_server::Config as EssConfig, @@ -70,11 +71,11 @@ async fn check_latency_on_load_testing_step_scenario() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -async fn check_latency_on_load_testing_deploys_scenario() { +async fn check_latency_on_load_testing_transactions_scenario() { let duration = Duration::from_secs(60); performance_check( - Scenario::LoadTestingDeploy( + Scenario::LoadTestingTransaction( GenericScenarioSettings::new(Bound::Timed(duration), None), 20, ), @@ -105,7 +106,7 @@ async fn test_event_throughput() { node_port_for_sse_connection, node_port_for_rest_connection, )); - tokio::spawn(run(testing_config.inner())); + tokio::spawn(async move { unpack_test_config_and_run(testing_config, false).await }); let handle = start_counting_outbound_events(cancellation_token.clone(), event_stream_server_port).await; @@ -150,9 +151,9 @@ 
pub(crate) enum EventType { ApiVersion, SidecarVersion, BlockAdded, - DeployAccepted, - DeployExpired, - DeployProcessed, + TransactionAccepted, + TransactionExpired, + TransactionProcessed, Fault, FinalitySignature, Step, @@ -165,9 +166,9 @@ impl From for EventType { SseData::ApiVersion(_) => EventType::ApiVersion, SseData::SidecarVersion(_) => EventType::SidecarVersion, SseData::BlockAdded { .. } => EventType::BlockAdded, - SseData::DeployAccepted { .. } => EventType::DeployAccepted, - SseData::DeployProcessed { .. } => EventType::DeployProcessed, - SseData::DeployExpired { .. } => EventType::DeployExpired, + SseData::TransactionAccepted { .. } => EventType::TransactionAccepted, + SseData::TransactionProcessed { .. } => EventType::TransactionProcessed, + SseData::TransactionExpired { .. } => EventType::TransactionExpired, SseData::Fault { .. } => EventType::Fault, SseData::FinalitySignature(_) => EventType::FinalitySignature, SseData::Step { .. } => EventType::Step, @@ -182,9 +183,9 @@ impl Display for EventType { EventType::ApiVersion => "ApiVersion", EventType::SidecarVersion => "SidecarVersion", EventType::BlockAdded => "BlockAdded", - EventType::DeployAccepted => "DeployAccepted", - EventType::DeployExpired => "DeployExpired", - EventType::DeployProcessed => "DeployProcessed", + EventType::TransactionAccepted => "TransactionAccepted", + EventType::TransactionExpired => "TransactionExpired", + EventType::TransactionProcessed => "TransactionProcessed", EventType::Fault => "Fault", EventType::FinalitySignature => "FinalitySignature", EventType::Step => "Step", @@ -204,9 +205,11 @@ impl TimestampedEvent { SseData::ApiVersion(_) => "ApiVersion".to_string(), SseData::SidecarVersion(_) => "SidecarVersion".to_string(), SseData::BlockAdded { block_hash, .. } => block_hash.to_string(), - SseData::DeployAccepted { deploy } => deploy.hash().to_string(), - SseData::DeployProcessed { deploy_hash, .. 
} => deploy_hash.to_string(), - SseData::DeployExpired { deploy_hash } => deploy_hash.to_string(), + SseData::TransactionAccepted(transaction) => transaction.hash().to_string(), + SseData::TransactionProcessed { + transaction_hash, .. + } => transaction_hash.to_string(), + SseData::TransactionExpired { transaction_hash } => transaction_hash.to_string(), SseData::Fault { era_id, public_key, .. } => format!("{}-{}", era_id.value(), public_key.to_hex()), @@ -224,9 +227,9 @@ impl TimestampedEvent { match (&self.event, &other.event) { (SseData::ApiVersion(_), SseData::ApiVersion(_)) | (SseData::BlockAdded { .. }, SseData::BlockAdded { .. }) - | (SseData::DeployAccepted { .. }, SseData::DeployAccepted { .. }) - | (SseData::DeployProcessed { .. }, SseData::DeployProcessed { .. }) - | (SseData::DeployExpired { .. }, SseData::DeployExpired { .. }) + | (SseData::TransactionAccepted { .. }, SseData::TransactionAccepted { .. }) + | (SseData::TransactionProcessed { .. }, SseData::TransactionProcessed { .. }) + | (SseData::TransactionExpired { .. }, SseData::TransactionExpired { .. }) | (SseData::Fault { .. }, SseData::Fault { .. }) | (SseData::FinalitySignature(_), SseData::FinalitySignature(_)) | (SseData::Step { .. }, SseData::Step { .. 
}) @@ -261,10 +264,20 @@ async fn performance_check(scenario: Scenario, duration: Duration, acceptable_la let test_rng = TestRng::new(); let temp_storage_dir = tempdir().expect("Should have created a temporary storage directory"); - let mut testing_config = prepare_config(&temp_storage_dir); + let mut testing_config = prepare_config(&temp_storage_dir, true, None); testing_config.add_connection(None, None, None); - let node_port_for_sse_connection = testing_config.config.connections.get(0).unwrap().sse_port; - let node_port_for_rest_connection = testing_config.config.connections.get(0).unwrap().rest_port; + let node_port_for_sse_connection = testing_config + .event_server_config + .connections + .first() + .unwrap() + .sse_port; + let node_port_for_rest_connection = testing_config + .event_server_config + .connections + .first() + .unwrap() + .rest_port; let (_shutdown_tx, _after_shutdown_rx) = setup_mock_build_version_server(node_port_for_rest_connection).await; @@ -272,7 +285,7 @@ async fn performance_check(scenario: Scenario, duration: Duration, acceptable_la tokio::spawn(spin_up_fake_event_stream(test_rng, ess_config, scenario)); - tokio::spawn(run(testing_config.inner())); + tokio::spawn(async move { unpack_test_config_and_run(testing_config, false).await }); tokio::time::sleep(Duration::from_secs(1)).await; let ip_address = IpAddr::from_str("127.0.0.1").expect("Couldn't parse IpAddr"); @@ -280,6 +293,7 @@ async fn performance_check(scenario: Scenario, duration: Duration, acceptable_la ip_address, sse_port: node_port_for_sse_connection, rest_port: node_port_for_rest_connection, + network_name: None, }; let (node_event_tx, node_event_rx) = mpsc::channel(100); let mut node_event_listener = EventListenerBuilder { @@ -308,6 +322,7 @@ async fn performance_check(scenario: Scenario, duration: Duration, acceptable_la ip_address: IpAddr::from_str("127.0.0.1").expect("Couldn't parse IpAddr"), sse_port: node_port_for_sse_connection, rest_port: 
node_port_for_rest_connection, + network_name: None, }; let mut sidecar_event_listener = EventListenerBuilder { node: sidecar_node_interface, @@ -345,11 +360,11 @@ async fn performance_check(scenario: Scenario, duration: Duration, acceptable_la let event_types_ordered_for_efficiency = vec![ EventType::FinalitySignature, - EventType::DeployAccepted, - EventType::DeployProcessed, + EventType::TransactionAccepted, + EventType::TransactionProcessed, EventType::BlockAdded, EventType::Step, - EventType::DeployExpired, + EventType::TransactionExpired, EventType::Fault, ]; @@ -374,7 +389,7 @@ async fn live_performance_check( acceptable_latency: Duration, ) { let temp_storage_dir = tempdir().expect("Should have created a temporary storage directory"); - let mut testing_config = prepare_config(&temp_storage_dir); + let mut testing_config = prepare_config(&temp_storage_dir, true, None); let port_for_connection = testing_config.add_connection(Some(ip_address.clone()), Some(port)); tokio::spawn(run(testing_config.inner())); @@ -420,11 +435,11 @@ async fn live_performance_check( let event_types_ordered_for_efficiency = vec![ EventType::FinalitySignature, - EventType::DeployAccepted, - EventType::DeployProcessed, + EventType::TransactionAccepted, + EventType::TransactionProcessed, EventType::BlockAdded, EventType::Step, - EventType::DeployExpired, + EventType::TransactionExpired, EventType::Fault, ]; @@ -446,9 +461,9 @@ fn check_latencies_are_acceptable( ) { let event_types = vec![ EventType::BlockAdded, - EventType::DeployAccepted, - EventType::DeployExpired, - EventType::DeployProcessed, + EventType::TransactionAccepted, + EventType::TransactionExpired, + EventType::TransactionProcessed, EventType::Fault, EventType::FinalitySignature, EventType::Step, @@ -475,9 +490,9 @@ fn create_results_from_data( ) -> Vec { let event_types_ordered_for_display = vec![ EventType::BlockAdded, - EventType::DeployAccepted, - EventType::DeployExpired, - EventType::DeployProcessed, + 
EventType::TransactionAccepted, + EventType::TransactionExpired, + EventType::TransactionProcessed, EventType::Fault, EventType::FinalitySignature, EventType::Step, @@ -665,7 +680,7 @@ async fn start_counting_outbound_events( event_stream_server_port: u16, ) -> JoinHandle { let (_, receiver) = - fetch_data_from_endpoint("/events/deploys?start_from=0", event_stream_server_port).await; + fetch_data_from_endpoint("/events?start_from=0", event_stream_server_port).await; let mut receiver = wait_for_n_messages(1, receiver, Duration::from_secs(120)).await; tokio::spawn(async move { let mut counter = 0; diff --git a/sidecar/src/types.rs b/event_sidecar/src/types.rs similarity index 100% rename from sidecar/src/types.rs rename to event_sidecar/src/types.rs diff --git a/sidecar/src/types/config.rs b/event_sidecar/src/types/config.rs similarity index 54% rename from sidecar/src/types/config.rs rename to event_sidecar/src/types/config.rs index 2490cbd9..3742faa6 100644 --- a/sidecar/src/types/config.rs +++ b/event_sidecar/src/types/config.rs @@ -1,12 +1,11 @@ -use std::string::ToString; +use serde::Deserialize; use std::{ convert::{TryFrom, TryInto}, + net::IpAddr, num::ParseIntError, + string::ToString, }; -use anyhow::{Context, Error}; -use serde::Deserialize; - use crate::database::{ database_errors::DatabaseConfigError, env_vars::{ @@ -21,56 +20,55 @@ pub(crate) const DEFAULT_MAX_CONNECTIONS: u32 = 10; /// The default postgres port. 
pub(crate) const DEFAULT_PORT: u16 = 5432; -pub(crate) const DEFAULT_POSTGRES_STORAGE_PATH: &str = - "/casper/sidecar-storage/casper-event-sidecar"; - -pub fn read_config(config_path: &str) -> Result { - let toml_content = - std::fs::read_to_string(config_path).context("Error reading config file contents")?; - toml::from_str(&toml_content).context("Error parsing config into TOML format") +#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] +pub enum LegacySseApiTag { + // This tag is to point to sse endpoint of casper node in version 1.x + V1, +} +fn default_disable_event_persistence() -> bool { + false } - // This struct is used to parse the toml-formatted config file so the values can be utilised in the code. #[derive(Clone, Debug, Deserialize, PartialEq, Eq)] -#[cfg_attr(test, derive(Default))] -pub struct Config { +pub struct SseEventServerConfig { + pub enable_server: bool, + pub emulate_legacy_sse_apis: Option>, pub inbound_channel_size: Option, pub outbound_channel_size: Option, pub connections: Vec, - pub storage: StorageConfig, - pub rest_server: RestServerConfig, pub event_stream_server: EventStreamServerConfig, - pub admin_server: Option, + #[serde(default = "default_disable_event_persistence")] + pub disable_event_persistence: bool, } -#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] -#[cfg_attr(test, derive(Default))] -pub struct ConfigSerdeTarget { - pub inbound_channel_size: Option, - pub outbound_channel_size: Option, - pub connections: Vec, - pub storage: Option, - pub rest_server: RestServerConfig, - pub event_stream_server: EventStreamServerConfig, - pub admin_server: Option, + +#[cfg(any(feature = "testing", test))] +impl Default for SseEventServerConfig { + fn default() -> Self { + Self { + enable_server: true, + emulate_legacy_sse_apis: Some(vec![LegacySseApiTag::V1]), + inbound_channel_size: Some(100), + outbound_channel_size: Some(100), + connections: vec![], + event_stream_server: EventStreamServerConfig::default(), + 
disable_event_persistence: false, + } + } } -impl TryFrom for Config { - type Error = DatabaseConfigError; - fn try_from(value: ConfigSerdeTarget) -> Result { - Ok(Config { - inbound_channel_size: value.inbound_channel_size, - outbound_channel_size: value.outbound_channel_size, - connections: value.connections, - storage: value.storage.unwrap_or_default().try_into()?, - rest_server: value.rest_server, - event_stream_server: value.event_stream_server, - admin_server: value.admin_server, - }) +impl SseEventServerConfig { + #[cfg(any(feature = "testing", test))] + pub fn default_no_persistence() -> Self { + Self { + disable_event_persistence: true, + ..Default::default() + } } } + #[derive(Clone, Debug, Deserialize, PartialEq, Eq)] pub struct Connection { - pub ip_address: String, + pub ip_address: IpAddr, pub sse_port: u16, pub rest_port: u16, pub max_attempts: usize, @@ -83,67 +81,115 @@ pub struct Connection { } #[derive(Debug, Deserialize, Clone, PartialEq, Eq)] -#[serde(untagged)] -pub enum StorageConfig { - SqliteDbConfig { - storage_path: String, - sqlite_config: SqliteConfig, - }, - PostgreSqlDbConfig { - storage_path: String, - postgresql_config: PostgresqlConfig, - }, +pub struct StorageConfig { + pub storage_folder: String, + pub sqlite_config: Option, + pub postgresql_config: Option, } impl StorageConfig { + pub fn is_enabled(&self) -> bool { + self.sqlite_config + .as_ref() + .map(|config| config.enabled) + .unwrap_or_else(|| { + self.postgresql_config + .as_ref() + .map(|config| config.enabled) + .unwrap_or(false) + }) + } + + #[cfg(test)] + pub(crate) fn set_storage_folder(&mut self, path: String) { + self.storage_folder = path.clone(); + } + #[cfg(test)] - pub(crate) fn set_storage_path(&mut self, path: String) { - match self { - StorageConfig::SqliteDbConfig { storage_path, .. } => *storage_path = path, - StorageConfig::PostgreSqlDbConfig { storage_path, .. 
} => *storage_path = path, + pub(crate) fn clear_db_storage(&mut self) { + self.sqlite_config = None; + self.postgresql_config = None; + } + + #[cfg(any(feature = "testing", test))] + pub fn two_dbs() -> Self { + StorageConfig { + storage_folder: "abc".to_string(), + sqlite_config: Some(SqliteConfig::default()), + postgresql_config: Some(PostgresqlConfig::default()), } } #[cfg(test)] pub fn postgres_with_port(port: u16) -> Self { - StorageConfig::PostgreSqlDbConfig { - storage_path: "/target/test_storage".to_string(), - postgresql_config: PostgresqlConfig { + Self { + storage_folder: "storage".to_string(), + sqlite_config: None, + postgresql_config: Some(PostgresqlConfig { + enabled: true, host: "localhost".to_string(), database_name: "event_sidecar".to_string(), database_username: "postgres".to_string(), database_password: "p@$$w0rd".to_string(), max_connections_in_pool: 100, port, - }, + }), } } - pub fn get_storage_path(&self) -> String { - match self { - StorageConfig::SqliteDbConfig { storage_path, .. } => storage_path.clone(), - StorageConfig::PostgreSqlDbConfig { storage_path, .. 
} => storage_path.clone(), + #[cfg(any(feature = "testing", test))] + pub fn no_dbs() -> Self { + Self { + storage_folder: "storage".to_string(), + sqlite_config: None, + postgresql_config: None, } } + + #[cfg(any(feature = "testing", test))] + pub fn no_enabled_dbs() -> Self { + let sqlite_config = SqliteConfig { + enabled: false, + ..Default::default() + }; + let postgresql_config = PostgresqlConfig { + enabled: false, + ..Default::default() + }; + Self { + storage_folder: "storage".to_string(), + sqlite_config: Some(sqlite_config), + postgresql_config: Some(postgresql_config), + } + } + + pub fn is_postgres_enabled(&self) -> bool { + self.postgresql_config + .as_ref() + .map(|config| config.enabled) + .unwrap_or(false) + } + + pub fn is_sqlite_enabled(&self) -> bool { + self.sqlite_config + .as_ref() + .map(|config| config.enabled) + .unwrap_or(false) + } } #[derive(Debug, Deserialize, Clone, PartialEq, Eq)] -#[serde(untagged)] -pub enum StorageConfigSerdeTarget { - SqliteDbConfig { - storage_path: String, - sqlite_config: SqliteConfig, - }, - PostgreSqlDbConfigSerdeTarget { - storage_path: String, - postgresql_config: Option, - }, +pub struct StorageConfigSerdeTarget { + storage_folder: String, + sqlite_config: Option, + postgresql_config: Option, } impl Default for StorageConfigSerdeTarget { fn default() -> Self { - StorageConfigSerdeTarget::PostgreSqlDbConfigSerdeTarget { - storage_path: DEFAULT_POSTGRES_STORAGE_PATH.to_string(), + Self { + storage_folder: "storage".to_string(), + sqlite_config: None, postgresql_config: Some(PostgresqlConfigSerdeTarget::default()), } } @@ -152,27 +198,23 @@ impl TryFrom for StorageConfig { type Error = DatabaseConfigError; fn try_from(value: StorageConfigSerdeTarget) -> Result { - match value { - StorageConfigSerdeTarget::SqliteDbConfig { - storage_path, - sqlite_config, - } => Ok(StorageConfig::SqliteDbConfig { - storage_path, - sqlite_config, - }), - StorageConfigSerdeTarget::PostgreSqlDbConfigSerdeTarget { - storage_path, 
- postgresql_config, - } => Ok(StorageConfig::PostgreSqlDbConfig { - storage_path, - postgresql_config: postgresql_config.unwrap_or_default().try_into()?, - }), - } + let mut storage_config = Self { + storage_folder: value.storage_folder, + ..Default::default() + }; + if let Some(config) = value.sqlite_config { + storage_config.sqlite_config = Some(config); + } else if let Some(config) = value.postgresql_config { + let postgresql_config: PostgresqlConfig = config.try_into()?; + storage_config.postgresql_config = Some(postgresql_config); + }; + Ok(storage_config) + } } #[derive(Clone, Debug, Deserialize, PartialEq, Eq)] pub struct SqliteConfig { + pub enabled: bool, pub file_name: String, pub max_connections_in_pool: u32, pub wal_autocheckpointing_interval: u16, @@ -180,6 +222,7 @@ pub struct SqliteConfig { #[derive(Clone, Debug, Deserialize, PartialEq, Eq)] pub struct PostgresqlConfig { + pub enabled: bool, pub host: String, pub database_name: String, pub database_username: String, @@ -188,8 +231,24 @@ pub struct PostgresqlConfig { pub port: u16, } +#[cfg(any(feature = "testing", test))] +impl Default for PostgresqlConfig { + fn default() -> Self { + Self { + enabled: true, + host: "localhost".to_string(), + database_name: "event_sidecar".to_string(), + database_username: "postgres".to_string(), + database_password: "p@$$w0rd".to_string(), + max_connections_in_pool: 100, + port: 5432, + } + } +} + #[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq)] pub struct PostgresqlConfigSerdeTarget { + pub enabled: bool, pub host: Option, pub database_name: Option, pub database_username: Option, @@ -237,6 +296,7 @@ impl TryFrom for PostgresqlConfig { })?; Ok(PostgresqlConfig { + enabled: value.enabled, host, database_name, database_username, @@ -248,7 +308,8 @@ impl TryFrom for PostgresqlConfig { } #[derive(Clone, Debug, Deserialize, PartialEq, Eq)] -pub struct RestServerConfig { +pub struct RestApiServerConfig { + pub enable_server: bool, pub port: u16, pub 
max_concurrent_requests: u32, pub max_requests_per_second: u32, @@ -262,102 +323,35 @@ pub struct EventStreamServerConfig { } #[derive(Clone, Debug, Deserialize, PartialEq, Eq)] -pub struct AdminServerConfig { +pub struct AdminApiServerConfig { + pub enable_server: bool, pub port: u16, pub max_concurrent_requests: u32, pub max_requests_per_second: u32, } -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn should_parse_nctl_config_toml() { - let expected_config = Config { - inbound_channel_size: None, - outbound_channel_size: None, - connections: vec![ - Connection::example_connection_1(), - Connection::example_connection_2(), - Connection::example_connection_3(), - ], - storage: StorageConfig::SqliteDbConfig { - storage_path: "./target/storage".to_string(), - sqlite_config: SqliteConfig { - file_name: "sqlite_database.db3".to_string(), - max_connections_in_pool: 100, - wal_autocheckpointing_interval: 1000, - }, - }, - rest_server: build_rest_server_config(), - event_stream_server: EventStreamServerConfig::default(), - admin_server: None, - }; - - let parsed_config: Config = read_config("../EXAMPLE_NCTL_CONFIG.toml") - .expect("Error parsing EXAMPLE_NCTL_CONFIG.toml") - .try_into() - .unwrap(); - - assert_eq!(parsed_config, expected_config); - } - - #[test] - fn should_parse_node_config_toml() { - let mut expected_connection = Connection::example_connection_1(); - expected_connection.sse_port = 9999; - expected_connection.rest_port = 8888; - expected_connection.max_attempts = 10; - expected_connection.enable_logging = true; - let mut expected_connection_2 = expected_connection.clone(); - expected_connection_2.ip_address = "168.254.51.2".to_string(); - let mut expected_connection_3 = expected_connection.clone(); - expected_connection_3.ip_address = "168.254.51.3".to_string(); - let expected_config = Config { - inbound_channel_size: None, - outbound_channel_size: None, - connections: vec![ - expected_connection, - expected_connection_2, - 
expected_connection_3, - ], - storage: StorageConfig::SqliteDbConfig { - storage_path: "/var/lib/casper-event-sidecar".to_string(), - sqlite_config: SqliteConfig { - file_name: "sqlite_database.db3".to_string(), - max_connections_in_pool: 100, - wal_autocheckpointing_interval: 1000, - }, - }, - rest_server: build_rest_server_config(), - event_stream_server: EventStreamServerConfig::default(), - admin_server: Some(AdminServerConfig { - port: 18887, - max_concurrent_requests: 1, - max_requests_per_second: 1, - }), - }; - let parsed_config: Config = read_config("../EXAMPLE_NODE_CONFIG.toml") - .expect("Error parsing EXAMPLE_NODE_CONFIG.toml") - .try_into() - .unwrap(); - - assert_eq!(parsed_config, expected_config); - } - - fn build_rest_server_config() -> RestServerConfig { - RestServerConfig { - port: 18888, +#[cfg(any(feature = "testing", test))] +impl Default for AdminApiServerConfig { + fn default() -> Self { + Self { + enable_server: true, + port: 1211, max_concurrent_requests: 50, - max_requests_per_second: 50, + max_requests_per_second: 60, } } +} + +#[cfg(any(feature = "testing", test))] +mod tests { + use std::net::Ipv4Addr; + + use super::*; impl Connection { pub fn example_connection_1() -> Connection { Connection { - ip_address: "127.0.0.1".to_string(), + ip_address: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), sse_port: 18101, rest_port: 14101, max_attempts: 10, @@ -372,7 +366,7 @@ mod tests { pub fn example_connection_2() -> Connection { Connection { - ip_address: "127.0.0.1".to_string(), + ip_address: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), sse_port: 18102, rest_port: 14102, max_attempts: 10, @@ -387,7 +381,7 @@ mod tests { pub fn example_connection_3() -> Connection { Connection { - ip_address: "127.0.0.1".to_string(), + ip_address: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), sse_port: 18103, rest_port: 14103, max_attempts: 10, @@ -404,7 +398,7 @@ mod tests { impl Default for Connection { fn default() -> Self { Self { - ip_address: "127.0.0.1".to_string(), 
+ ip_address: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), sse_port: 18101, rest_port: 14101, allow_partial_connection: false, @@ -420,9 +414,10 @@ mod tests { impl Default for StorageConfig { fn default() -> Self { - StorageConfig::SqliteDbConfig { - storage_path: "/target/test_storage".to_string(), - sqlite_config: SqliteConfig::default(), + StorageConfig { + storage_folder: "abc".to_string(), + sqlite_config: Some(SqliteConfig::default()), + postgresql_config: None, } } } @@ -430,6 +425,7 @@ mod tests { impl Default for SqliteConfig { fn default() -> Self { Self { + enabled: true, file_name: "test_sqlite_database".to_string(), max_connections_in_pool: 100, wal_autocheckpointing_interval: 1000, @@ -437,9 +433,10 @@ mod tests { } } - impl Default for RestServerConfig { + impl Default for RestApiServerConfig { fn default() -> Self { Self { + enable_server: true, port: 17777, max_concurrent_requests: 50, max_requests_per_second: 50, diff --git a/sidecar/src/types/database.rs b/event_sidecar/src/types/database.rs similarity index 56% rename from sidecar/src/types/database.rs rename to event_sidecar/src/types/database.rs index 57efcb52..3baa4488 100644 --- a/sidecar/src/types/database.rs +++ b/event_sidecar/src/types/database.rs @@ -1,26 +1,121 @@ use crate::{ database::{ - postgresql_database::PostgreSqlDatabase, sqlite_database::SqliteDatabase, - types::DDLConfiguration, + postgresql_database::PostgreSqlDatabase, + sqlite_database::SqliteDatabase, + types::{DDLConfiguration, SseEnvelope}, }, - sql::tables, + sql::tables::{self, transaction_type::TransactionTypeId as SqlTransactionTypeId}, types::sse_events::{ - BlockAdded, DeployAccepted, DeployExpired, DeployProcessed, Fault, FinalitySignature, Step, + BlockAdded, Fault, FinalitySignature, Step, TransactionAccepted, TransactionExpired, + TransactionProcessed, }, + StorageConfig, }; -use anyhow::Error; +use anyhow::{Context, Error}; use async_trait::async_trait; -use casper_event_types::FinalitySignature as FinSig; +use 
casper_types::FinalitySignature as FinSig; use serde::{Deserialize, Serialize}; -use std::sync::Arc; +use std::{ + fmt::{Display, Formatter}, + path::Path, + sync::Arc, +}; +use tokio::sync::OnceCell; use utoipa::ToSchema; +pub enum TransactionTypeId { + Deploy, + Version1, +} + +impl From<&TransactionTypeId> for u8 { + fn from(transaction_type: &TransactionTypeId) -> u8 { + let sql_transaction_type = match transaction_type { + TransactionTypeId::Deploy => SqlTransactionTypeId::Deploy, + TransactionTypeId::Version1 => SqlTransactionTypeId::Version1, + }; + sql_transaction_type as u8 + } +} + +#[cfg(test)] +impl Display for TransactionTypeId { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + TransactionTypeId::Deploy => write!(f, "deploy"), + TransactionTypeId::Version1 => write!(f, "version1"), + } + } +} + #[derive(Clone)] pub enum Database { SqliteDatabaseWrapper(SqliteDatabase), PostgreSqlDatabaseWrapper(PostgreSqlDatabase), } +/// Wrapper for a Database that is lazily initialized. +/// Using this structure ensures that the database connection is not being initialized +/// if the db-dependant components are configured, but disabled. 
+#[derive(Clone)] +pub struct LazyDatabaseWrapper { + config: StorageConfig, + resource: Arc>>, +} + +impl LazyDatabaseWrapper { + pub fn new(config: StorageConfig) -> Self { + let cell = Arc::new(tokio::sync::OnceCell::new()); + Self { + config, + resource: cell, + } + } + + pub async fn acquire(&self) -> &Result { + let config_ref = &self.config; + self.resource + .get_or_init(|| async move { Database::build(&config_ref.clone()).await }) + .await + } + + #[cfg(any(feature = "testing", test))] + pub fn for_tests() -> Self { + let db = Database::for_tests(); + let cell = Arc::new(tokio::sync::OnceCell::from(Ok(db))); + Self { + config: StorageConfig::default(), + resource: cell, + } + } +} + +impl Database { + pub async fn build(config: &StorageConfig) -> Result { + if let Some(sqlite_config) = &config.sqlite_config { + let sqlite_database = + SqliteDatabase::new(Path::new(&config.storage_folder), sqlite_config.clone()) + .await + .context("Error instantiating sqlite database") + .map_err(DatabaseInitializationError::from)?; + return Ok(Database::SqliteDatabaseWrapper(sqlite_database)); + } else if let Some(postgresql) = &config.postgresql_config { + let postgres_database = PostgreSqlDatabase::new(postgresql.clone()) + .await + .context("Error instantiating postgres database") + .map_err(DatabaseInitializationError::from)?; + return Ok(Database::PostgreSqlDatabaseWrapper(postgres_database)); + } + Err("Tried to build database without any enabled database configuration".into()) + } + + #[cfg(any(feature = "testing", test))] + pub fn for_tests() -> Database { + let sqlite_database = SqliteDatabase::new_in_memory_no_migrations(100).unwrap(); + Database::SqliteDatabaseWrapper(sqlite_database) + } +} + /// Describes a reference for the writing interface of an 'Event Store' database. /// There is a one-to-one relationship between each method and each event that can be received from the node. 
/// Each method takes the `data` and `id` fields as well as the source IP address (useful for tying the node-specific `id` to the relevant node). @@ -38,39 +133,47 @@ pub trait DatabaseWriter { block_added: BlockAdded, event_id: u32, event_source_address: String, + api_version: String, + network_name: String, ) -> Result; /// Save a DeployAccepted event to the database. /// - /// * `deploy_accepted`: the [DeployAccepted] from the `data` field. + /// * `transaction_accepted`: the [DeployAccepted] from the `data` field. /// * `event_id`: the node-specific assigned `id`. /// * `event_source_address`: the IP address of the source node. - async fn save_deploy_accepted( + async fn save_transaction_accepted( &self, - deploy_accepted: DeployAccepted, + transaction_accepted: TransactionAccepted, event_id: u32, event_source_address: String, + api_version: String, + network_name: String, ) -> Result; /// Save a DeployProcessed event to the database. /// - /// * `deploy_accepted`: the [DeployProcessed] from the `data` field. + /// * `transaction_processed`: the [DeployProcessed] from the `data` field. /// * `event_id`: the node-specific assigned `id`. /// * `event_source_address`: the IP address of the source node. - async fn save_deploy_processed( + async fn save_transaction_processed( &self, - deploy_processed: DeployProcessed, + transaction_processed: TransactionProcessed, event_id: u32, event_source_address: String, + api_version: String, + network_name: String, ) -> Result; /// Save a DeployExpired event to the database. /// - /// * `deploy_expired`: the [DeployExpired] from the `data` field. + /// * `transaction_expired`: the [DeployExpired] from the `data` field. /// * `event_id`: the node-specific assigned `id`. /// * `event_source_address`: the IP address of the source node. 
- async fn save_deploy_expired( + async fn save_transaction_expired( &self, - deploy_expired: DeployExpired, + transaction_expired: TransactionExpired, event_id: u32, event_source_address: String, + api_version: String, + network_name: String, ) -> Result; /// Save a Fault event to the database. /// @@ -82,6 +185,8 @@ pub trait DatabaseWriter { fault: Fault, event_id: u32, event_source_address: String, + api_version: String, + network_name: String, ) -> Result; /// Save a FinalitySignature event to the database. /// @@ -93,6 +198,8 @@ pub trait DatabaseWriter { finality_signature: FinalitySignature, event_id: u32, event_source_address: String, + api_version: String, + network_name: String, ) -> Result; /// Save a Step event to the database. /// @@ -104,6 +211,8 @@ pub trait DatabaseWriter { step: Step, event_id: u32, event_source_address: String, + api_version: String, + network_name: String, ) -> Result; // Save data about shutdown to the database @@ -111,6 +220,8 @@ pub trait DatabaseWriter { &self, event_id: u32, event_source_address: String, + api_version: String, + network_name: String, ) -> Result; /// Executes migration and stores current migration version @@ -125,6 +236,39 @@ pub struct UniqueConstraintError { pub error: sqlx::Error, } +#[derive(Debug, Clone)] +pub struct DatabaseInitializationError { + reason: String, +} + +impl Display for DatabaseInitializationError { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "DatabaseInitializationError: {}", self.reason) + } +} + +impl From<&DatabaseInitializationError> for Error { + fn from(value: &DatabaseInitializationError) -> Self { + Error::msg(value.to_string()) + } +} + +impl DatabaseInitializationError { + pub fn from(error: Error) -> Self { + Self { + reason: error.to_string(), + } + } +} + +impl From<&str> for DatabaseInitializationError { + fn from(reason: &str) -> Self { + Self { + reason: reason.to_string(), + } + } +} + /// The database failed to insert a record(s). 
#[derive(Debug)] pub enum DatabaseWriteError { @@ -140,9 +284,29 @@ pub enum DatabaseWriteError { Unhandled(anyhow::Error), } -impl ToString for DatabaseWriteError { - fn to_string(&self) -> String { - format!("{:?}", self) +impl Display for DatabaseWriteError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + DatabaseWriteError::Serialisation(error) => { + write!(f, "DatabaseWriteError::Serialisation: {}", error) + } + DatabaseWriteError::SqlConstruction(error) => { + write!(f, "DatabaseWriteError::SqlConstruction: {}", error) + } + DatabaseWriteError::UniqueConstraint(unique_constraint_error) => { + write!( + f, + "DatabaseWriteError::UniqueConstraint: table: {}, error: {}", + unique_constraint_error.table, unique_constraint_error.error + ) + } + DatabaseWriteError::Database(error) => { + write!(f, "DatabaseWriteError::Database: {}", error) + } + DatabaseWriteError::Unhandled(error) => { + write!(f, "DatabaseWriteError::Unhandled: {}", error) + } + } } } @@ -176,7 +340,7 @@ impl From for DatabaseWriteError { } "1555" | "2067" => { // The message looks something like this: - // UNIQUE constraint failed: DeployProcessed.deploy_hash + // UNIQUE constraint failed: DeployProcessed.transaction_hash let table = db_err.message().split(':').collect::>()[1] .split('.') @@ -208,66 +372,79 @@ impl From for DatabaseWriteError { #[async_trait] pub trait DatabaseReader { /// Returns the latest [BlockAdded] by height from the database. - async fn get_latest_block(&self) -> Result; + async fn get_latest_block(&self) -> Result, DatabaseReadError>; /// Returns the [BlockAdded] corresponding to the provided `height`. /// /// * `height` - Height of the block which should be retrieved - async fn get_block_by_height(&self, height: u64) -> Result; + async fn get_block_by_height( + &self, + height: u64, + ) -> Result, DatabaseReadError>; /// Returns the [BlockAdded] corresponding to the provided hex-encoded `hash`. 
/// /// * `hash` - hash which identifies the block - async fn get_block_by_hash(&self, hash: &str) -> Result; - /// Returns an aggregate of the deploy's events corresponding to the given hex-encoded `hash` + async fn get_block_by_hash( + &self, + hash: &str, + ) -> Result, DatabaseReadError>; + /// Returns an aggregate of the transaction's events corresponding to the given hex-encoded `hash` /// - /// * `hash` - deploy hash of which the aggregate data should be fetched - async fn get_deploy_aggregate_by_hash( + /// * `hash` - transaction hash of which the aggregate data should be fetched + async fn get_transaction_aggregate_by_identifier( &self, + transaction_type: &TransactionTypeId, hash: &str, - ) -> Result; + ) -> Result; /// Returns the [DeployAccepted] corresponding to the given hex-encoded `hash` /// - /// * `hash` - deploy hash which identifies the deploy accepted - async fn get_deploy_accepted_by_hash( + /// * `hash` - transaction hash which identifies the transaction accepted + async fn get_transaction_accepted_by_hash( &self, + transaction_type: &TransactionTypeId, hash: &str, - ) -> Result; + ) -> Result, DatabaseReadError>; /// Returns the [DeployProcessed] corresponding to the given hex-encoded `hash` /// - /// * `hash` - deploy hash which identifies the deploy pocessed - async fn get_deploy_processed_by_hash( + /// * `hash` - transaction hash which identifies the transaction processed + async fn get_transaction_processed_by_hash( &self, + transaction_type: &TransactionTypeId, hash: &str, - ) -> Result; + ) -> Result, DatabaseReadError>; /// Returns the [DeployExpired] corresponding to the given hex-encoded `hash` /// - /// * `hash` - deploy hash which identifies the deploy expired - async fn get_deploy_expired_by_hash( + /// * `hash` - transaction hash which identifies the transaction expired + async fn get_transaction_expired_by_hash( &self, + transaction_type: &TransactionTypeId, hash: &str, - ) -> Result; + ) -> Result, DatabaseReadError>; /// 
Returns all [Fault]s that correspond to the given hex-encoded `public_key` /// /// * `public_key` - key which identifies the fault async fn get_faults_by_public_key( &self, public_key: &str, - ) -> Result, DatabaseReadError>; + ) -> Result>, DatabaseReadError>; /// Returns all [Fault]s that occurred in the given `era` /// /// * `era` - number of era for which faults should be fetched - async fn get_faults_by_era(&self, era: u64) -> Result, DatabaseReadError>; + async fn get_faults_by_era( + &self, + era: u64, + ) -> Result>, DatabaseReadError>; /// Returns all [FinalitySignature](casper_event_types::FinalitySignature)s for the given hex-encoded `block_hash`. /// /// * `block_hash` - block hash for which finality signatures should be fetched async fn get_finality_signatures_by_block( &self, block_hash: &str, - ) -> Result, DatabaseReadError>; + ) -> Result>, DatabaseReadError>; /// Returns the [Step] event for the given era. /// /// * `era` - identifier of era - async fn get_step_by_era(&self, era: u64) -> Result; + async fn get_step_by_era(&self, era: u64) -> Result, DatabaseReadError>; /// Returns number of events stored in db. 
async fn get_number_of_events(&self) -> Result; @@ -288,11 +465,13 @@ pub enum DatabaseReadError { } #[derive(Debug, Deserialize, Serialize, Clone, ToSchema)] -pub struct DeployAggregate { - pub(crate) deploy_hash: String, - pub(crate) deploy_accepted: Option, - pub(crate) deploy_processed: Option, - pub(crate) deploy_expired: bool, +pub struct TransactionAggregate { + pub(crate) transaction_hash: String, + #[schema(value_type = TransactionAcceptedEnveloped)] + pub(crate) transaction_accepted: Option>, + #[schema(value_type = TransactionProcessedEnveloped)] + pub(crate) transaction_processed: Option>, + pub(crate) transaction_expired: bool, } #[allow(dead_code)] //Allowing dead code here because the Raw enum is used only in ITs @@ -357,11 +536,25 @@ impl Migration { Migration { version: Some(1), statement_producers: |config: DDLConfiguration| { - let insert_types_stmt = - tables::event_type::create_initialise_stmt().map_err(|err| { - Error::msg(format!("Error building create_initialise_stmt: {:?}", err)) + let insert_event_types_stmt = tables::event_type::create_initialise_stmt() + .map_err(|err| { + Error::msg(format!( + "Error building event types insert statement: {:?}", + err + )) + })?; + let insert_transaction_types_stmt = + tables::transaction_type::create_initialise_stmt().map_err(|err| { + Error::msg(format!( + "Error building transaction types insert statement: {:?}", + err + )) })?; - Ok(migration_1_ddl_statements(config, insert_types_stmt)) + Ok(migration_1_ddl_statements( + config, + insert_event_types_stmt, + insert_transaction_types_stmt, + )) }, script_executor: None, } @@ -381,26 +574,29 @@ impl Migration { fn migration_1_ddl_statements( config: DDLConfiguration, - insert_types_stmt: sea_query::InsertStatement, + insert_event_types_stmt: sea_query::InsertStatement, + insert_transaction_types_stmt: sea_query::InsertStatement, ) -> Vec { - let init_stmt = StatementWrapper::InsertStatement(insert_types_stmt); vec![ // Synthetic tables 
StatementWrapper::TableCreateStatement(Box::new(tables::event_type::create_table_stmt())), - StatementWrapper::TableCreateStatement(Box::new(tables::event_log::create_table_stmt( - config.is_big_integer_id, - ))), - StatementWrapper::TableCreateStatement(Box::new(tables::deploy_event::create_table_stmt())), + StatementWrapper::TableCreateStatement(Box::new( + tables::transaction_type::create_table_stmt(), + )), + StatementWrapper::TableCreateStatement(Box::new(tables::event_log::create_table_stmt())), + StatementWrapper::TableCreateStatement(Box::new( + tables::transaction_event::create_table_stmt(), + )), // Raw Event tables StatementWrapper::TableCreateStatement(Box::new(tables::block_added::create_table_stmt())), StatementWrapper::TableCreateStatement(Box::new( - tables::deploy_accepted::create_table_stmt(), + tables::transaction_accepted::create_table_stmt(), )), StatementWrapper::TableCreateStatement(Box::new( - tables::deploy_processed::create_table_stmt(), + tables::transaction_processed::create_table_stmt(), )), StatementWrapper::TableCreateStatement(Box::new( - tables::deploy_expired::create_table_stmt(), + tables::transaction_expired::create_table_stmt(), )), StatementWrapper::TableCreateStatement(Box::new(tables::fault::create_table_stmt( config.db_supports_unsigned, @@ -412,6 +608,7 @@ fn migration_1_ddl_statements( config.db_supports_unsigned, ))), StatementWrapper::TableCreateStatement(Box::new(tables::shutdown::create_table_stmt())), - init_stmt, + StatementWrapper::InsertStatement(insert_event_types_stmt), + StatementWrapper::InsertStatement(insert_transaction_types_stmt), ] } diff --git a/event_sidecar/src/types/sse_events.rs b/event_sidecar/src/types/sse_events.rs new file mode 100644 index 00000000..0f39b3f9 --- /dev/null +++ b/event_sidecar/src/types/sse_events.rs @@ -0,0 +1,357 @@ +#[cfg(test)] +use casper_types::ChainNameDigest; +use casper_types::{ + contract_messages::Messages, execution::ExecutionResult, AsymmetricType, Block, BlockHash, + 
EraId, InitiatorAddr, ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transaction, + TransactionHash, +}; +#[cfg(test)] +use casper_types::{ + execution::{execution_result_v1::ExecutionResultV1, Effects, ExecutionResultV2}, + testing::TestRng, + TestBlockBuilder, TestBlockV1Builder, +}; +use casper_types::{FinalitySignature as FinSig, Signature}; +use derive_new::new; +use hex::ToHex; +#[cfg(test)] +use rand::Rng; +use serde::{Deserialize, Serialize}; +use serde_json::value::RawValue; +use std::{ + fmt::{Display, Formatter}, + sync::Arc, +}; +use utoipa::ToSchema; + +use crate::sql::tables::transaction_type::TransactionTypeId; + +/// The version of this node's API server. This event will always be the first sent to a new +/// client, and will have no associated event ID provided. +#[derive(Clone, Debug, Serialize, Deserialize, new)] +pub struct ApiVersion(ProtocolVersion); + +/// The given block has been added to the linear chain and stored locally. +#[derive(Clone, Debug, Serialize, Deserialize, new, ToSchema)] +pub struct BlockAdded { + block_hash: BlockHash, + block: Box, +} + +#[cfg(test)] +pub fn random_execution_result(rng: &mut TestRng) -> ExecutionResult { + match rng.gen_range(0..2) { + 0 => { + let result_v1: ExecutionResultV1 = rng.gen(); + ExecutionResult::V1(result_v1) + } + 1 => { + let result_v2 = ExecutionResultV2::random(rng); + ExecutionResult::V2(result_v2) + } + _ => panic!("Unexpected value"), + } +} + +#[cfg(test)] +impl BlockAdded { + pub fn random(rng: &mut TestRng) -> Self { + let block = match rng.gen_range(0..2) { + 0 => { + let block_v1 = TestBlockV1Builder::default().build(rng); + Block::V1(block_v1) + } + 1 => { + let block_v2 = TestBlockBuilder::default().build(rng); + Block::V2(block_v2) + } + _ => panic!("Unexpected value"), + }; + Self { + block_hash: *block.hash(), + block: Box::new(block), + } + } +} + +impl BlockAdded { + pub fn hex_encoded_hash(&self) -> String { + hex::encode(self.block_hash.inner()) + } + + pub fn 
get_height(&self) -> u64 { + self.block.height() + } +} + +/// The given transaction has been newly-accepted by this node. +#[derive(Clone, Debug, Serialize, Deserialize, new, ToSchema)] +pub struct TransactionAccepted { + // It's an Arc to not create multiple copies of the same transaction for multiple subscribers. + transaction: Arc, +} + +impl TransactionAccepted { + pub fn identifier(&self) -> String { + transaction_hash_to_identifier(&self.transaction.hash()) + } + + pub fn transaction_type_id(&self) -> TransactionTypeId { + match *self.transaction { + Transaction::Deploy(_) => TransactionTypeId::Deploy, + Transaction::V1(_) => TransactionTypeId::Version1, + } + } + + pub fn transaction(&self) -> Arc { + self.transaction.clone() + } + + #[cfg(test)] + pub fn api_transaction_type_id(&self) -> crate::types::database::TransactionTypeId { + match *self.transaction { + Transaction::Deploy(_) => crate::types::database::TransactionTypeId::Deploy, + Transaction::V1(_) => crate::types::database::TransactionTypeId::Version1, + } + } + + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + Self { + transaction: Arc::new(Transaction::random(rng)), + } + } + + #[cfg(test)] + pub fn transaction_hash(&self) -> TransactionHash { + self.transaction.hash().to_owned() + } + + pub fn hex_encoded_hash(&self) -> String { + let hex_fmt: String = match self.transaction.hash() { + TransactionHash::Deploy(deploy) => deploy.encode_hex(), + TransactionHash::V1(transaction) => transaction.encode_hex(), + }; + hex_fmt + } +} + +/// The given transaction has been executed, committed and forms part of the given block. 
+#[derive(Clone, Debug, Serialize, Deserialize, new, ToSchema)] +pub struct TransactionProcessed { + transaction_hash: Box, + #[schema(value_type = String)] + initiator_addr: Box, + #[schema(value_type = String)] + timestamp: Timestamp, + #[schema(value_type = String)] + ttl: TimeDiff, + block_hash: Box, + execution_result: Box, + messages: Messages, +} + +impl TransactionProcessed { + pub fn identifier(&self) -> String { + transaction_hash_to_identifier(&self.transaction_hash) + } + + pub fn transaction_type_id(&self) -> TransactionTypeId { + match *self.transaction_hash.as_ref() { + TransactionHash::Deploy(_) => TransactionTypeId::Deploy, + TransactionHash::V1(_) => TransactionTypeId::Version1, + } + } + + #[cfg(test)] + pub fn api_transaction_type_id(&self) -> crate::types::database::TransactionTypeId { + match *self.transaction_hash.as_ref() { + TransactionHash::Deploy(_) => crate::types::database::TransactionTypeId::Deploy, + TransactionHash::V1(_) => crate::types::database::TransactionTypeId::Version1, + } + } + + #[cfg(test)] + pub fn random(rng: &mut TestRng, with_transaction_hash: Option) -> Self { + let transaction = Transaction::random(rng); + let ttl = match &transaction { + Transaction::Deploy(deploy) => deploy.ttl(), + Transaction::V1(transaction) => transaction.ttl(), + }; + let timestamp = match &transaction { + Transaction::Deploy(deploy) => deploy.timestamp(), + Transaction::V1(transaction) => transaction.timestamp(), + }; + let initiator_addr = Box::new(transaction.initiator_addr()); + Self { + transaction_hash: Box::new(with_transaction_hash.unwrap_or(transaction.hash())), + initiator_addr, + timestamp, + ttl, + block_hash: Box::new(BlockHash::random(rng)), + execution_result: Box::new(random_execution_result(rng)), + messages: rng.random_vec(1..5), + } + } + + pub fn hex_encoded_hash(&self) -> String { + match *self.transaction_hash.as_ref() { + TransactionHash::Deploy(deploy_hash) => deploy_hash.encode_hex(), + TransactionHash::V1(v1_hash) => 
v1_hash.encode_hex(), + } + } + + pub fn messages(&self) -> &Messages { + &self.messages + } +} + +/// The given transaction has expired. +#[derive(Clone, Debug, Serialize, Deserialize, new, ToSchema)] +pub struct TransactionExpired { + transaction_hash: TransactionHash, +} + +impl TransactionExpired { + pub fn identifier(&self) -> String { + transaction_hash_to_identifier(&self.transaction_hash) + } + + pub fn transaction_type_id(&self) -> TransactionTypeId { + match self.transaction_hash { + TransactionHash::Deploy(_) => TransactionTypeId::Deploy, + TransactionHash::V1(_) => TransactionTypeId::Version1, + } + } + pub fn transaction_hash(&self) -> TransactionHash { + self.transaction_hash + } + + #[cfg(test)] + pub fn api_transaction_type_id(&self) -> crate::types::database::TransactionTypeId { + match self.transaction_hash { + TransactionHash::Deploy(_) => crate::types::database::TransactionTypeId::Deploy, + TransactionHash::V1(_) => crate::types::database::TransactionTypeId::Version1, + } + } + + #[cfg(test)] + pub fn random(rng: &mut TestRng, with_transaction_hash: Option) -> Self { + Self { + transaction_hash: with_transaction_hash.unwrap_or_else(|| TransactionHash::random(rng)), + } + } + + pub fn hex_encoded_hash(&self) -> String { + match self.transaction_hash { + TransactionHash::Deploy(deploy_hash) => deploy_hash.encode_hex(), + TransactionHash::V1(v1_hash) => v1_hash.encode_hex(), + } + } +} + +/// Generic representation of validator's fault in an era. +#[derive(Clone, Debug, Serialize, Deserialize, new, ToSchema)] +pub struct Fault { + #[schema(value_type = u64)] + pub era_id: EraId, + /// "Hex-encoded cryptographic public key, including the algorithm tag prefix." 
+ #[schema(value_type = String)] + pub public_key: PublicKey, + #[schema(value_type = String)] + pub timestamp: Timestamp, +} + +impl Fault { + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + Self { + era_id: EraId::new(rng.gen()), + public_key: PublicKey::random(rng), + timestamp: Timestamp::random(rng), + } + } +} + +impl Display for Fault { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{:#?}", self) + } +} + +/// New finality signature received. +#[derive(Clone, Debug, Serialize, Deserialize, new)] +pub struct FinalitySignature(Box); + +impl From for FinSig { + fn from(val: FinalitySignature) -> Self { + *val.0 + } +} + +impl FinalitySignature { + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + let block_hash = BlockHash::random(rng); + let block_height = rng.gen::(); + let era_id = EraId::random(rng); + let chain_name_digest = ChainNameDigest::random(rng); + Self(Box::new(FinSig::random_for_block( + block_hash, + block_height, + era_id, + chain_name_digest, + rng, + ))) + } + + pub fn inner(&self) -> FinSig { + *self.0.clone() + } + + pub fn signature(&self) -> &Signature { + self.0.signature() + } + + pub fn block_hash(&self) -> &BlockHash { + self.0.block_hash() + } + + pub fn hex_encoded_block_hash(&self) -> String { + hex::encode(self.0.block_hash().inner()) + } + + pub fn hex_encoded_public_key(&self) -> String { + self.0.public_key().to_hex() + } +} + +/// The execution effects produced by a `StepRequest`. +#[derive(Clone, Debug, Serialize, Deserialize, new, ToSchema)] +pub struct Step { + #[schema(value_type = u64)] + pub era_id: EraId, + #[schema(value_type = ExecutionEffect)] + //This technically is not amorphic, but this field is potentially > 30MB of size. 
By not parsing it we make the process of intaking these messages much quicker and less memory consuming + execution_effect: Box, +} + +impl Step { + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + use serde_json::value::to_raw_value; + + let execution_effect = Effects::random(rng); + Self { + era_id: EraId::new(rng.gen()), + execution_effect: to_raw_value(&execution_effect).unwrap(), + } + } +} + +pub fn transaction_hash_to_identifier(transaction_hash: &TransactionHash) -> String { + match transaction_hash { + TransactionHash::Deploy(deploy) => hex::encode(deploy.inner()), + TransactionHash::V1(transaction) => hex::encode(transaction.inner()), + } +} diff --git a/sidecar/src/utils.rs b/event_sidecar/src/utils.rs similarity index 74% rename from sidecar/src/utils.rs rename to event_sidecar/src/utils.rs index 83bb940a..1c961e96 100644 --- a/sidecar/src/utils.rs +++ b/event_sidecar/src/utils.rs @@ -1,5 +1,5 @@ #[cfg(feature = "additional-metrics")] -use crate::metrics::EVENTS_PROCESSED_PER_SECOND; +use metrics::db::EVENTS_PROCESSED_PER_SECOND; #[cfg(feature = "additional-metrics")] use std::sync::Arc; #[cfg(feature = "additional-metrics")] @@ -136,7 +136,7 @@ pub fn start_metrics_thread(module_name: String) -> Sender<()> { let metrics_data_for_thread = metrics_data.clone(); tokio::spawn(async move { let metrics_data = metrics_data_for_thread; - while let Some(_) = metrics_queue_rx.recv().await { + while metrics_queue_rx.recv().await.is_some() { let mut guard = metrics_data.lock().await; guard.observed_events += 1; drop(guard); @@ -147,26 +147,24 @@ pub fn start_metrics_thread(module_name: String) -> Sender<()> { #[cfg(test)] pub mod tests { - use crate::database::postgresql_database::PostgreSqlDatabase; - use crate::run; - use crate::testing::mock_node::tests::MockNode; - use crate::testing::testing_config::get_port; - use crate::testing::testing_config::prepare_config; - use crate::testing::testing_config::TestingConfig; - use 
crate::types::config::Config; - use anyhow::Error; + use crate::{ + database::{postgresql_database::PostgreSqlDatabase, sqlite_database::SqliteDatabase}, + run, run_rest_server, + testing::{ + mock_node::tests::MockNode, + testing_config::{get_port, prepare_config, TestingConfig}, + }, + Database, + }; use anyhow::Error as AnyhowError; - use pg_embed::pg_enums::PgAuthMethod; - use pg_embed::postgres::PgSettings; use pg_embed::{ + pg_enums::PgAuthMethod, pg_fetch::{PgFetchSettings, PG_V13}, - postgres::PgEmbed, + postgres::{PgEmbed, PgSettings}, }; - use std::path::PathBuf; - use std::time::Duration; + use std::{path::PathBuf, process::ExitCode, time::Duration}; use tempfile::{tempdir, TempDir}; - use tokio::sync::mpsc::Receiver; - use tokio::time::timeout; + use tokio::{sync::mpsc::Receiver, time::timeout}; pub(crate) fn display_duration(duration: Duration) -> String { // less than a second @@ -208,9 +206,9 @@ pub mod tests { }); match timeout(timeout_after, join_handle).await { Ok(res) => { - res.map_err(|err| Error::msg(format!("Failed to wait for receiver, {}", err))) + res.map_err(|err| AnyhowError::msg(format!("Failed to wait for receiver, {}", err))) } - Err(_) => Err(Error::msg("Waiting for messages timed out")), + Err(_) => Err(AnyhowError::msg("Waiting for messages timed out")), } .unwrap() } @@ -228,6 +226,8 @@ pub mod tests { let infix_str = infix.as_str(); data.iter().any(|x| x.contains(infix_str)) } + + #[allow(dead_code)] pub struct MockNodeTestProperties { pub testing_config: TestingConfig, pub temp_storage_dir: TempDir, @@ -246,7 +246,7 @@ pub mod tests { node_mock.set_sse_port(node_port_for_sse_connection); node_mock.set_rest_port(node_port_for_rest_connection); start_nodes_and_wait(vec![node_mock]).await; - start_sidecar(testing_config.inner()).await; + start_sidecar(testing_config.clone()).await; MockNodeTestProperties { testing_config, temp_storage_dir, @@ -255,30 +255,67 @@ pub mod tests { event_stream_server_port, } } - pub async fn 
start_sidecar(config: Config) -> tokio::task::JoinHandle> { - tokio::spawn(async move { run(config).await }) // starting event sidecar + + pub async fn start_sidecar_with_rest_api( + config: TestingConfig, + ) -> tokio::task::JoinHandle> { + tokio::spawn(async move { unpack_test_config_and_run(config, true).await }) + // starting the sidecar + } + + pub async fn start_sidecar( + config: TestingConfig, + ) -> tokio::task::JoinHandle> { + tokio::spawn(async move { unpack_test_config_and_run(config, false).await }) + // starting the sidecar } + pub fn build_test_config() -> (TestingConfig, TempDir, u16, u16, u16) { - build_test_config_with_retries(10, 1) + build_test_config_with_retries(10, 1, true, None) } - pub fn build_test_config_without_connections() -> (TestingConfig, TempDir, u16) { + + pub fn build_test_config_without_db_storage() -> (TestingConfig, TempDir, u16, u16, u16) { + build_test_config_with_retries(10, 1, false, None) + } + + pub fn build_test_config_with_network_name( + network_name: &str, + ) -> (TestingConfig, TempDir, u16, u16, u16) { + build_test_config_with_retries(10, 1, true, Some(network_name.into())) + } + + pub fn build_test_config_without_connections( + enable_db_storage: bool, + maybe_network_name: Option, + ) -> (TestingConfig, TempDir, u16) { let temp_storage_dir = tempdir().expect("Should have created a temporary storage directory"); - let testing_config = prepare_config(&temp_storage_dir); + let testing_config = + prepare_config(&temp_storage_dir, enable_db_storage, maybe_network_name); let event_stream_server_port = testing_config.event_stream_server_port(); (testing_config, temp_storage_dir, event_stream_server_port) } pub fn build_test_config_with_retries( max_attempts: usize, delay_between_retries: usize, + enable_db_storage: bool, + maybe_network_name: Option, ) -> (TestingConfig, TempDir, u16, u16, u16) { let (mut testing_config, temp_storage_dir, event_stream_server_port) = - build_test_config_without_connections(); + 
build_test_config_without_connections(enable_db_storage, maybe_network_name); testing_config.add_connection(None, None, None); - let node_port_for_sse_connection = - testing_config.config.connections.get(0).unwrap().sse_port; - let node_port_for_rest_connection = - testing_config.config.connections.get(0).unwrap().rest_port; + let node_port_for_sse_connection = testing_config + .event_server_config + .connections + .first() + .unwrap() + .sse_port; + let node_port_for_rest_connection = testing_config + .event_server_config + .connections + .first() + .unwrap() + .rest_port; testing_config.set_retries_for_node( node_port_for_sse_connection, max_attempts, @@ -375,14 +412,22 @@ pub mod tests { let context = build_postgres_database().await.unwrap(); let temp_storage_dir = tempdir().expect("Should have created a temporary storage directory"); - let mut testing_config = prepare_config(&temp_storage_dir); + let mut testing_config = prepare_config(&temp_storage_dir, true, None); let event_stream_server_port = testing_config.event_stream_server_port(); testing_config.set_storage(StorageConfig::postgres_with_port(context.port)); testing_config.add_connection(None, None, None); - let node_port_for_sse_connection = - testing_config.config.connections.get(0).unwrap().sse_port; - let node_port_for_rest_connection = - testing_config.config.connections.get(0).unwrap().rest_port; + let node_port_for_sse_connection = testing_config + .event_server_config + .connections + .first() + .unwrap() + .sse_port; + let node_port_for_rest_connection = testing_config + .event_server_config + .connections + .first() + .unwrap() + .rest_port; testing_config.set_retries_for_node( node_port_for_sse_connection, max_attempts, @@ -398,4 +443,42 @@ pub mod tests { context, ) } + + pub async fn unpack_test_config_and_run( + testing_config: TestingConfig, + spin_up_rest_api: bool, + ) -> Result { + let has_db_configured = testing_config.has_db_configured(); + let sse_config = testing_config.inner(); + 
let storage_config = testing_config.storage_config; + let storage_folder = storage_config.storage_folder.clone(); + let maybe_database = if has_db_configured { + let sqlite_database = SqliteDatabase::new_from_config(&storage_config) + .await + .unwrap(); + Some(Database::SqliteDatabaseWrapper(sqlite_database)) + } else { + None + }; + + if spin_up_rest_api { + if !has_db_configured { + return Err(AnyhowError::msg( + "Can't unpack TestingConfig with REST API if no database is configured", + )); + } + let rest_api_server_config = testing_config.rest_api_server_config; + let database_for_rest_api = maybe_database.clone().unwrap(); + tokio::spawn(async move { + run_rest_server(rest_api_server_config, database_for_rest_api).await + }); + } + run( + sse_config, + storage_folder, + maybe_database, + testing_config.network_name, + ) + .await + } } diff --git a/images/SidecarDiagram.png b/images/SidecarDiagram.png deleted file mode 100644 index 8049d761..00000000 Binary files a/images/SidecarDiagram.png and /dev/null differ diff --git a/json_rpc/Cargo.toml b/json_rpc/Cargo.toml new file mode 100644 index 00000000..c0a5f66b --- /dev/null +++ b/json_rpc/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "casper-json-rpc" +version = "1.1.0" +authors = ["Fraser Hutchison "] +edition = "2018" +description = "A library suitable for use as the framework for a JSON-RPC server." 
+readme = "README.md" +documentation = "https://docs.rs/casper-json-rpc" +homepage = "https://casperlabs.io" +repository = "https://github.com/casper-network/casper-node/tree/master/json_rpc" +license = "Apache-2.0" + +[dependencies] +bytes = "1.1.0" +futures = { workspace = true } +http = "0.2.7" +itertools = { workspace = true } +metrics = { workspace = true } +serde = { workspace = true, default-features = true, features = ["derive"] } +serde_json = { version = "1", features = ["preserve_order"] } +tracing = { workspace = true, default-features = true } +warp = "0.3.6" + +[dev-dependencies] +env_logger = "0" +hyper = "0.14.18" +tokio = { workspace = true, features = ["macros", "rt-multi-thread", "test-util"] } diff --git a/json_rpc/README.md b/json_rpc/README.md new file mode 100644 index 00000000..b0c5cc91 --- /dev/null +++ b/json_rpc/README.md @@ -0,0 +1,116 @@ +# The `casper-json-rpc` Library + +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) + +[![Build Status](https://drone-auto-casper-network.casperlabs.io/api/badges/casper-network/casper-node/status.svg?branch=dev)](http://drone-auto-casper-network.casperlabs.io/casper-network/casper-node) +[![Crates.io](https://img.shields.io/crates/v/casper-json-rpc)](https://crates.io/crates/casper-json-rpc) +[![Documentation](https://docs.rs/casper-node/badge.svg)](https://docs.rs/casper-json-rpc) +[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE) + +The `casper-json-rpc` library described here can be used as the framework for a JSON-RPC server. + +# Usage + +Typical usage of this library involves two steps: + +* Construct a set of request handlers using a +[`RequestHandlersBuilder`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/struct.RequestHandlersBuilder.html). 
+* Call [`casper_json_rpc::route`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/fn.route.html) to construct a boxed warp filter ready to be passed to [`warp::service`](https://docs.rs/warp/latest/warp/fn.service.html). + +# Example + +```rust +use casper_json_rpc::{Error, Params, RequestHandlersBuilder}; +use std::{convert::Infallible, sync::Arc}; + +async fn get(params: Option) -> Result { + // * parse params or return `ReservedErrorCode::InvalidParams` error + // * handle request and return result + Ok("got it".to_string()) +} + +async fn put(params: Option, other_input: &str) -> Result { + Ok(other_input.to_string()) +} + +#[tokio::main] +async fn main() { + // Register handlers for methods "get" and "put". + let mut handlers = RequestHandlersBuilder::new(); + handlers.register_handler("get", Arc::new(get)); + let put_handler = move |params| async move { put(params, "other input").await }; + handlers.register_handler("put", Arc::new(put_handler)); + let handlers = handlers.build(); + + // Get the new route. + let path = "rpc"; + let max_body_bytes = 1024; + let route = casper_json_rpc::route(path, max_body_bytes, handlers); + + // Convert it into a `Service` and run it. + let make_svc = hyper::service::make_service_fn(move |_| { + let svc = warp::service(route.clone()); + async move { Ok::<_, Infallible>(svc.clone()) } + }); + + hyper::Server::bind(&([127, 0, 0, 1], 3030).into()) + .serve(make_svc) + .await + .unwrap(); +} +``` + +The following is a sample request: + +```sh +curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":"id","method":"get"}' http://127.0.0.1:3030/rpc +``` + +Here is a sample response: + +```sh +{"jsonrpc":"2.0","id":"id","result":"got it"} +``` + +# Errors + +To return a JSON-RPC response indicating an error, use +[`Error::new`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/struct.Error.html#method.new). 
Most error +conditions that require returning a reserved error are already handled in the provided warp filters. The only +exception is +[`ReservedErrorCode::InvalidParams`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/enum.ReservedErrorCode.html#variant.InvalidParams), which should be returned by any RPC handler that deems the provided `params: Option` to be invalid for any +reason. + +Generally, a set of custom error codes should be provided. These should all implement +[`ErrorCodeT`](https://docs.rs/casper-json-rpc/latest/casper_json_rpc/trait.ErrorCodeT.html). + +## Example custom error code + +```rust +use serde::{Deserialize, Serialize}; +use casper_json_rpc::ErrorCodeT; + +#[derive(Copy, Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[repr(i64)] +pub enum ErrorCode { + /// The requested item was not found. + NoSuchItem = -1, + /// Failed to put the requested item to storage. + FailedToPutItem = -2, +} + +impl From for (i64, &'static str) { + fn from(error_code: ErrorCode) -> Self { + match error_code { + ErrorCode::NoSuchItem => (error_code as i64, "No such item"), + ErrorCode::FailedToPutItem => (error_code as i64, "Failed to put item"), + } + } +} + +impl ErrorCodeT for ErrorCode {} +``` + +# License + +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). diff --git a/json_rpc/src/error.rs b/json_rpc/src/error.rs new file mode 100644 index 00000000..1c74d79f --- /dev/null +++ b/json_rpc/src/error.rs @@ -0,0 +1,283 @@ +use std::{borrow::Cow, fmt::Debug, hash::Hash}; + +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use tracing::{error, warn}; + +/// A marker trait for a type suitable for use as an error code when constructing an [`Error`]. +/// +/// The implementing type must also implement `Into<(i64, &'static str)>` where the tuple represents +/// the "code" and "message" fields of the `Error`. 
+/// +/// As per the JSON-RPC specification, the code must not fall in the reserved range, i.e. it must +/// not be between -32768 and -32000 inclusive. +/// +/// Generally the "message" will be a brief const &str, where additional request-specific info can +/// be provided via the `additional_info` parameter of [`Error::new`]. +/// +/// # Example +/// +/// ``` +/// use serde::{Deserialize, Serialize}; +/// use casper_json_rpc::ErrorCodeT; +/// +/// #[derive(Copy, Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +/// #[repr(i64)] +/// pub enum ErrorCode { +/// /// The requested item was not found. +/// NoSuchItem = -1, +/// /// Failed to put the requested item to storage. +/// FailedToPutItem = -2, +/// } +/// +/// impl From for (i64, &'static str) { +/// fn from(error_code: ErrorCode) -> Self { +/// match error_code { +/// ErrorCode::NoSuchItem => (error_code as i64, "No such item"), +/// ErrorCode::FailedToPutItem => (error_code as i64, "Failed to put item"), +/// } +/// } +/// } +/// +/// impl ErrorCodeT for ErrorCode {} +/// ``` +pub trait ErrorCodeT: + Into<(i64, &'static str)> + for<'de> Deserialize<'de> + Copy + Eq + Debug +{ + /// Whether this type represents reserved error codes or not. + /// + /// This should normally be left with the default return value of `false`. + #[doc(hidden)] + fn is_reserved() -> bool { + false + } +} + +/// The various reserved codes which can be returned in the JSON-RPC response's "error" object. +/// +/// See [the JSON-RPC Specification](https://www.jsonrpc.org/specification#error_object) for further +/// details. +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] +#[repr(i64)] +pub enum ReservedErrorCode { + /// Invalid JSON was received by the server. + ParseError = -32700, + /// The JSON sent is not a valid Request object. + InvalidRequest = -32600, + /// The method does not exist or is not available. + MethodNotFound = -32601, + /// Invalid method parameter(s). 
+ InvalidParams = -32602, + /// Internal JSON-RPC error. + InternalError = -32603, +} + +impl From for (i64, &'static str) { + fn from(error_code: ReservedErrorCode) -> Self { + match error_code { + ReservedErrorCode::ParseError => (error_code as i64, "Parse error"), + ReservedErrorCode::InvalidRequest => (error_code as i64, "Invalid Request"), + ReservedErrorCode::MethodNotFound => (error_code as i64, "Method not found"), + ReservedErrorCode::InvalidParams => (error_code as i64, "Invalid params"), + ReservedErrorCode::InternalError => (error_code as i64, "Internal error"), + } + } +} + +impl ErrorCodeT for ReservedErrorCode { + fn is_reserved() -> bool { + true + } +} + +/// An object suitable to be returned in a JSON-RPC response as the "error" field. +/// +/// See [the JSON-RPC Specification](https://www.jsonrpc.org/specification#error_object) for further +/// details. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[serde(deny_unknown_fields)] +pub struct Error { + /// A number that indicates the error type that occurred. + code: i64, + /// A short description of the error. + message: Cow<'static, str>, + /// Additional information about the error. + data: Option, +} + +impl Error { + /// Returns a new `Error`, converting `error_code` to the "code" and "message" fields, and + /// JSON-encoding `additional_info` as the "data" field. + /// + /// Other than when providing a [`ReservedErrorCode`], the converted "code" must not fall in the + /// reserved range as defined in the JSON-RPC specification, i.e. it must not be between -32768 + /// and -32100 inclusive. + /// + /// Note that in an upcoming release, the restriction will be tightened to disallow error codes + /// in the implementation-defined server-errors range. I.e. codes in the range -32768 to -32000 + /// inclusive will be disallowed. 
+ /// + /// If the converted code is within the reserved range when it should not be, or if + /// JSON-encoding `additional_data` fails, the returned `Self` is built from + /// [`ReservedErrorCode::InternalError`] with the "data" field being a String providing more + /// info on the underlying error. + pub fn new(error_code: C, additional_info: T) -> Self { + let (code, message): (i64, &'static str) = error_code.into(); + + if !C::is_reserved() && (-32768..=-32100).contains(&code) { + warn!(%code, "provided json-rpc error code is reserved; returning internal error"); + let (code, message) = ReservedErrorCode::InternalError.into(); + return Error { + code, + message: Cow::Borrowed(message), + data: Some(Value::String(format!( + "attempted to return reserved error code {}", + code + ))), + }; + } + + let data = match serde_json::to_value(additional_info) { + Ok(Value::Null) => None, + Ok(value) => Some(value), + Err(error) => { + error!(%error, "failed to json-encode additional info in json-rpc error"); + let (code, message) = ReservedErrorCode::InternalError.into(); + return Error { + code, + message: Cow::Borrowed(message), + data: Some(Value::String(format!( + "failed to json-encode additional info in json-rpc error: {}", + error + ))), + }; + } + }; + + Error { + code, + message: Cow::Borrowed(message), + data, + } + } + + /// Returns the code of the error. + pub fn code(&self) -> i64 { + self.code + } +} + +#[cfg(test)] +mod tests { + use serde::ser::{Error as _, Serializer}; + + use super::*; + + #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] + struct TestErrorCode { + // If `true` the error code will be one in the reserved range. 
+ in_reserved_range: bool, + } + + impl From for (i64, &'static str) { + fn from(error_code: TestErrorCode) -> Self { + if error_code.in_reserved_range { + (-32768, "Invalid test error") + } else { + (-123, "Valid test error") + } + } + } + + impl ErrorCodeT for TestErrorCode {} + + #[derive(Serialize)] + struct AdditionalInfo { + id: u64, + context: &'static str, + } + + impl Default for AdditionalInfo { + fn default() -> Self { + AdditionalInfo { + id: 1314, + context: "TEST", + } + } + } + + struct FailToEncode; + + impl Serialize for FailToEncode { + fn serialize(&self, _serializer: S) -> Result { + Err(S::Error::custom("won't encode")) + } + } + + #[test] + fn should_construct_reserved_error() { + const EXPECTED_WITH_DATA: &str = + r#"{"code":-32700,"message":"Parse error","data":{"id":1314,"context":"TEST"}}"#; + const EXPECTED_WITHOUT_DATA: &str = + r#"{"code":-32601,"message":"Method not found","data":null}"#; + const EXPECTED_WITH_BAD_DATA: &str = r#"{"code":-32603,"message":"Internal error","data":"failed to json-encode additional info in json-rpc error: won't encode"}"#; + + let error_with_data = Error::new(ReservedErrorCode::ParseError, AdditionalInfo::default()); + let encoded = serde_json::to_string(&error_with_data).unwrap(); + assert_eq!(encoded, EXPECTED_WITH_DATA); + + let error_without_data = Error::new(ReservedErrorCode::MethodNotFound, None::); + let encoded = serde_json::to_string(&error_without_data).unwrap(); + assert_eq!(encoded, EXPECTED_WITHOUT_DATA); + + let error_with_bad_data = Error::new(ReservedErrorCode::InvalidParams, FailToEncode); + let encoded = serde_json::to_string(&error_with_bad_data).unwrap(); + assert_eq!(encoded, EXPECTED_WITH_BAD_DATA); + } + + #[test] + fn should_construct_custom_error() { + const EXPECTED_WITH_DATA: &str = + r#"{"code":-123,"message":"Valid test error","data":{"id":1314,"context":"TEST"}}"#; + const EXPECTED_WITHOUT_DATA: &str = + r#"{"code":-123,"message":"Valid test error","data":null}"#; + const 
EXPECTED_WITH_BAD_DATA: &str = r#"{"code":-32603,"message":"Internal error","data":"failed to json-encode additional info in json-rpc error: won't encode"}"#; + + let good_error_code = TestErrorCode { + in_reserved_range: false, + }; + + let error_with_data = Error::new(good_error_code, AdditionalInfo::default()); + let encoded = serde_json::to_string(&error_with_data).unwrap(); + assert_eq!(encoded, EXPECTED_WITH_DATA); + + let error_without_data = Error::new(good_error_code, ()); + let encoded = serde_json::to_string(&error_without_data).unwrap(); + assert_eq!(encoded, EXPECTED_WITHOUT_DATA); + + let error_with_bad_data = Error::new(good_error_code, FailToEncode); + let encoded = serde_json::to_string(&error_with_bad_data).unwrap(); + assert_eq!(encoded, EXPECTED_WITH_BAD_DATA); + } + + #[test] + fn should_fall_back_to_internal_error_on_bad_custom_error() { + const EXPECTED: &str = r#"{"code":-32603,"message":"Internal error","data":"attempted to return reserved error code -32603"}"#; + + let bad_error_code = TestErrorCode { + in_reserved_range: true, + }; + + let error_with_data = Error::new(bad_error_code, AdditionalInfo::default()); + let encoded = serde_json::to_string(&error_with_data).unwrap(); + assert_eq!(encoded, EXPECTED); + + let error_without_data = Error::new(bad_error_code, None::); + let encoded = serde_json::to_string(&error_without_data).unwrap(); + assert_eq!(encoded, EXPECTED); + + let error_with_bad_data = Error::new(bad_error_code, FailToEncode); + let encoded = serde_json::to_string(&error_with_bad_data).unwrap(); + assert_eq!(encoded, EXPECTED); + } +} diff --git a/json_rpc/src/filters.rs b/json_rpc/src/filters.rs new file mode 100644 index 00000000..bc523511 --- /dev/null +++ b/json_rpc/src/filters.rs @@ -0,0 +1,205 @@ +//! Warp filters which can be combined to provide JSON-RPC endpoints. +//! +//! Generally these lower-level filters will not need to be explicitly called. Instead, +//! 
[`casper_json_rpc::route()`](crate::route) should be sufficient. + +#[cfg(test)] +mod tests; + +use bytes::Bytes; +use http::{header::CONTENT_TYPE, HeaderMap, StatusCode}; +use serde_json::{json, Map, Value}; +use tracing::{debug, trace, warn}; +use warp::{ + body, + filters::BoxedFilter, + reject::{self, Rejection}, + reply::{self, WithStatus}, + Filter, +}; + +use crate::{ + error::{Error, ReservedErrorCode}, + rejections::{BodyTooLarge, MissingContentTypeHeader, MissingId, UnsupportedMediaType}, + request::{ErrorOrRejection, Request}, + request_handlers::RequestHandlers, + response::Response, +}; + +const CONTENT_TYPE_VALUE: &str = "application/json"; + +/// Returns a boxed warp filter which handles the initial setup. +/// +/// This includes: +/// * setting the full path +/// * setting the method to POST +/// * ensuring the "content-type" header exists and is set to "application/json" +/// * ensuring the body has at most `max_body_bytes` bytes +pub fn base_filter>(path: P, max_body_bytes: u32) -> BoxedFilter<()> { + let path = path.as_ref().to_string(); + warp::path::path(path) + .and(warp::path::end()) + .and(warp::filters::method::post()) + .and( + warp::filters::header::headers_cloned().and_then(|headers: HeaderMap| async move { + for (name, value) in headers.iter() { + if name.as_str() == CONTENT_TYPE.as_str() { + if value + .as_bytes() + .eq_ignore_ascii_case(CONTENT_TYPE_VALUE.as_bytes()) + { + return Ok(()); + } else { + trace!(content_type = ?value.to_str(), "invalid {}", CONTENT_TYPE); + return Err(reject::custom(UnsupportedMediaType)); + } + } + } + trace!("missing {}", CONTENT_TYPE); + Err(reject::custom(MissingContentTypeHeader)) + }), + ) + .untuple_one() + .and(body::content_length_limit(max_body_bytes as u64).or_else( + move |_rejection| async move { Err(reject::custom(BodyTooLarge(max_body_bytes))) }, + )) + .boxed() +} + +/// Handles parsing a JSON-RPC request from the given HTTP body, executing it using the appropriate +/// handler, and 
providing a JSON-RPC response (which could be a success or failure). +/// +/// Returns an `Err(Rejection)` only if the request is a Notification as per the JSON-RPC +/// specification, i.e. the request doesn't contain an "id" field. In this case, no JSON-RPC +/// response is sent to the client. +/// +/// If `allow_unknown_fields` is `false`, requests with unknown fields will cause the server to +/// respond with an error. +async fn handle_body( + body: Bytes, + handlers: RequestHandlers, + allow_unknown_fields: bool, +) -> Result { + let response = match serde_json::from_slice::>(&body) { + Ok(unvalidated_request) => match Request::new(unvalidated_request, allow_unknown_fields) { + Ok(request) => handlers.handle_request(request, body.len()).await, + Err(ErrorOrRejection::Error { id, error }) => { + debug!(?error, "got an invalid request"); + Response::new_failure(id, error) + } + Err(ErrorOrRejection::Rejection(rejection)) => { + debug!(?rejection, "rejecting an invalid request"); + return Err(rejection); + } + }, + Err(error) => { + debug!(%error, "got bad json"); + let error = Error::new(ReservedErrorCode::ParseError, error.to_string()); + Response::new_failure(Value::Null, error) + } + }; + Ok(response) +} + +/// Returns a boxed warp filter which handles parsing a JSON-RPC request from the given HTTP body, +/// executing it using the appropriate handler, and providing a reply. +/// +/// The reply will normally be built from a JSON-RPC response (which could be a success or failure). +/// +/// However, the reply could be built from a [`Rejection`] if the request is a Notification as per +/// the JSON-RPC specification, i.e. the request doesn't contain an "id" field. In this case, no +/// JSON-RPC response is sent to the client, only an HTTP response. +/// +/// If `allow_unknown_fields` is `false`, requests with unknown fields will cause the server to +/// respond with an error. 
+pub fn main_filter( + handlers: RequestHandlers, + allow_unknown_fields: bool, +) -> BoxedFilter<(WithStatus,)> { + body::bytes() + .and_then(move |body| { + let handlers = handlers.clone(); + async move { handle_body(body, handlers, allow_unknown_fields).await } + }) + .map(|response| reply::with_status(reply::json(&response), StatusCode::OK)) + .boxed() +} + +/// Handler for rejections where no JSON-RPC response is sent, but an HTTP response is required. +/// +/// The HTTP response body will be a JSON object of the form: +/// ```json +/// { "message": } +/// ``` +pub async fn handle_rejection(error: Rejection) -> Result, Rejection> { + let code; + let message; + + if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::UNSUPPORTED_MEDIA_TYPE; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::BAD_REQUEST; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::BAD_REQUEST; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::PAYLOAD_TOO_LARGE; + } else if error.is_not_found() { + trace!("{:?}", error); + message = "Path not found".to_string(); + code = StatusCode::NOT_FOUND; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::METHOD_NOT_ALLOWED; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::BAD_REQUEST; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::BAD_REQUEST; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = 
StatusCode::BAD_REQUEST; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::BAD_REQUEST; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::LENGTH_REQUIRED; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::PAYLOAD_TOO_LARGE; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::UNSUPPORTED_MEDIA_TYPE; + } else if let Some(rejection) = error.find::() { + trace!("{:?}", rejection); + message = rejection.to_string(); + code = StatusCode::FORBIDDEN; + } else { + // We should handle all rejection types before this. + warn!(?error, "unhandled warp rejection in json-rpc server"); + message = format!("Internal server error: unhandled rejection: {:?}", error); + code = StatusCode::INTERNAL_SERVER_ERROR; + } + + Ok(reply::with_status( + reply::json(&json!({ "message": message })), + code, + )) +} diff --git a/json_rpc/src/filters/tests.rs b/json_rpc/src/filters/tests.rs new file mode 100644 index 00000000..b771a8d4 --- /dev/null +++ b/json_rpc/src/filters/tests.rs @@ -0,0 +1,18 @@ +mod base_filter_with_recovery_tests; +mod main_filter_with_recovery_tests; + +use serde::Deserialize; + +/// The HTTP response body returned in the event of a warp rejection. 
+#[derive(Deserialize)] +#[serde(deny_unknown_fields)] +struct ResponseBodyOnRejection { + message: String, +} + +impl ResponseBodyOnRejection { + async fn from_response(response: http::Response) -> Self { + let body_bytes = hyper::body::to_bytes(response.into_body()).await.unwrap(); + serde_json::from_slice(&body_bytes).unwrap() + } +} diff --git a/json_rpc/src/filters/tests/base_filter_with_recovery_tests.rs b/json_rpc/src/filters/tests/base_filter_with_recovery_tests.rs new file mode 100644 index 00000000..361893eb --- /dev/null +++ b/json_rpc/src/filters/tests/base_filter_with_recovery_tests.rs @@ -0,0 +1,220 @@ +use http::StatusCode; +use warp::{filters::BoxedFilter, reply, test::RequestBuilder, Filter, Reply}; + +use super::ResponseBodyOnRejection; +use crate::filters::{base_filter, handle_rejection, CONTENT_TYPE_VALUE}; + +const PATH: &str = "rpc"; +const MAX_BODY_BYTES: u32 = 10; + +fn base_filter_with_recovery() -> BoxedFilter<(impl Reply,)> { + base_filter(PATH, MAX_BODY_BYTES) + .map(reply) // return an empty body on success + .with(warp::cors().allow_origin("http://a.com")) + .recover(handle_rejection) // or convert a rejection to JSON-encoded `ResponseBody` + .boxed() +} + +fn valid_base_filter_request_builder() -> RequestBuilder { + warp::test::request() + .path(&format!("/{}", PATH)) + .header("content-type", CONTENT_TYPE_VALUE) + .method("POST") + .body([0_u8; MAX_BODY_BYTES as usize]) +} + +#[tokio::test] +async fn should_accept_valid_request() { + let _ = env_logger::try_init(); + + let filter = base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::OK); + let body_bytes = hyper::body::to_bytes(response.into_body()).await.unwrap(); + assert!(body_bytes.is_empty()); +} + +#[tokio::test] +async fn should_reject_invalid_path() { + async fn test_with_invalid_path(path: &str) { + let filter = 
base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .path(path) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::NOT_FOUND); + let response_body = ResponseBodyOnRejection::from_response(response).await; + assert_eq!(response_body.message, "Path not found"); + } + + let _ = env_logger::try_init(); + + // A root path. + test_with_invalid_path("/").await; + + // A path which doesn't match the server's. + test_with_invalid_path("/not_the_right_path").await; + + // A path which extends the server's + test_with_invalid_path(&format!("/{0}/{0}", PATH)).await; +} + +#[tokio::test] +async fn should_reject_unsupported_http_method() { + async fn test_with_unsupported_method(method: &'static str) { + let filter = base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .method(method) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::METHOD_NOT_ALLOWED); + let response_body = ResponseBodyOnRejection::from_response(response).await; + assert_eq!(response_body.message, "HTTP method not allowed"); + } + + let _ = env_logger::try_init(); + + test_with_unsupported_method("GET").await; + test_with_unsupported_method("PUT").await; + test_with_unsupported_method("DELETE").await; + test_with_unsupported_method("HEAD").await; + test_with_unsupported_method("OPTIONS").await; + test_with_unsupported_method("CONNECT").await; + test_with_unsupported_method("PATCH").await; + test_with_unsupported_method("TRACE").await; + test_with_unsupported_method("a").await; +} + +#[tokio::test] +async fn should_reject_missing_content_type_header() { + let _ = env_logger::try_init(); + + let filter = base_filter_with_recovery(); + + let response = warp::test::request() + .path(&format!("/{}", PATH)) + .method("POST") + .body("") + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), 
StatusCode::BAD_REQUEST); + let response_body = ResponseBodyOnRejection::from_response(response).await; + assert_eq!( + response_body.message, + "The request's content-type is not set" + ); +} + +#[tokio::test] +async fn should_reject_invalid_content_type() { + async fn test_invalid_content_type(value: &'static str) { + let filter = base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .header("content-type", value) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::UNSUPPORTED_MEDIA_TYPE); + let response_body = ResponseBodyOnRejection::from_response(response).await; + assert_eq!( + response_body.message, + "The request's content-type is not supported" + ); + } + + let _ = env_logger::try_init(); + + test_invalid_content_type("text/html").await; + test_invalid_content_type("multipart/form-data").await; + test_invalid_content_type("a").await; + test_invalid_content_type("").await; +} + +#[tokio::test] +async fn should_reject_large_body() { + let _ = env_logger::try_init(); + + let filter = base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .body([0_u8; MAX_BODY_BYTES as usize + 1]) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::PAYLOAD_TOO_LARGE); + let response_body = ResponseBodyOnRejection::from_response(response).await; + assert_eq!( + response_body.message, + "The request payload exceeds the maximum allowed of 10 bytes" + ); +} + +#[tokio::test] +async fn should_reject_cors() { + let _ = env_logger::try_init(); + + let filter = base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .header("Origin", "http://b.com") + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::FORBIDDEN); + let response_body = ResponseBodyOnRejection::from_response(response).await; + assert_eq!( + response_body.message, + "CORS 
request forbidden: origin not allowed" + ); +} + +#[tokio::test] +async fn should_handle_any_case_content_type() { + async fn test_content_type(key: &'static str, value: &'static str) { + let filter = base_filter_with_recovery(); + + let response = valid_base_filter_request_builder() + .header(key, value) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::OK); + let body_bytes = hyper::body::to_bytes(response.into_body()).await.unwrap(); + assert!(body_bytes.is_empty()); + } + + let _ = env_logger::try_init(); + + test_content_type("Content-Type", "application/json").await; + test_content_type("Content-Type", "Application/JSON").await; + test_content_type("content-type", "application/json").await; + test_content_type("content-type", "Application/JSON").await; + test_content_type("CONTENT-TYPE", "APPLICATION/JSON").await; + test_content_type("CoNtEnT-tYpE", "ApPliCaTiOn/JsOn").await; +} diff --git a/json_rpc/src/filters/tests/main_filter_with_recovery_tests.rs b/json_rpc/src/filters/tests/main_filter_with_recovery_tests.rs new file mode 100644 index 00000000..1e158921 --- /dev/null +++ b/json_rpc/src/filters/tests/main_filter_with_recovery_tests.rs @@ -0,0 +1,320 @@ +use std::sync::Arc; + +use http::StatusCode; +use serde::{ + ser::{Error as _, Serializer}, + Deserialize, Serialize, +}; +use serde_json::Value; +use warp::{filters::BoxedFilter, Filter, Reply}; + +use super::ResponseBodyOnRejection; +use crate::{ + filters::{handle_rejection, main_filter}, + Error, Params, RequestHandlersBuilder, ReservedErrorCode, Response, +}; + +const GET_GOOD_THING: &str = "get good thing"; +const GET_BAD_THING: &str = "get bad thing"; + +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug)] +struct GoodThing { + good_thing: String, +} + +/// A type which always errors when being serialized. 
+struct BadThing; + +impl Serialize for BadThing { + fn serialize(&self, _serializer: S) -> Result { + Err(S::Error::custom("won't encode")) + } +} + +async fn get_good_thing(params: Option) -> Result { + match params { + Some(Params::Array(array)) => Ok(GoodThing { + good_thing: array[0].as_str().unwrap().to_string(), + }), + _ => Err(Error::new(ReservedErrorCode::InvalidParams, "no params")), + } +} + +async fn get_bad_thing(_params: Option) -> Result { + Ok(BadThing) +} + +async fn from_http_response(response: http::Response) -> Response { + let body_bytes = hyper::body::to_bytes(response.into_body()).await.unwrap(); + serde_json::from_slice(&body_bytes).unwrap() +} + +fn main_filter_with_recovery() -> BoxedFilter<(impl Reply,)> { + let mut handlers = RequestHandlersBuilder::new(); + handlers.register_handler(GET_GOOD_THING, Arc::new(get_good_thing)); + handlers.register_handler(GET_BAD_THING, Arc::new(get_bad_thing)); + let handlers = handlers.build(); + + main_filter(handlers, false) + .recover(handle_rejection) + .boxed() +} + +#[tokio::test] +async fn should_handle_valid_request() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `fn get_good_thing` and return `Ok` as "params" is Some, causing a + // Response::Success to be returned to the client. 
+ let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":"a","method":"get good thing","params":["one"]}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), "a"); + assert_eq!( + rpc_response.result(), + Some(GoodThing { + good_thing: "one".to_string() + }) + ); +} + +#[tokio::test] +async fn should_handle_valid_request_where_rpc_returns_error() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `fn get_good_thing` and return `Err` as "params" is None, causing + // a Response::Failure (invalid params) to be returned to the client. + let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":"a","method":"get good thing"}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), "a"); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new(ReservedErrorCode::InvalidParams, "no params") + ); +} + +#[tokio::test] +async fn should_handle_valid_request_where_result_encoding_fails() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `fn get_bad_thing` which returns a type which fails to encode, + // causing a Response::Failure (internal error) to be returned to the client. 
+ let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":"a","method":"get bad thing"}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), "a"); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new( + ReservedErrorCode::InternalError, + "failed to encode json-rpc response value: won't encode" + ) + ); +} + +#[tokio::test] +async fn should_handle_request_for_method_not_registered() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return Response::Failure (invalid + // request) to the client as the ID has fractional parts. + let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":1,"method":"not registered","params":["one"]}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), 1); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new( + ReservedErrorCode::MethodNotFound, + "'not registered' is not a supported json-rpc method on this server" + ) + ); +} + +#[tokio::test] +async fn should_handle_request_with_invalid_id() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return Response::Failure (invalid + // request) to the client as the ID has fractional parts. 
+ let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":1.1,"method":"get good thing","params":["one"]}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), &Value::Null); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new( + ReservedErrorCode::InvalidRequest, + "'id' must not contain fractional parts if it is a number" + ) + ); +} + +#[tokio::test] +async fn should_handle_request_with_no_id() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return no JSON-RPC response, only an + // HTTP response (bad request) to the client as no ID was provided. + let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","method":"get good thing","params":["one"]}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::BAD_REQUEST); + let response_body = ResponseBodyOnRejection::from_response(http_response).await; + assert_eq!( + response_body.message, + "The request is missing the 'id' field" + ); +} + +#[tokio::test] +async fn should_handle_request_with_extra_field() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return Response::Failure (invalid + // request) to the client as the request has an extra field. 
+ let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":1,"method":"get good thing","params":[2],"extra":"field"}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), 1); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new( + ReservedErrorCode::InvalidRequest, + "Unexpected field: 'extra'" + ) + ); +} + +#[tokio::test] +async fn should_handle_malformed_request_with_valid_id() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return Response::Failure (invalid + // request) to the client, but with the ID included in the response as it was able to be parsed. + let http_response = warp::test::request() + .body(r#"{"jsonrpc":"2.0","id":1,"method":{"not":"a string"}}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), 1); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new( + ReservedErrorCode::InvalidRequest, + "Expected 'method' to be a String" + ) + ); +} + +#[tokio::test] +async fn should_handle_malformed_request_but_valid_json() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return Response::Failure (invalid + // request) to the client as it can't be parsed as a JSON-RPC request. 
+ let http_response = warp::test::request() + .body(r#"{"a":1}"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), &Value::Null); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new(ReservedErrorCode::InvalidRequest, "Missing 'jsonrpc' field") + ); +} + +#[tokio::test] +async fn should_handle_invalid_json() { + let _ = env_logger::try_init(); + + let filter = main_filter_with_recovery(); + + // This should get handled by `filters::handle_body` and return Response::Failure (parse error) + // to the client as it cannot be parsed as JSON. + let http_response = warp::test::request() + .body(r#"a"#) + .filter(&filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let rpc_response = from_http_response(http_response).await; + assert_eq!(rpc_response.id(), &Value::Null); + assert_eq!( + rpc_response.error().unwrap(), + &Error::new( + ReservedErrorCode::ParseError, + "expected value at line 1 column 1" + ) + ); +} diff --git a/json_rpc/src/lib.rs b/json_rpc/src/lib.rs new file mode 100644 index 00000000..f82a79cc --- /dev/null +++ b/json_rpc/src/lib.rs @@ -0,0 +1,177 @@ +//! # casper-json-rpc +//! +//! A library suitable for use as the framework for a JSON-RPC server. +//! +//! # Usage +//! +//! Normally usage will involve two steps: +//! * construct a set of request handlers using a [`RequestHandlersBuilder`] +//! * call [`casper_json_rpc::route`](route) to construct a boxed warp filter ready to be passed +//! to [`warp::service`](https://docs.rs/warp/latest/warp/fn.service.html) for example +//! +//! # Example +//! +//! ```no_run +//! use casper_json_rpc::{Error, Params, RequestHandlersBuilder}; +//! use std::{convert::Infallible, sync::Arc}; +//! +//! # #[allow(unused)] +//! async fn get(params: Option) -> Result { +//! 
// * parse params or return `ReservedErrorCode::InvalidParams` error +//! // * handle request and return result +//! Ok("got it".to_string()) +//! } +//! +//! # #[allow(unused)] +//! async fn put(params: Option, other_input: &str) -> Result { +//! Ok(other_input.to_string()) +//! } +//! +//! #[tokio::main] +//! async fn main() { +//! // Register handlers for methods "get" and "put". +//! let mut handlers = RequestHandlersBuilder::new(); +//! handlers.register_handler("get", Arc::new(get)); +//! let put_handler = move |params| async move { put(params, "other input").await }; +//! handlers.register_handler("put", Arc::new(put_handler)); +//! let handlers = handlers.build(); +//! +//! // Get the new route. +//! let path = "rpc"; +//! let max_body_bytes = 1024; +//! let allow_unknown_fields = false; +//! let route = casper_json_rpc::route(path, max_body_bytes, handlers, allow_unknown_fields); +//! +//! // Convert it into a `Service` and run it. +//! let make_svc = hyper::service::make_service_fn(move |_| { +//! let svc = warp::service(route.clone()); +//! async move { Ok::<_, Infallible>(svc.clone()) } +//! }); +//! +//! hyper::Server::bind(&([127, 0, 0, 1], 3030).into()) +//! .serve(make_svc) +//! .await +//! .unwrap(); +//! } +//! ``` +//! +//! # Errors +//! +//! To return a JSON-RPC response indicating an error, use [`Error::new`]. Most error conditions +//! which require returning a reserved error are already handled in the provided warp filters. The +//! only exception is [`ReservedErrorCode::InvalidParams`] which should be returned by any RPC +//! handler which deems the provided `params: Option` to be invalid for any reason. +//! +//! Generally a set of custom error codes should be provided. These should all implement +//! [`ErrorCodeT`]. 
+ +#![doc(html_root_url = "https://docs.rs/casper-json-rpc/1.1.0")] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon_48.png", + html_logo_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon.png", + test(attr(deny(warnings))) +)] +#![warn( + missing_docs, + trivial_casts, + trivial_numeric_casts, + unused_qualifications +)] + +mod error; +pub mod filters; +mod rejections; +mod request; +mod request_handlers; +mod response; + +use http::{header::CONTENT_TYPE, Method}; +use warp::{filters::BoxedFilter, Filter, Reply}; + +pub use error::{Error, ErrorCodeT, ReservedErrorCode}; +pub use request::Params; +pub use request_handlers::{RequestHandlers, RequestHandlersBuilder}; +pub use response::Response; + +const JSON_RPC_VERSION: &str = "2.0"; + +/// Specifies the CORS origin +pub enum CorsOrigin { + /// Any (*) origin is allowed. + Any, + /// Only the specified origin is allowed. + Specified(String), +} + +/// Constructs a set of warp filters suitable for use in a JSON-RPC server. +/// +/// `path` specifies the exact HTTP path for JSON-RPC requests, e.g. "rpc" will match requests on +/// exactly "/rpc", and not "/rpc/other". +/// +/// `max_body_bytes` sets an upper limit for the number of bytes in the HTTP request body. For +/// further details, see +/// [`warp::filters::body::content_length_limit`](https://docs.rs/warp/latest/warp/filters/body/fn.content_length_limit.html). +/// +/// `handlers` is the map of functions to which incoming requests will be dispatched. These are +/// keyed by the JSON-RPC request's "method". +/// +/// If `allow_unknown_fields` is `false`, requests with unknown fields will cause the server to +/// respond with an error. +/// +/// For further details, see the docs for the [`filters`] functions. 
+pub fn route>( + path: P, + max_body_bytes: u32, + handlers: RequestHandlers, + allow_unknown_fields: bool, +) -> BoxedFilter<(impl Reply,)> { + filters::base_filter(path, max_body_bytes) + .and(filters::main_filter(handlers, allow_unknown_fields)) + .recover(filters::handle_rejection) + .boxed() +} + +/// Constructs a set of warp filters suitable for use in a JSON-RPC server. +/// +/// `path` specifies the exact HTTP path for JSON-RPC requests, e.g. "rpc" will match requests on +/// exactly "/rpc", and not "/rpc/other". +/// +/// `max_body_bytes` sets an upper limit for the number of bytes in the HTTP request body. For +/// further details, see +/// [`warp::filters::body::content_length_limit`](https://docs.rs/warp/latest/warp/filters/body/fn.content_length_limit.html). +/// +/// `handlers` is the map of functions to which incoming requests will be dispatched. These are +/// keyed by the JSON-RPC request's "method". +/// +/// If `allow_unknown_fields` is `false`, requests with unknown fields will cause the server to +/// respond with an error. +/// +/// Note that this is a convenience function combining the lower-level functions in [`filters`] +/// along with [a warp CORS filter](https://docs.rs/warp/latest/warp/filters/cors/index.html) which +/// * allows any origin or specified origin +/// * allows "content-type" as a header +/// * allows the method "POST" +/// +/// For further details, see the docs for the [`filters`] functions. 
+pub fn route_with_cors>( + path: P, + max_body_bytes: u32, + handlers: RequestHandlers, + allow_unknown_fields: bool, + cors_header: &CorsOrigin, +) -> BoxedFilter<(impl Reply,)> { + filters::base_filter(path, max_body_bytes) + .and(filters::main_filter(handlers, allow_unknown_fields)) + .recover(filters::handle_rejection) + .with(match cors_header { + CorsOrigin::Any => warp::cors() + .allow_any_origin() + .allow_header(CONTENT_TYPE) + .allow_method(Method::POST), + CorsOrigin::Specified(origin) => warp::cors() + .allow_origin(origin.as_str()) + .allow_header(CONTENT_TYPE) + .allow_method(Method::POST), + }) + .boxed() +} diff --git a/json_rpc/src/rejections.rs b/json_rpc/src/rejections.rs new file mode 100644 index 00000000..8219abbf --- /dev/null +++ b/json_rpc/src/rejections.rs @@ -0,0 +1,72 @@ +//! These types are used to allow a given warp filter to reject a request. The rejections are +//! handled in a subsequent function, where they are converted into meaningful responses. +//! +//! Rather than being returned to the client as a JSON-RPC response with the `error` field set, +//! they instead indicate a response at the HTTP level only. + +use std::fmt::{self, Display, Formatter}; + +use warp::reject::Reject; + +/// Indicates the "Content-Type" header of the request is not "application/json". +/// +/// This rejection is converted into an HTTP 415 (unsupported media type) error. +#[derive(Debug)] +pub(crate) struct UnsupportedMediaType; + +impl Display for UnsupportedMediaType { + fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> { + formatter.write_str("The request's content-type is not supported") + } +} + +impl Reject for UnsupportedMediaType {} + +/// Indicates the "Content-Type" header is missing from the request. +/// +/// This rejection is converted into an HTTP 400 (bad request) error. 
+#[derive(Debug)] +pub(crate) struct MissingContentTypeHeader; + +impl Display for MissingContentTypeHeader { + fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> { + formatter.write_str("The request's content-type is not set") + } +} + +impl Reject for MissingContentTypeHeader {} + +/// Indicates the JSON-RPC request is missing the `id` field. +/// +/// As per the JSON-RPC specification, this is classed as a Notification and the server should not +/// send a response. While no JSON-RPC response is generated for this error, we return an HTTP 400 +/// (bad request) error, as the node API does not support client Notifications. +#[derive(Debug)] +pub(crate) struct MissingId; + +impl Display for MissingId { + fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> { + formatter.write_str("The request is missing the 'id' field") + } +} + +impl Reject for MissingId {} + +/// Indicates the HTTP request body is greater than the maximum allowed. +/// +/// Wraps the configured maximum allowed on the server, set via the `max_body_bytes` parameter in +/// `base_filter()`. 
#[derive(Debug)]
pub(crate) struct BodyTooLarge(pub(crate) u32);

impl Display for BodyTooLarge {
    fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> {
        // `self.0` is the configured `max_body_bytes` limit that was exceeded.
        write!(
            formatter,
            "The request payload exceeds the maximum allowed of {} bytes",
            self.0
        )
    }
}

impl Reject for BodyTooLarge {}

mod params;

use itertools::Itertools;
use serde_json::{Map, Value};
use warp::reject::{self, Rejection};

use crate::{
    error::{Error, ReservedErrorCode},
    rejections::MissingId,
    JSON_RPC_VERSION,
};
pub use params::Params;

// Field names of a JSON-RPC request object, as defined by the JSON-RPC 2.0 spec.
const JSONRPC_FIELD_NAME: &str = "jsonrpc";
const METHOD_FIELD_NAME: &str = "method";
const PARAMS_FIELD_NAME: &str = "params";
const ID_FIELD_NAME: &str = "id";

/// Errors are returned to the client as a JSON-RPC response and HTTP 200 (OK), whereas rejections
/// cause no JSON-RPC response to be sent, but an appropriate HTTP 4xx error will be returned.
#[derive(Debug)]
pub(crate) enum ErrorOrRejection {
    // A JSON-RPC level failure: echoed back with the request's `id` (or Null if the
    // `id` itself was invalid).
    Error { id: Value, error: Error },
    // An HTTP-level rejection handled by the warp rejection machinery.
    Rejection(Rejection),
}

/// A request which has been validated as conforming to the JSON-RPC specification.
pub(crate) struct Request {
    // The "id" field: a String, integer Number or Null (validated by `is_valid`).
    pub id: Value,
    // The "method" field, used to look up the registered handler.
    pub method: String,
    // The "params" field, if present; guaranteed to be an Array or Object.
    pub params: Option<Params>,
}

/// Returns `Ok` if `id` is a String, Null or a Number with no fractional part.
+fn is_valid(id: &Value) -> Result<(), Error> { + match id { + Value::String(_) | Value::Null => (), + Value::Number(number) => { + if number.is_f64() { + return Err(Error::new( + ReservedErrorCode::InvalidRequest, + "'id' must not contain fractional parts if it is a number", + )); + } + } + _ => { + return Err(Error::new( + ReservedErrorCode::InvalidRequest, + "'id' should be a string or integer", + )); + } + } + Ok(()) +} + +impl Request { + /// Returns `Ok` if the request is valid as per + /// [the JSON-RPC specification](https://www.jsonrpc.org/specification#request_object). + /// + /// Returns an `Error` in any of the following cases: + /// * "jsonrpc" field is not "2.0" + /// * "method" field is not a String + /// * "params" field is present, but is not an Array or Object + /// * "id" field is not a String, valid Number or Null + /// * "id" field is a Number with fractional part + /// * `allow_unknown_fields` is `false` and extra fields exist + /// + /// Returns a `Rejection` if the "id" field is `None`. + #[allow(clippy::result_large_err)] + pub(super) fn new( + mut request: Map, + allow_unknown_fields: bool, + ) -> Result { + // Just copy "id" field for now to return verbatim in any errors before we get to actually + // validating the "id" field itself. 
+ let id = request.get(ID_FIELD_NAME).cloned().unwrap_or_default(); + + match request.remove(JSONRPC_FIELD_NAME) { + Some(Value::String(jsonrpc)) => { + if jsonrpc != JSON_RPC_VERSION { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!("Expected 'jsonrpc' to be '2.0', but got '{}'", jsonrpc), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + } + Some(Value::Number(jsonrpc)) => { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!( + "Expected 'jsonrpc' to be a String with value '2.0', but got a Number '{}'", + jsonrpc + ), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + Some(jsonrpc) => { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!( + "Expected 'jsonrpc' to be a String with value '2.0', but got '{}'", + jsonrpc + ), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + None => { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!("Missing '{}' field", JSONRPC_FIELD_NAME), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + } + + let method = match request.remove(METHOD_FIELD_NAME) { + Some(Value::String(method)) => method, + Some(_) => { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!("Expected '{}' to be a String", METHOD_FIELD_NAME), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + None => { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!("Missing '{}' field", METHOD_FIELD_NAME), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + }; + + let params = match request.remove(PARAMS_FIELD_NAME) { + Some(unvalidated_params) => Some(Params::try_from(&id, unvalidated_params)?), + None => None, + }; + + let id = match request.remove(ID_FIELD_NAME) { + Some(id) => { + is_valid(&id).map_err(|error| ErrorOrRejection::Error { + id: Value::Null, + error, + })?; + id + } + None => return Err(ErrorOrRejection::Rejection(reject::custom(MissingId))), + }; + + if 
!allow_unknown_fields && !request.is_empty() { + let error = Error::new( + ReservedErrorCode::InvalidRequest, + format!( + "Unexpected field{}: {}", + if request.len() > 1 { "s" } else { "" }, + request.keys().map(|f| format!("'{}'", f)).join(", ") + ), + ); + return Err(ErrorOrRejection::Error { id, error }); + } + + Ok(Request { id, method, params }) + } +} + +#[cfg(test)] +mod tests { + use serde_json::json; + + use super::*; + + #[test] + fn should_validate_using_valid_id() { + fn run_test(id: Value) { + let method = "a".to_string(); + let params_inner = vec![Value::Bool(true)]; + + let unvalidated = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: id, + METHOD_FIELD_NAME: method, + PARAMS_FIELD_NAME: params_inner, + }) + .as_object() + .cloned() + .unwrap(); + + let request = Request::new(unvalidated, false).unwrap(); + assert_eq!(request.id, id); + assert_eq!(request.method, method); + assert_eq!(request.params.unwrap(), Params::Array(params_inner)); + } + + run_test(Value::String("the id".to_string())); + run_test(json!(1314)); + run_test(Value::Null); + } + + #[test] + fn should_fail_to_validate_id_with_wrong_type() { + let request = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: true, + METHOD_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::Null, + error, + }) => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "'id' should be a string or integer" + ) + ); + } + + #[test] + fn should_fail_to_validate_id_with_fractional_part() { + let request = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: 1.1, + METHOD_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::Null, + error, + }) => error, + _ => panic!("should be error"), 
+ }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "'id' must not contain fractional parts if it is a number" + ) + ); + } + + #[test] + fn should_reject_with_missing_id() { + let request = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + METHOD_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + match Request::new(request, false) { + Err(ErrorOrRejection::Rejection(_)) => (), + _ => panic!("should be rejection"), + }; + } + + #[test] + fn should_fail_to_validate_with_invalid_jsonrpc_field_value() { + let request = json!({ + JSONRPC_FIELD_NAME: "2.1", + ID_FIELD_NAME: "a", + METHOD_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "Expected 'jsonrpc' to be '2.0', but got '2.1'" + ) + ); + } + + #[test] + fn should_fail_to_validate_with_invalid_jsonrpc_field_type() { + let request = json!({ + JSONRPC_FIELD_NAME: true, + ID_FIELD_NAME: "a", + METHOD_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "Expected 'jsonrpc' to be a String with value '2.0', but got 'true'" + ) + ); + } + + #[test] + fn should_fail_to_validate_with_missing_jsonrpc_field() { + let request = json!({ + ID_FIELD_NAME: "a", + METHOD_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + 
Error::new(ReservedErrorCode::InvalidRequest, "Missing 'jsonrpc' field") + ); + } + + #[test] + fn should_fail_to_validate_with_invalid_method_field_type() { + let request = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: "a", + METHOD_FIELD_NAME: 1, + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "Expected 'method' to be a String" + ) + ); + } + + #[test] + fn should_fail_to_validate_with_missing_method_field() { + let request = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new(ReservedErrorCode::InvalidRequest, "Missing 'method' field") + ); + } + + #[test] + fn should_fail_to_validate_with_invalid_params_type() { + let request = json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: "a", + METHOD_FIELD_NAME: "a", + PARAMS_FIELD_NAME: "a", + }) + .as_object() + .cloned() + .unwrap(); + + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "If present, 'params' must be an Array or Object, but was a String" + ) + ); + } + + fn request_with_extra_fields() -> Map { + json!({ + JSONRPC_FIELD_NAME: JSON_RPC_VERSION, + ID_FIELD_NAME: "a", + METHOD_FIELD_NAME: "a", + "extra": 1, + "another": true, + }) + .as_object() + .cloned() + .unwrap() + } + + #[test] + fn should_validate_with_extra_fields_if_allowed() 
{ + let request = request_with_extra_fields(); + assert!(Request::new(request, true).is_ok()); + } + + #[test] + fn should_fail_to_validate_with_extra_fields_if_disallowed() { + let request = request_with_extra_fields(); + let error = match Request::new(request, false) { + Err(ErrorOrRejection::Error { + id: Value::String(id), + error, + }) if id == "a" => error, + _ => panic!("should be error"), + }; + assert_eq!( + error, + Error::new( + ReservedErrorCode::InvalidRequest, + "Unexpected fields: 'another', 'extra'" + ) + ); + } +} diff --git a/json_rpc/src/request/params.rs b/json_rpc/src/request/params.rs new file mode 100644 index 00000000..facadfc4 --- /dev/null +++ b/json_rpc/src/request/params.rs @@ -0,0 +1,203 @@ +use std::fmt::{self, Display, Formatter}; + +use serde_json::{Map, Value}; + +use super::ErrorOrRejection; +use crate::error::{Error, ReservedErrorCode}; + +/// The "params" field of a JSON-RPC request. +/// +/// As per [the JSON-RPC specification](https://www.jsonrpc.org/specification#parameter_structures), +/// if present these must be a JSON Array or Object. +/// +/// **NOTE:** Currently we treat '"params": null' as '"params": []', but this deviation from the +/// standard will be removed in an upcoming release, and `null` will become an invalid value. +/// +/// `Params` is effectively a restricted [`serde_json::Value`], and can be converted to a `Value` +/// using `Value::from()` if required. +#[derive(Clone, Eq, PartialEq, Debug)] +pub enum Params { + /// Represents a JSON Array. + Array(Vec), + /// Represents a JSON Object. 
+ Object(Map), +} + +impl Params { + #[allow(clippy::result_large_err)] + pub(super) fn try_from(request_id: &Value, params: Value) -> Result { + let err_invalid_request = |additional_info: &str| { + let error = Error::new(ReservedErrorCode::InvalidRequest, additional_info); + Err(ErrorOrRejection::Error { + id: request_id.clone(), + error, + }) + }; + + match params { + Value::Null => Ok(Params::Array(vec![])), + Value::Bool(false) => err_invalid_request( + "If present, 'params' must be an Array or Object, but was 'false'", + ), + Value::Bool(true) => err_invalid_request( + "If present, 'params' must be an Array or Object, but was 'true'", + ), + Value::Number(_) => err_invalid_request( + "If present, 'params' must be an Array or Object, but was a Number", + ), + Value::String(_) => err_invalid_request( + "If present, 'params' must be an Array or Object, but was a String", + ), + Value::Array(array) => Ok(Params::Array(array)), + Value::Object(map) => Ok(Params::Object(map)), + } + } + + /// Returns `true` if `self` is an Array, otherwise returns `false`. + pub fn is_array(&self) -> bool { + self.as_array().is_some() + } + + /// Returns a reference to the inner `Vec` if `self` is an Array, otherwise returns `None`. + pub fn as_array(&self) -> Option<&Vec> { + match self { + Params::Array(array) => Some(array), + _ => None, + } + } + + /// Returns a mutable reference to the inner `Vec` if `self` is an Array, otherwise returns + /// `None`. + pub fn as_array_mut(&mut self) -> Option<&mut Vec> { + match self { + Params::Array(array) => Some(array), + _ => None, + } + } + + /// Returns `true` if `self` is an Object, otherwise returns `false`. + pub fn is_object(&self) -> bool { + self.as_object().is_some() + } + + /// Returns a reference to the inner `Map` if `self` is an Object, otherwise returns `None`. 
+ pub fn as_object(&self) -> Option<&Map> { + match self { + Params::Object(map) => Some(map), + _ => None, + } + } + + /// Returns a mutable reference to the inner `Map` if `self` is an Object, otherwise returns + /// `None`. + pub fn as_object_mut(&mut self) -> Option<&mut Map> { + match self { + Params::Object(map) => Some(map), + _ => None, + } + } + + /// Returns `true` if `self` is an empty Array or an empty Object, otherwise returns `false`. + pub fn is_empty(&self) -> bool { + match self { + Params::Array(array) => array.is_empty(), + Params::Object(map) => map.is_empty(), + } + } +} + +impl Display for Params { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + Display::fmt(&Value::from(self.clone()), formatter) + } +} + +/// The default value for `Params` is an empty Array. +impl Default for Params { + fn default() -> Self { + Params::Array(vec![]) + } +} + +impl From for Value { + fn from(params: Params) -> Self { + match params { + Params::Array(array) => Value::Array(array), + Params::Object(map) => Value::Object(map), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn should_fail_to_convert_invalid_params(bad_params: Value, expected_invalid_type_msg: &str) { + let original_id = Value::from(1_i8); + match Params::try_from(&original_id, bad_params).unwrap_err() { + ErrorOrRejection::Error { id, error } => { + assert_eq!(id, original_id); + let expected_error = format!( + r#"{{"code":-32600,"message":"Invalid Request","data":"If present, 'params' must be an Array or Object, but was {}"}}"#, + expected_invalid_type_msg + ); + assert_eq!(serde_json::to_string(&error).unwrap(), expected_error); + } + other => panic!("unexpected: {:?}", other), + } + } + + #[test] + fn should_convert_params_from_null() { + let original_id = Value::from(1_i8); + + let params = Params::try_from(&original_id, Value::Null).unwrap(); + assert!(matches!(params, Params::Array(v) if v.is_empty())); + } + + #[test] + fn 
should_fail_to_convert_params_from_false() { + should_fail_to_convert_invalid_params(Value::Bool(false), "'false'") + } + + #[test] + fn should_fail_to_convert_params_from_true() { + should_fail_to_convert_invalid_params(Value::Bool(true), "'true'") + } + + #[test] + fn should_fail_to_convert_params_from_a_number() { + should_fail_to_convert_invalid_params(Value::from(9_u8), "a Number") + } + + #[test] + fn should_fail_to_convert_params_from_a_string() { + should_fail_to_convert_invalid_params(Value::from("s"), "a String") + } + + #[test] + fn should_convert_params_from_an_array() { + let original_id = Value::from(1_i8); + + let params = Params::try_from(&original_id, Value::Array(vec![])).unwrap(); + assert!(matches!(params, Params::Array(v) if v.is_empty())); + + let array = vec![Value::from(9_i16), Value::Bool(false)]; + let params = Params::try_from(&original_id, Value::Array(array.clone())).unwrap(); + assert!(matches!(params, Params::Array(v) if v == array)); + } + + #[test] + fn should_convert_params_from_an_object() { + let original_id = Value::from(1_i8); + + let params = Params::try_from(&original_id, Value::Object(Map::new())).unwrap(); + assert!(matches!(params, Params::Object(v) if v.is_empty())); + + let mut map = Map::new(); + map.insert("a".to_string(), Value::from(9_i16)); + map.insert("b".to_string(), Value::Bool(false)); + let params = Params::try_from(&original_id, Value::Object(map.clone())).unwrap(); + assert!(matches!(params, Params::Object(v) if v == map)); + } +} diff --git a/json_rpc/src/request_handlers.rs b/json_rpc/src/request_handlers.rs new file mode 100644 index 00000000..97fa4871 --- /dev/null +++ b/json_rpc/src/request_handlers.rs @@ -0,0 +1,130 @@ +use std::{collections::HashMap, future::Future, pin::Pin, sync::Arc, time::Instant}; + +use futures::FutureExt; +use metrics::rpc::{inc_method_call, observe_response_time, register_request_size}; +use serde::Serialize; +use serde_json::Value; +use tracing::{debug, error}; + +use 
crate::{ + error::{Error, ReservedErrorCode}, + request::{Params, Request}, + response::Response, +}; + +/// A boxed future of `Result`; the return type of a request-handling closure. +type HandleRequestFuture = Pin> + Send>>; +/// A request-handling closure. +type RequestHandler = Arc) -> HandleRequestFuture + Send + Sync>; + +/// A collection of request-handlers, indexed by the JSON-RPC "method" applicable to each. +/// +/// There needs to be a unique handler for each JSON-RPC request "method" to be handled. Handlers +/// are added via a [`RequestHandlersBuilder`]. +#[derive(Clone)] +pub struct RequestHandlers(Arc>); + +impl RequestHandlers { + /// Finds the relevant handler for the given request's "method" field, and invokes it with the + /// given "params" value. + /// + /// If a handler cannot be found, a MethodNotFound error is created. In this case, or if + /// invoking the handler yields an [`Error`], the error is converted into a + /// [`Response::Failure`]. + /// + /// Otherwise a [`Response::Success`] is returned. 
+ pub(crate) async fn handle_request(&self, request: Request, request_size: usize) -> Response { + let start = Instant::now(); + let request_method = request.method.as_str(); + let handler = match self.0.get(request_method) { + Some(handler) => Arc::clone(handler), + None => { + let elapsed = start.elapsed(); + observe_response_time("unknown-handler", "unknown-handler", elapsed); + debug!(requested_method = %request.method.as_str(), "failed to get handler"); + let error = Error::new( + ReservedErrorCode::MethodNotFound, + format!( + "'{}' is not a supported json-rpc method on this server", + request.method.as_str() + ), + ); + return Response::new_failure(request.id, error); + } + }; + inc_method_call(request_method); + register_request_size(request_method, request_size as f64); + + match handler(request.params).await { + Ok(result) => { + let elapsed = start.elapsed(); + observe_response_time(request_method, "success", elapsed); + Response::new_success(request.id, result) + } + Err(error) => { + let elapsed = start.elapsed(); + observe_response_time(request_method, &error.code().to_string(), elapsed); + Response::new_failure(request.id, error) + } + } + } +} + +/// A builder for [`RequestHandlers`]. +// +// This builder exists so the internal `HashMap` can be populated before it is made immutable behind +// the `Arc` in the `RequestHandlers`. +#[derive(Default)] +pub struct RequestHandlersBuilder(HashMap<&'static str, RequestHandler>); + +impl RequestHandlersBuilder { + /// Returns a new builder. + pub fn new() -> Self { + Self::default() + } + + /// Adds a new request-handler which will be called to handle all JSON-RPC requests with the + /// given "method" field. + /// + /// The handler should be an async closure or function with a signature like: + /// ```ignore + /// async fn handle_it(params: Option) -> Result + /// ``` + /// where `T` implements `Serialize` and will be used as the JSON-RPC response's "result" field. 
+ pub fn register_handler(&mut self, method: &'static str, handler: Arc) + where + Func: Fn(Option) -> Fut + Send + Sync + 'static, + Fut: Future> + Send, + T: Serialize + 'static, + { + let handler = Arc::clone(&handler); + // The provided handler returns a future with output of `Result`. We need to + // convert that to a boxed future with output `Result` to store it in a + // homogenous collection. + let wrapped_handler = move |maybe_params| { + let handler = Arc::clone(&handler); + async move { + let success = Arc::clone(&handler)(maybe_params).await?; + serde_json::to_value(success).map_err(|error| { + error!(%error, "failed to encode json-rpc response value"); + Error::new( + ReservedErrorCode::InternalError, + format!("failed to encode json-rpc response value: {}", error), + ) + }) + } + .boxed() + }; + if self.0.insert(method, Arc::new(wrapped_handler)).is_some() { + error!( + method, + "already registered a handler for this json-rpc request method" + ); + } + } + + /// Finalize building by converting `self` to a [`RequestHandlers`]. + pub fn build(self) -> RequestHandlers { + RequestHandlers(Arc::new(self.0)) + } +} diff --git a/json_rpc/src/response.rs b/json_rpc/src/response.rs new file mode 100644 index 00000000..b9daf81b --- /dev/null +++ b/json_rpc/src/response.rs @@ -0,0 +1,108 @@ +use std::borrow::Cow; + +use serde::{ + de::{DeserializeOwned, Deserializer}, + Deserialize, Serialize, +}; +use serde_json::Value; +use tracing::error; + +use super::{Error, JSON_RPC_VERSION}; + +/// A JSON-RPC response. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[serde(deny_unknown_fields, untagged)] +pub enum Response { + /// A successful RPC execution. + Success { + /// The JSON-RPC version field. + #[serde(deserialize_with = "set_jsonrpc_field")] + jsonrpc: Cow<'static, str>, + /// The same ID as was passed in the corresponding request. + id: Value, + /// The successful result of executing the RPC. 
+ result: Value, + }, + /// An RPC execution which failed. + Failure { + /// The JSON-RPC version field. + #[serde(deserialize_with = "set_jsonrpc_field")] + jsonrpc: Cow<'static, str>, + /// The same ID as was passed in the corresponding request. + id: Value, + /// The error encountered while executing the RPC. + error: Error, + }, +} + +impl Response { + /// Returns a new `Response::Success`. + pub fn new_success(id: Value, result: Value) -> Self { + Response::Success { + jsonrpc: Cow::Borrowed(JSON_RPC_VERSION), + id, + result, + } + } + + /// Returns a new `Response::Failure`. + pub fn new_failure(id: Value, error: Error) -> Self { + Response::Failure { + jsonrpc: Cow::Borrowed(JSON_RPC_VERSION), + id, + error, + } + } + + /// Returns `true` is this is a `Response::Success`. + pub fn is_success(&self) -> bool { + matches!(self, Response::Success { .. }) + } + + /// Returns `true` is this is a `Response::Failure`. + pub fn is_failure(&self) -> bool { + matches!(self, Response::Failure { .. }) + } + + /// Returns the "result" field, or `None` if this is a `Response::Failure`. + pub fn raw_result(&self) -> Option<&Value> { + match &self { + Response::Success { result, .. } => Some(result), + Response::Failure { .. } => None, + } + } + + /// Returns the "result" field parsed as `T`, or `None` if this is a `Response::Failure` or if + /// parsing fails. + pub fn result(&self) -> Option { + match &self { + Response::Success { result, .. } => serde_json::from_value(result.clone()) + .map_err(|error| { + error!("failed to parse: {}", error); + }) + .ok(), + Response::Failure { .. } => None, + } + } + + /// Returns the "error" field or `None` if this is a `Response::Success`. + pub fn error(&self) -> Option<&Error> { + match &self { + Response::Success { .. } => None, + Response::Failure { error, .. } => Some(error), + } + } + + /// Returns the "id" field. + pub fn id(&self) -> &Value { + match &self { + Response::Success { id, .. } | Response::Failure { id, .. 
} => id, + } + } +} + +fn set_jsonrpc_field<'de, D: Deserializer<'de>>( + _deserializer: D, +) -> Result, D::Error> { + Ok(Cow::Borrowed(JSON_RPC_VERSION)) +} diff --git a/listener/Cargo.toml b/listener/Cargo.toml index c79f9223..9312b134 100644 --- a/listener/Cargo.toml +++ b/listener/Cargo.toml @@ -5,33 +5,34 @@ edition = "2021" description = "Event listener for casper-event-listener library" license-file = "../LICENSE" documentation = "README.md" -homepage = "https://github.com/CasperLabs/event-sidecar" -repository = "https://github.com/CasperLabs/event-sidecar" +homepage = "https://github.com/casper-network/casper-sidecar/" +repository = "https://github.com/casper-network/casper-sidecar/" [dependencies] -anyhow = "1.0.65" +anyhow = { workspace = true } async-stream = { workspace = true } -async-trait = "0.1.72" +async-trait = { workspace = true } bytes = "1.2.0" -casper-event-types = { path = "../types", version = "1.0.0" } -casper-types = { version = "3.0.0", features = ["std"] } +casper-event-types.workspace = true +casper-types = { workspace = true, features = ["std"] } eventsource-stream = "0.2.3" -futures = "0.3.24" -reqwest = { version = "0.11", features = ["json", "stream"] } -serde = { version = "1.0", features = ["derive"] } +futures = { workspace = true } +futures-util = { workspace = true } +metrics = { workspace = true } +once_cell = { workspace = true } +reqwest = { version = "0.12", features = ["json", "stream"] } +serde = { workspace = true, default-features = true, features = ["derive"] } serde_json = "1.0" -thiserror = "1.0.37" -tokio = { version = "1", features = ["full"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full"] } tokio-stream = { version = "0.1.4", features = ["sync"] } tokio-util = "0.7.8" -tracing = "0.1" +tracing = { workspace = true, default-features = true } url = "2.3.1" -once_cell = { workspace = true } -futures-util = { workspace = true } [dev-dependencies] -casper-event-types = { path = 
"../types", version = "1.0.0", features = ["sse-data-testing"]} +casper-event-types = { workspace = true, features = ["sse-data-testing"] } eventsource-stream = "0.2.3" mockito = "1.2.0" portpicker = "0.1.1" -warp = { version = "0.3.6"} +warp = { version = "0.3.6" } diff --git a/listener/src/connection_manager.rs b/listener/src/connection_manager.rs index 7eb4cde3..a4a9e12a 100644 --- a/listener/src/connection_manager.rs +++ b/listener/src/connection_manager.rs @@ -3,15 +3,16 @@ use crate::{ sse_connector::{EventResult, SseConnection, StreamConnector}, SseEvent, }; -use anyhow::Error; +use anyhow::{anyhow, Error}; use async_trait::async_trait; use casper_event_types::{ - metrics, sse_data::{deserialize, SseData}, Filter, }; +use casper_types::ProtocolVersion; use eventsource_stream::Event; use futures_util::Stream; +use metrics::{observe_error, sse::register_sse_message_size}; use reqwest::Url; use std::{ fmt::{self, Debug, Display}, @@ -50,6 +51,8 @@ pub struct DefaultConnectionManager { maybe_tasks: Option, filter: Filter, current_event_id_sender: Sender<(Filter, u32)>, + api_version: Option, + network_name: String, } #[derive(Debug)] @@ -99,6 +102,8 @@ pub struct DefaultConnectionManagerBuilder { pub(super) sleep_between_keep_alive_checks: Duration, /// Time of inactivity of a node connection that is allowed by KeepAliveMonitor pub(super) no_message_timeout: Duration, + /// Name of the network to which the node is connected + pub(super) network_name: String, } #[async_trait::async_trait] @@ -126,6 +131,8 @@ impl DefaultConnectionManagerBuilder { maybe_tasks: self.maybe_tasks, filter: self.filter, current_event_id_sender: self.current_event_id_sender, + api_version: None, + network_name: self.network_name, } } } @@ -133,7 +140,6 @@ impl DefaultConnectionManagerBuilder { impl DefaultConnectionManager { /// Start handling traffic from nodes endpoint. 
This function is blocking, it will return a /// ConnectionManagerError result if something went wrong while processing. - async fn connect( &mut self, ) -> Result + Send + 'static>>, ConnectionManagerError> @@ -229,19 +235,19 @@ impl DefaultConnectionManager { error!(error_message); return Err(Error::msg(error_message)); } - Ok((sse_data, needs_raw_json)) => { + Ok(sse_data) => { let payload_size = event.data.len(); - let mut raw_json_data = None; - if needs_raw_json { - raw_json_data = Some(event.data); - } - self.observe_bytes(payload_size); + self.observe_bytes(sse_data.type_label(), payload_size); + let api_version = self.api_version.ok_or(anyhow!( + "Expected ApiVersion to be present when handling messages." + ))?; let sse_event = SseEvent::new( event.id.parse().unwrap_or(0), sse_data, self.bind_address.clone(), - raw_json_data, self.filter.clone(), + api_version.to_string(), + self.network_name.clone(), ); self.sse_event_sender.send(sse_event).await.map_err(|_| { count_error(SENDING_FAILED); @@ -263,8 +269,6 @@ impl DefaultConnectionManager { None => Err(recoverable_error(Error::msg(FIRST_EVENT_EMPTY))), Some(Err(error)) => Err(failed_to_get_first_event(error)), Some(Ok(event)) => { - let payload_size = event.data.len(); - self.observe_bytes(payload_size); if event.data.contains(API_VERSION) { self.try_handle_api_version_message(&event, receiver).await } else { @@ -283,13 +287,17 @@ impl DefaultConnectionManager { match deserialize(&event.data) { //at this point we // are assuming that it's an ApiVersion and ApiVersion is the same across all semvers - Ok((SseData::ApiVersion(semver), _)) => { + Ok(SseData::ApiVersion(semver)) => { + let payload_size = event.data.len(); + self.observe_bytes("ApiVersion", payload_size); + self.api_version = Some(semver); let sse_event = SseEvent::new( 0, SseData::ApiVersion(semver), self.bind_address.clone(), - None, self.filter.clone(), + semver.to_string(), + self.network_name.clone(), ); 
self.sse_event_sender.send(sse_event).await.map_err(|_| { count_error(API_VERSION_SENDING_FAILED); @@ -313,10 +321,8 @@ impl DefaultConnectionManager { Ok(receiver) } - fn observe_bytes(&self, payload_size: usize) { - metrics::RECEIVED_BYTES - .with_label_values(&[self.filter.to_string().as_str()]) - .observe(payload_size as f64); + fn observe_bytes(&self, sse_type_label: &str, payload_size: usize) { + register_sse_message_size(sse_type_label, payload_size as f64); } } @@ -353,9 +359,7 @@ fn expected_first_message_to_be_api_version(data: String) -> ConnectionManagerEr } fn count_error(reason: &str) { - metrics::ERROR_COUNTS - .with_label_values(&["connection_manager", reason]) - .inc(); + observe_error("connection_manager", reason); } #[cfg(test)] @@ -366,7 +370,7 @@ pub mod tests { sse_connector::{tests::MockSseConnection, StreamConnector}, SseEvent, }; - use anyhow::Error; + use anyhow::anyhow; use casper_event_types::{sse_data::test_support::*, Filter}; use std::time::Duration; use tokio::{ @@ -378,7 +382,7 @@ pub mod tests { #[tokio::test] async fn given_connection_fail_should_return_error() { let connector = Box::new(MockSseConnection::build_failing_on_connection()); - let (mut connection_manager, _, _) = build_manager(connector); + let (mut connection_manager, _, _) = build_manager(connector, "test".to_string()); let res = connection_manager.do_start_handling().await; if let Err(ConnectionManagerError::NonRecoverableError { error }) = res { assert_eq!(error.to_string(), "Some error on connection"); @@ -390,7 +394,7 @@ pub mod tests { #[tokio::test] async fn given_failure_on_message_should_return_error() { let connector = Box::new(MockSseConnection::build_failing_on_message()); - let (mut connection_manager, _, _) = build_manager(connector); + let (mut connection_manager, _, _) = build_manager(connector, "test".to_string()); let res = connection_manager.do_start_handling().await; if let Err(ConnectionManagerError::InitialConnectionError { error }) = res { 
assert_eq!(error.to_string(), FIRST_EVENT_EMPTY); @@ -402,11 +406,11 @@ pub mod tests { #[tokio::test] async fn given_data_without_api_version_should_fail() { let data = vec![ - example_block_added_1_5_2(BLOCK_HASH_1, "1"), - example_block_added_1_5_2(BLOCK_HASH_2, "2"), + example_block_added_2_0_0(BLOCK_HASH_1, 1u64), + example_block_added_2_0_0(BLOCK_HASH_2, 2u64), ]; let connector = Box::new(MockSseConnection::build_with_data(data)); - let (mut connection_manager, _, _) = build_manager(connector); + let (mut connection_manager, _, _) = build_manager(connector, "test".to_string()); let res = connection_manager.do_start_handling().await; if let Err(ConnectionManagerError::NonRecoverableError { error }) = res { assert!(error @@ -421,11 +425,12 @@ pub mod tests { async fn given_data_should_pass_data() { let data = vec![ example_api_version(), - example_block_added_1_5_2(BLOCK_HASH_1, "1"), - example_block_added_1_5_2(BLOCK_HASH_2, "2"), + example_block_added_2_0_0(BLOCK_HASH_1, 1u64), + example_block_added_2_0_0(BLOCK_HASH_2, 2u64), ]; let connector = Box::new(MockSseConnection::build_with_data(data)); - let (mut connection_manager, data_tx, event_ids) = build_manager(connector); + let (mut connection_manager, data_tx, event_ids) = + build_manager(connector, "test".to_string()); let events_join = tokio::spawn(async move { poll_events(data_tx).await }); let event_ids_join = tokio::spawn(async move { poll_events(event_ids).await }); tokio::spawn(async move { connection_manager.do_start_handling().await }); @@ -440,10 +445,11 @@ pub mod tests { let data = vec![ example_api_version(), "XYZ".to_string(), - example_block_added_1_5_2(BLOCK_HASH_2, "2"), + example_block_added_2_0_0(BLOCK_HASH_2, 2u64), ]; let connector = Box::new(MockSseConnection::build_with_data(data)); - let (mut connection_manager, data_tx, _event_ids) = build_manager(connector); + let (mut connection_manager, data_tx, _event_ids) = + build_manager(connector, "test".to_string()); let events_join = 
tokio::spawn(async move { poll_events(data_tx).await }); let connection_manager_joiner = tokio::spawn(async move { connection_manager.do_start_handling().await }); @@ -453,7 +459,7 @@ pub mod tests { if let Ok(Err(ConnectionManagerError::NonRecoverableError { error })) = res { assert_eq!( error.to_string(), - "Serde Error: Couldn't deserialize Serde Error: expected value at line 1 column 1" + "Serde Error: Couldn't deserialize Error when deserializing SSE event from node: expected value at line 1 column 1" ) } else { unreachable!(); @@ -470,6 +476,7 @@ pub mod tests { fn build_manager( connector: Box, + network_name: String, ) -> ( DefaultConnectionManager, Receiver, @@ -484,8 +491,10 @@ pub mod tests { current_event_id: None, sse_event_sender: data_tx, maybe_tasks: None, - filter: Filter::Sigs, + filter: Filter::Events, current_event_id_sender: event_id_tx, + api_version: None, + network_name, }; (manager, data_rx, event_id_rx) } @@ -511,8 +520,8 @@ pub mod tests { msg, } } - pub fn fail_fast(sender: Sender) -> Self { - let error = Error::msg("xyz"); + pub fn fail_fast(msg_postfix: &str, sender: Sender) -> Self { + let error = anyhow!("xyz-{}", msg_postfix); let a = Err(ConnectionManagerError::NonRecoverableError { error }); Self::new(Duration::from_millis(1), a, sender, None) } diff --git a/listener/src/connections_builder.rs b/listener/src/connections_builder.rs index 642e47c2..ceb14a4a 100644 --- a/listener/src/connections_builder.rs +++ b/listener/src/connections_builder.rs @@ -9,6 +9,7 @@ use url::Url; use crate::{ connection_manager::{ConnectionManager, DefaultConnectionManagerBuilder}, connection_tasks::ConnectionTasks, + version_fetcher::NodeMetadata, FilterWithEventId, SseEvent, }; @@ -18,7 +19,7 @@ pub trait ConnectionsBuilder: Sync + Send { &self, last_event_id_for_filter: Arc>>, last_seen_event_id_sender: FilterWithEventId, - node_build_version: ProtocolVersion, + node_metadata: NodeMetadata, ) -> Result>, Error>; } @@ -39,10 +40,10 @@ impl 
ConnectionsBuilder for DefaultConnectionsBuilder { &self, last_event_id_for_filter: Arc>>, last_seen_event_id_sender: FilterWithEventId, - node_build_version: ProtocolVersion, + node_metadata: NodeMetadata, ) -> Result>, Error> { let mut connections = HashMap::new(); - let filters = filters_from_version(node_build_version); + let filters = filters_from_version(node_metadata.build_version); let maybe_tasks = (!self.allow_partial_connection).then(|| ConnectionTasks::new(filters.len())); let guard = last_event_id_for_filter.lock().await; @@ -55,6 +56,7 @@ impl ConnectionsBuilder for DefaultConnectionsBuilder { start_from_event_id, filter.clone(), last_seen_event_id_sender.clone(), + node_metadata.network_name.clone(), ) .await?; connections.insert(filter, connection); @@ -71,6 +73,7 @@ impl DefaultConnectionsBuilder { start_from_event_id: Option, filter: Filter, last_seen_event_id_sender: FilterWithEventId, + network_name: String, ) -> Result, Error> { let bind_address_for_filter = self.filtered_sse_url(&filter)?; let builder = DefaultConnectionManagerBuilder { @@ -84,6 +87,7 @@ impl DefaultConnectionsBuilder { current_event_id_sender: last_seen_event_id_sender, sleep_between_keep_alive_checks: self.sleep_between_keep_alive_checks, no_message_timeout: self.no_message_timeout, + network_name, }; Ok(Box::new(builder.build())) } @@ -95,7 +99,7 @@ impl DefaultConnectionsBuilder { } fn filters_from_version(_build_version: ProtocolVersion) -> Vec { - vec![Filter::Main, Filter::Sigs, Filter::Deploys] + vec![Filter::Events] } pub struct ConnectionConfig { @@ -112,6 +116,7 @@ pub mod tests { use super::ConnectionsBuilder; use crate::{ connection_manager::{tests::MockConnectionManager, ConnectionManager}, + version_fetcher::NodeMetadata, FilterWithEventId, }; use anyhow::Error; @@ -133,6 +138,7 @@ pub mod tests { data_pushed_from_connections: Arc>>, result: Mutex, maybe_protocol_version: Mutex>, + maybe_network_name: Mutex>, } impl Default for MockConnectionsBuilder { @@ -141,6 
+147,7 @@ pub mod tests { data_pushed_from_connections: Arc::new(Mutex::new(vec![])), result: Mutex::new(vec![Ok(HashMap::new())]), maybe_protocol_version: Mutex::new(None), + maybe_network_name: Mutex::new(None), } } } @@ -192,6 +199,11 @@ pub mod tests { *data } + pub async fn get_recorded_network_name(&self) -> Option { + let data = self.maybe_network_name.lock().await; + (*data).clone() + } + fn builder_based_on_result(mut rx: Receiver, results: ResultsStoredInMock) -> Self { let data_pushed_from_connections = Arc::new(Mutex::new(vec![])); let data_pushed_from_connections_clone = data_pushed_from_connections.clone(); @@ -206,6 +218,7 @@ pub mod tests { data_pushed_from_connections, result: Mutex::new(results), maybe_protocol_version: Mutex::new(None), + maybe_network_name: Mutex::new(None), } } } @@ -219,15 +232,7 @@ pub mod tests { tx.clone(), Some(events_msg.as_str()), )); - let main_msg = format!("main-{}", msg_postfix); - let main: Box = Box::new(MockConnectionManager::ok_long( - tx.clone(), - Some(main_msg.as_str()), - )); - Ok(HashMap::from([ - (Filter::Events, events), - (Filter::Main, main), - ])) + Ok(HashMap::from([(Filter::Events, events)])) } fn response_with_failing_events( @@ -235,16 +240,8 @@ pub mod tests { tx: &Sender, ) -> Result>, Error> { let events: Box = - Box::new(MockConnectionManager::fail_fast(tx.clone())); - let main_msg = format!("main-{}", msg_postfix); - let main: Box = Box::new(MockConnectionManager::ok_long( - tx.clone(), - Some(main_msg.as_str()), - )); - Ok(HashMap::from([ - (Filter::Events, events), - (Filter::Main, main), - ])) + Box::new(MockConnectionManager::fail_fast(msg_postfix, tx.clone())); + Ok(HashMap::from([(Filter::Events, events)])) } #[async_trait] @@ -253,10 +250,11 @@ pub mod tests { &self, _last_event_id_for_filter: Arc>>, _last_seen_event_id_sender: FilterWithEventId, - node_build_version: ProtocolVersion, + node_metadata: NodeMetadata, ) -> Result>, Error> { - let mut guard = 
self.maybe_protocol_version.lock().await; - *guard = Some(node_build_version); + let mut guard: tokio::sync::MutexGuard<'_, Option> = + self.maybe_protocol_version.lock().await; + *guard = Some(node_metadata.build_version); drop(guard); let mut guard = self.result.lock().await; if !guard.is_empty() { diff --git a/listener/src/event_listener_status.rs b/listener/src/event_listener_status.rs index 85e494d9..2200aad3 100644 --- a/listener/src/event_listener_status.rs +++ b/listener/src/event_listener_status.rs @@ -1,4 +1,4 @@ -use casper_event_types::metrics; +use metrics::sse::store_node_status; /// Helper enum determining in what state connection to a node is in. /// It's used to named different situations in which the connection can be. @@ -14,7 +14,7 @@ pub(super) enum EventListenerStatus { /// If Event Listener reports this state it means that it was unable to establish a connection /// with node and there are no more `max_connection_attempts` left. There will be no futhrer /// tries to establish the connection. 
- Defunct, + ReconnectionsExhausted, /// If Event Listener reports this state it means that the node it was trying to connect to has a /// version which sidecar can't work with IncompatibleVersion, @@ -27,12 +27,10 @@ impl EventListenerStatus { EventListenerStatus::Connecting => 1, EventListenerStatus::Connected => 2, EventListenerStatus::Reconnecting => 3, - EventListenerStatus::Defunct => -1, + EventListenerStatus::ReconnectionsExhausted => -1, EventListenerStatus::IncompatibleVersion => -2, } as f64; let node_label = format!("{}:{}", node_address, sse_port); - metrics::NODE_STATUSES - .with_label_values(&[node_label.as_str()]) - .set(status); + store_node_status(node_label.as_str(), status); } } diff --git a/listener/src/lib.rs b/listener/src/lib.rs index 29149a29..bf6520b5 100644 --- a/listener/src/lib.rs +++ b/listener/src/lib.rs @@ -13,7 +13,6 @@ mod version_fetcher; use crate::event_listener_status::*; use anyhow::Error; use casper_event_types::Filter; -use casper_types::ProtocolVersion; use connection_manager::{ConnectionManager, ConnectionManagerError}; use connection_tasks::ConnectionTasks; use connections_builder::{ConnectionsBuilder, DefaultConnectionsBuilder}; @@ -28,7 +27,7 @@ use tokio::{ use tracing::{debug, error, info, warn}; pub use types::{NodeConnectionInterface, SseEvent}; use url::Url; -use version_fetcher::{for_status_endpoint, BuildVersionFetchError, VersionFetcher}; +use version_fetcher::{for_status_endpoint, MetadataFetchError, NodeMetadata, NodeMetadataFetcher}; const MAX_CONNECTION_ATTEMPTS_REACHED: &str = "Max connection attempts reached"; @@ -60,7 +59,7 @@ impl EventListenerBuilder { allow_partial_connection: self.allow_partial_connection, }); Ok(EventListener { - node_build_version: ProtocolVersion::from_parts(1, 0, 0), + node_metadata: NodeMetadata::default(), node: self.node.clone(), max_connection_attempts: self.max_connection_attempts, delay_between_attempts: self.delay_between_attempts, @@ -73,8 +72,8 @@ impl 
EventListenerBuilder { /// Listener that listens to a node and all the available filters it exposes. pub struct EventListener { - /// Version of the node the listener is listening to. This version is discovered by the Listener on connection. - node_build_version: ProtocolVersion, + /// Metadata of the node to which the listener is listening. + node_metadata: NodeMetadata, /// Data pointing to the node node: NodeConnectionInterface, /// Maximum numbers the listener will retry connecting to the node. @@ -85,7 +84,7 @@ pub struct EventListener { /// If set to true the listen will proceed after connecting to at least one connection. allow_partial_connection: bool, /// Fetches the build version of the node - version_fetcher: Arc, + version_fetcher: Arc, /// Builder of the connections to the node connections_builder: Arc, } @@ -95,8 +94,8 @@ enum ConnectOutcome { SystemReconnect, //In this case we don't increase the current_attempt counter } -enum GetVersionResult { - Ok(Option), +enum GetNodeMetadataResult { + Ok(Option), Retry, Error(Error), } @@ -114,22 +113,18 @@ impl EventListener { log_status_for_event_listener(EventListenerStatus::Connecting, self); let mut current_attempt = 1; while current_attempt <= self.max_connection_attempts { - if current_attempt > 1 { - sleep(self.delay_between_attempts).await; - } - match self.get_version(current_attempt).await { - GetVersionResult::Ok(Some(protocol_version)) => { - self.node_build_version = protocol_version; + self.delay_if_needed(current_attempt).await; + match self.get_metadata(current_attempt).await { + GetNodeMetadataResult::Ok(Some(node_metadata)) => { + self.node_metadata = node_metadata; current_attempt = 1 // Restart counter if the nodes version changed } - GetVersionResult::Retry => { + GetNodeMetadataResult::Retry => { current_attempt += 1; - if current_attempt >= self.max_connection_attempts { - log_status_for_event_listener(EventListenerStatus::Defunct, self); - } + 
self.log_connections_exhausted_if_needed(current_attempt); continue; } - GetVersionResult::Error(e) => return Err(e), + GetNodeMetadataResult::Error(e) => return Err(e), _ => {} } if let Ok(ConnectOutcome::ConnectionLost) = self @@ -143,10 +138,24 @@ impl EventListener { } current_attempt += 1; } - log_status_for_event_listener(EventListenerStatus::Defunct, self); + log_status_for_event_listener(EventListenerStatus::ReconnectionsExhausted, self); Err(Error::msg(MAX_CONNECTION_ATTEMPTS_REACHED)) } + #[inline(always)] + async fn delay_if_needed(&mut self, current_attempt: usize) { + if current_attempt > 1 { + sleep(self.delay_between_attempts).await; + } + } + + #[inline(always)] + fn log_connections_exhausted_if_needed(&mut self, current_attempt: usize) { + if current_attempt >= self.max_connection_attempts { + log_status_for_event_listener(EventListenerStatus::ReconnectionsExhausted, self); + } + } + async fn do_connect( &mut self, last_event_id_for_filter: Arc>>, @@ -157,7 +166,7 @@ impl EventListener { .build_connections( last_event_id_for_filter.clone(), last_seen_event_id_sender.clone(), - self.node_build_version, + self.node_metadata.clone(), ) .await?; let connection_join_handles = start_connections(connections); @@ -207,8 +216,9 @@ impl EventListener { match err { ConnectionManagerError::NonRecoverableError { error } => { error!( - "Restarting event listener {} because of NonRecoverableError: {}", + "Restarting event listener {}:{} because of NonRecoverableError: {}", self.node.ip_address.to_string(), + self.node.sse_port, error ); log_status_for_event_listener(EventListenerStatus::Reconnecting, self); @@ -217,7 +227,7 @@ impl EventListener { ConnectionManagerError::InitialConnectionError { error } => { //No futures_left means no more filters active, we need to restart the whole listener if futures_left.is_empty() { - error!("Restarting event listener {} because of no more active connections left: {}", self.node.ip_address.to_string(), error); + 
error!("Restarting event listener {}:{} because of no more active connections left: {}", self.node.ip_address.to_string(), self.node.sse_port, error); log_status_for_event_listener( EventListenerStatus::Reconnecting, self, @@ -232,30 +242,38 @@ impl EventListener { } } - async fn get_version(&mut self, current_attempt: usize) -> GetVersionResult { + async fn get_metadata(&mut self, current_attempt: usize) -> GetNodeMetadataResult { info!( "Attempting to connect...\t{}/{}", current_attempt, self.max_connection_attempts ); let fetch_result = self.version_fetcher.fetch().await; match fetch_result { - Ok(new_node_build_version) => { - if self.node_build_version != new_node_build_version { - return GetVersionResult::Ok(Some(new_node_build_version)); + Ok(node_metadata) => { + // check if received network name matches optional configuration + if let Some(network_name) = &self.node.network_name { + if *network_name != node_metadata.network_name { + let msg = format!("Network name {network_name} does't match name {} configured for node connection", node_metadata.network_name); + error!("{msg}"); + return GetNodeMetadataResult::Error(Error::msg(msg)); + } } - GetVersionResult::Ok(None) + if self.node_metadata != node_metadata { + return GetNodeMetadataResult::Ok(Some(node_metadata)); + } + GetNodeMetadataResult::Ok(None) } - Err(BuildVersionFetchError::VersionNotAcceptable(msg)) => { + Err(MetadataFetchError::VersionNotAcceptable(msg)) => { log_status_for_event_listener(EventListenerStatus::IncompatibleVersion, self); //The node has a build version which sidecar can't talk to. Failing fast in this case.
- GetVersionResult::Error(Error::msg(msg)) + GetNodeMetadataResult::Error(Error::msg(msg)) } - Err(BuildVersionFetchError::Error(err)) => { + Err(MetadataFetchError::Error(err)) => { error!( - "Error fetching build version (for {}): {err}", + "Error fetching metadata (for {}): {err}", self.node.ip_address ); - GetVersionResult::Retry + GetNodeMetadataResult::Retry } } } @@ -325,7 +343,7 @@ fn warn_connection_lost(listener: &EventListener, current_attempt: usize) { mod tests { use crate::{ connections_builder::tests::MockConnectionsBuilder, - version_fetcher::{tests::MockVersionFetcher, BuildVersionFetchError}, + version_fetcher::{tests::MockVersionFetcher, MetadataFetchError, NodeMetadata}, EventListener, NodeConnectionInterface, }; use anyhow::Error; @@ -334,9 +352,12 @@ mod tests { #[tokio::test] async fn given_event_listener_should_not_connect_when_incompatible_version() { - let version_fetcher = MockVersionFetcher::new(vec![Err( - BuildVersionFetchError::VersionNotAcceptable("1.5.10".to_string()), - )]); + let version_fetcher = MockVersionFetcher::new( + vec![Err(MetadataFetchError::VersionNotAcceptable( + "1.5.10".to_string(), + ))], + vec![Ok("x".to_string())], + ); let connections_builder = Arc::new(MockConnectionsBuilder::default()); let err = run_event_listener(2, version_fetcher, connections_builder.clone(), true).await; @@ -347,36 +368,27 @@ mod tests { #[tokio::test] async fn given_event_listener_should_retry_version_fetch_when_first_response_is_error() { let protocol_version = ProtocolVersion::from_str("1.5.10").unwrap(); - let version_fetcher = MockVersionFetcher::new(vec![ - Err(BuildVersionFetchError::Error(Error::msg("retryable error"))), - Ok(protocol_version), - ]); + let version_fetcher = MockVersionFetcher::new( + vec![ + Err(MetadataFetchError::Error(Error::msg("retryable error"))), + Ok(protocol_version), + ], + vec![Ok("network-1".to_string()), Ok("network-2".to_string())], + ); let connections_builder = 
Arc::new(MockConnectionsBuilder::one_ok()); let err = run_event_listener(2, version_fetcher, connections_builder.clone(), true).await; - let received_data = connections_builder.get_received_data().await; - assert_eq!(received_data.len(), 2); - assert!(set_contains(received_data, vec!["main-1", "events-1"],)); - assert!(err.to_string().contains("Max connection attempts reached")); - } - - #[tokio::test] - async fn given_event_listener_should_fail_when_one_connection_manager_fails_other_does_not() { - let version_fetcher = MockVersionFetcher::repeatable_from_protocol_version("1.5.10"); - let connections_builder = Arc::new(MockConnectionsBuilder::one_fails_immediatly()); - - let err = run_event_listener(1, version_fetcher, connections_builder.clone(), true).await; - let received_data = connections_builder.get_received_data().await; assert_eq!(received_data.len(), 1); - assert!(set_contains(received_data, vec!["main-1"],)); + assert!(set_contains(received_data, vec!["events-1"],)); assert!(err.to_string().contains("Max connection attempts reached")); } #[tokio::test] async fn given_event_listener_should_fail_if_connection_fails() { - let version_fetcher = MockVersionFetcher::repeatable_from_protocol_version("1.5.10"); + let version_fetcher = + MockVersionFetcher::repeatable_from_protocol_version("1.5.10", "network-1"); let connections_builder = Arc::new(MockConnectionsBuilder::connection_fails()); let err = run_event_listener(1, version_fetcher, connections_builder.clone(), true).await; @@ -388,20 +400,22 @@ mod tests { #[tokio::test] async fn given_event_listener_should_fetch_data_if_enough_reconnections() { - let version_fetcher = MockVersionFetcher::repeatable_from_protocol_version("1.5.10"); + let version_fetcher = + MockVersionFetcher::repeatable_from_protocol_version("2.0.0", "network-1"); let connections_builder = Arc::new(MockConnectionsBuilder::ok_after_two_fails()); let err = run_event_listener(3, version_fetcher, connections_builder.clone(), true).await; let 
received_data = connections_builder.get_received_data().await; - assert_eq!(received_data.len(), 2); - assert!(set_contains(received_data, vec!["main-2", "events-2"],)); + assert_eq!(received_data.len(), 1); + assert!(set_contains(received_data, vec!["events-2"],)); assert!(err.to_string().contains("Max connection attempts reached")); } #[tokio::test] async fn given_event_listener_should_give_up_retrying_if_runs_out() { - let version_fetcher = MockVersionFetcher::repeatable_from_protocol_version("1.5.10"); + let version_fetcher = + MockVersionFetcher::repeatable_from_protocol_version("1.5.10", "network-1"); let connections_builder = Arc::new(MockConnectionsBuilder::ok_after_two_fails()); let err = run_event_listener(2, version_fetcher, connections_builder.clone(), true).await; @@ -417,7 +431,7 @@ mod tests { allow_partial_connection: bool, ) -> Error { let mut listener = EventListener { - node_build_version: ProtocolVersion::from_parts(1, 0, 0), + node_metadata: NodeMetadata::default(), node: NodeConnectionInterface::default(), max_connection_attempts, delay_between_attempts: Duration::from_secs(1), diff --git a/listener/src/sse_connector.rs b/listener/src/sse_connector.rs index 24d5a655..dc3700dc 100644 --- a/listener/src/sse_connector.rs +++ b/listener/src/sse_connector.rs @@ -1,5 +1,7 @@ -use crate::connection_manager::{non_recoverable_error, recoverable_error, ConnectionManagerError}; -use crate::keep_alive_monitor::KeepAliveMonitor; +use crate::{ + connection_manager::{non_recoverable_error, recoverable_error, ConnectionManagerError}, + keep_alive_monitor::KeepAliveMonitor, +}; use anyhow::Error; use async_stream::stream; use async_trait::async_trait; @@ -7,17 +9,16 @@ use bytes::Bytes; use eventsource_stream::{Event, EventStream, EventStreamError, Eventsource}; use futures::StreamExt; use reqwest::Client; -use std::pin::Pin; -use std::{fmt::Debug, sync::Arc, time::Duration}; +use std::{fmt::Debug, pin::Pin, time::Duration}; use tokio::select; use 
tokio_stream::Stream; -use tracing::debug; +use tracing::{debug, trace}; use url::Url; #[derive(Clone, Debug)] pub enum SseDataStreamingError { NoDataTimeout(), - ConnectionError(Arc), + ConnectionError(), } pub type EventResult = Result>; @@ -84,8 +85,9 @@ impl SseConnection { monitor.tick().await; yield Ok(bytes); }, - Err(err) => { - yield Err(SseDataStreamingError::ConnectionError(Arc::new(Error::from(err)))); + Err(e) => { + trace!("Error when receiving bytes: {}", e); + yield Err(SseDataStreamingError::ConnectionError()); break; } } @@ -177,7 +179,6 @@ pub mod tests { use std::{ convert::Infallible, pin::Pin, - sync::Arc, time::{Duration, Instant}, }; use tokio::sync::mpsc::channel; @@ -225,9 +226,7 @@ pub mod tests { } pub fn build_failing_on_message() -> Self { - let e = SseDataStreamingError::ConnectionError(Arc::new(Error::msg( - "Some error on message", - ))); + let e = SseDataStreamingError::ConnectionError(); MockSseConnection { data: vec![], failure_on_connection: None, diff --git a/listener/src/types.rs b/listener/src/types.rs index dd39c63c..5b26645c 100644 --- a/listener/src/types.rs +++ b/listener/src/types.rs @@ -11,6 +11,7 @@ pub struct NodeConnectionInterface { pub ip_address: IpAddr, pub sse_port: u16, pub rest_port: u16, + pub network_name: Option, } #[cfg(test)] @@ -20,6 +21,7 @@ impl Default for NodeConnectionInterface { ip_address: "127.0.0.1".parse().unwrap(), sse_port: 100, rest_port: 200, + network_name: None, } } } @@ -32,11 +34,12 @@ pub struct SseEvent { pub data: SseData, /// Source from which we got the message pub source: Url, - /// In some cases it is required to emit the data exactly as we got it from the node. - /// For those situations we store the exact text of the raw payload in this field. - pub json_data: Option, /// Info from which filter we received the message. For some events (Shutdown in particularly) we want to push only to the same outbound as we received them from so we don't duplicate. 
pub inbound_filter: Filter, + /// Api version which was reported for the node from which the event was received. + pub api_version: String, + /// Network name of the node from which the event was received. + pub network_name: String, } impl SseEvent { @@ -44,8 +47,9 @@ impl SseEvent { id: u32, data: SseData, mut source: Url, - json_data: Option, inbound_filter: Filter, + api_version: String, + network_name: String, ) -> Self { // This is to remove the path e.g. /events/main // Leaving just the IP and port @@ -54,8 +58,9 @@ impl SseEvent { id, data, source, - json_data, inbound_filter, + api_version, + network_name, } } } diff --git a/listener/src/version_fetcher.rs b/listener/src/version_fetcher.rs index 53af8e0c..540ebf29 100644 --- a/listener/src/version_fetcher.rs +++ b/listener/src/version_fetcher.rs @@ -1,6 +1,7 @@ use anyhow::{anyhow, Context, Error}; use async_trait::async_trait; use casper_types::ProtocolVersion; +use metrics::observe_error; use once_cell::sync::Lazy; use serde_json::Value; use std::str::FromStr; @@ -8,18 +9,19 @@ use tracing::debug; use url::Url; const BUILD_VERSION_KEY: &str = "build_version"; +const CHAINSPEC_NAME_KEY: &str = "chainspec_name"; static MINIMAL_NODE_VERSION: Lazy = - Lazy::new(|| ProtocolVersion::from_parts(1, 5, 2)); + Lazy::new(|| ProtocolVersion::from_parts(2, 0, 0)); #[derive(Debug)] -pub enum BuildVersionFetchError { +pub enum MetadataFetchError { Error(anyhow::Error), VersionNotAcceptable(String), } #[cfg(test)] -impl Clone for BuildVersionFetchError { +impl Clone for MetadataFetchError { fn clone(&self) -> Self { match self { Self::Error(err) => Self::Error(Error::msg(err.to_string())), @@ -28,11 +30,32 @@ impl Clone for BuildVersionFetchError { } } +#[derive(Eq, PartialEq, Clone, Default, Debug)] +pub struct NodeMetadata { + pub build_version: ProtocolVersion, + pub network_name: String, +} + +impl NodeMetadata { + pub fn validate(&self) -> Result<(), MetadataFetchError> { + if 
self.build_version.lt(&MINIMAL_NODE_VERSION) { + let msg = format!( + "Node version expected to be >= {}.", + MINIMAL_NODE_VERSION.value(), + ); + Err(MetadataFetchError::VersionNotAcceptable(msg)) + } else { + Ok(()) + } + } +} + #[async_trait] -pub trait VersionFetcher: Sync + Send { - async fn fetch(&self) -> Result; +pub trait NodeMetadataFetcher: Sync + Send { + async fn fetch(&self) -> Result; } -pub fn for_status_endpoint(status_endpoint: Url) -> impl VersionFetcher { + +pub fn for_status_endpoint(status_endpoint: Url) -> impl NodeMetadataFetcher { StatusEndpointVersionFetcher { status_endpoint } } @@ -42,22 +65,22 @@ pub struct StatusEndpointVersionFetcher { } #[async_trait] -impl VersionFetcher for StatusEndpointVersionFetcher { - async fn fetch(&self) -> Result { +impl NodeMetadataFetcher for StatusEndpointVersionFetcher { + async fn fetch(&self) -> Result { let status_endpoint = self.status_endpoint.clone(); debug!("Fetching build version for {}", status_endpoint); - match fetch_build_version_from_status(status_endpoint).await { - Ok(version) => { - validate_version(&version)?; - Ok(version) + match fetch_metadata_from_status(status_endpoint).await { + Ok(metadata) => { + metadata.validate()?; + Ok(metadata) } - Err(fetch_err) => Err(BuildVersionFetchError::Error(fetch_err)), + Err(fetch_err) => Err(MetadataFetchError::Error(fetch_err)), } } } // Fetch the build version by requesting the status from the node's rest server. 
-async fn fetch_build_version_from_status(status_endpoint: Url) -> Result { +async fn fetch_metadata_from_status(status_endpoint: Url) -> Result { let status_response = reqwest::get(status_endpoint) .await .context("Should have responded with status")?; @@ -69,45 +92,51 @@ async fn fetch_build_version_from_status(status_endpoint: Url) -> Result Result<(), BuildVersionFetchError> { - if version.lt(&MINIMAL_NODE_VERSION) { - let msg = format!( - "Node version expected to be >= {}.", - MINIMAL_NODE_VERSION.value(), - ); - Err(BuildVersionFetchError::VersionNotAcceptable(msg)) - } else { - Ok(()) +fn try_resolve_network_name(raw_response: &Value) -> Result { + match raw_response.get(CHAINSPEC_NAME_KEY) { + Some(build_version_value) if build_version_value.is_string() => { + let raw = build_version_value + .as_str() + .context("chainspec_name should be a string")?; + Ok(raw.to_string()) + } + _ => { + count_error("failed_getting_chainspec_name_from_node_status"); + Err(anyhow!( + "failed to get {} from status response {}", + CHAINSPEC_NAME_KEY, + raw_response + )) + } } } -fn try_resolve_version(raw_response: Value) -> Result { +fn try_resolve_version(raw_response: &Value) -> Result { match raw_response.get(BUILD_VERSION_KEY) { Some(build_version_value) if build_version_value.is_string() => { let raw = build_version_value .as_str() .context("build_version_value should be a string") - .map_err(|e| { - count_error("version_value_not_a_string"); - e - })? + .inspect_err(|_| count_error("version_value_not_a_string"))? 
.split('-') .next() .context("splitting build_version_value should always return at least one slice") - .map_err(|e| { - count_error("incomprehensible_build_version_form"); - e - })?; + .inspect_err(|_| count_error("incomprehensible_build_version_form"))?; ProtocolVersion::from_str(raw).map_err(|error| { count_error("failed_parsing_protocol_version"); anyhow!("failed parsing build version from '{}': {}", raw, error) }) } _ => { - count_error("failed_getting_status_from_payload"); + count_error("failed_getting_build_version_from_node_status"); Err(anyhow!( "failed to get {} from status response {}", BUILD_VERSION_KEY, @@ -118,9 +147,7 @@ fn try_resolve_version(raw_response: Value) -> Result { } fn count_error(reason: &str) { - casper_event_types::metrics::ERROR_COUNTS - .with_label_values(&["fetching_build_version_for_node", reason]) - .inc(); + observe_error("fetching_build_version_for_node", reason); } #[cfg(test)] @@ -128,50 +155,85 @@ pub mod tests { use super::*; use casper_types::{ProtocolVersion, SemVer}; use mockito::{Mock, Server, ServerGuard}; - use serde_json::json; + use serde_json::Map; use tokio::sync::Mutex; #[tokio::test] - async fn try_resolve_version_should_interpret_cortest_by_build_versionrect_build_version() { - let mut protocol = test_by_build_version(Some("5.1.111-b94c4f79a")) + async fn try_resolve_version_should_interpret_correct_build_version() { + let mut metadata = test_by_build_version(Some("5.1.111-b94c4f79a"), Some("network-1")) .await .unwrap(); - assert_eq!(protocol, ProtocolVersion::new(SemVer::new(5, 1, 111))); + assert_eq!( + metadata, + NodeMetadata { + build_version: ProtocolVersion::new(SemVer::new(5, 1, 111)), + network_name: "network-1".to_string(), + } + ); - protocol = test_by_build_version(Some("6.2.112-b94c4f79a-casper-mainnet")) + metadata = + test_by_build_version(Some("6.2.112-b94c4f79a-casper-mainnet"), Some("network-2")) + .await + .unwrap(); + assert_eq!( + metadata, + NodeMetadata { + build_version: 
ProtocolVersion::new(SemVer::new(6, 2, 112)), + network_name: "network-2".to_string(), + } + ); + + metadata = test_by_build_version(Some("7.3.113"), Some("network-3")) .await .unwrap(); - assert_eq!(protocol, ProtocolVersion::new(SemVer::new(6, 2, 112))); - - protocol = test_by_build_version(Some("7.3.113")).await.unwrap(); - assert_eq!(protocol, ProtocolVersion::new(SemVer::new(7, 3, 113))); + assert_eq!( + metadata, + NodeMetadata { + build_version: ProtocolVersion::new(SemVer::new(7, 3, 113)), + network_name: "network-3".to_string(), + } + ); - let version_validation_failed = test_by_build_version(Some("1.5.1")).await; + let version_validation_failed = + test_by_build_version(Some("1.5.1"), Some("some-network")).await; assert!(matches!( version_validation_failed, - Err(BuildVersionFetchError::VersionNotAcceptable(_)) + Err(MetadataFetchError::VersionNotAcceptable(_)) )); } #[tokio::test] async fn try_resolve_should_fail_if_build_version_is_absent() { - let ret = test_by_build_version(None).await; + let ret = test_by_build_version(None, Some("x")).await; assert!(ret.is_err()); } #[tokio::test] async fn try_resolve_should_fail_if_build_version_is_invalid() { - let ret = test_by_build_version(Some("not-a-semver")).await; + let ret = test_by_build_version(Some("not-a-semver"), Some("x")).await; assert!(ret.is_err()); } - fn build_server_mock(build_version: Option<&str>) -> (Mock, String, ServerGuard) { - let mut server = Server::new(); + #[tokio::test] + async fn try_resolve_should_fail_if_no_network_name_in_response() { + let ret = test_by_build_version(Some("2.0.0"), None).await; + assert!(ret.is_err()); + } + + async fn build_server_mock( + build_version: Option<&str>, + network_name: Option<&str>, + ) -> (Mock, String, ServerGuard) { + let mut server = Server::new_async().await; let url = format!("{}/status", server.url()); - let json_object = match build_version { - Some(version) => json!({ BUILD_VERSION_KEY: version }), - None => json!({}), - }; + let mut m = 
Map::new(); + if let Some(version) = build_version { + m.insert(BUILD_VERSION_KEY.to_string(), version.to_string().into()); + } + if let Some(network) = network_name { + m.insert(CHAINSPEC_NAME_KEY.to_string(), network.to_string().into()); + } + let json_object: Value = m.into(); let raw_json = json_object.to_string(); let mock = server .mock("GET", "/status") @@ -185,8 +247,9 @@ pub mod tests { async fn test_by_build_version( build_version: Option<&str>, - ) -> Result { - let (mock, url, _server) = build_server_mock(build_version); + network_name: Option<&str>, + ) -> Result { + let (mock, url, _server) = build_server_mock(build_version, network_name).await; let result = for_status_endpoint(Url::parse(&url).unwrap()).fetch().await; mock.assert(); result @@ -194,35 +257,51 @@ pub mod tests { pub struct MockVersionFetcher { repeatable: bool, - version_responses: Mutex>>, + version_responses: Mutex>>, + network_name_responses: Mutex>>, } impl MockVersionFetcher { - pub fn repeatable_from_protocol_version(version: &str) -> Self { + pub fn repeatable_from_protocol_version(version: &str, network_name: &str) -> Self { let protocol_version = ProtocolVersion::from_str(version).unwrap(); Self { repeatable: true, version_responses: Mutex::new(vec![Ok(protocol_version)]), + network_name_responses: Mutex::new(vec![Ok(network_name.to_string())]), } } pub fn new( - version_responses: Vec>, + version_responses: Vec>, + network_name_responses: Vec>, ) -> Self { Self { repeatable: false, version_responses: Mutex::new(version_responses), + network_name_responses: Mutex::new(network_name_responses), } } } #[async_trait] - impl VersionFetcher for MockVersionFetcher { - async fn fetch(&self) -> Result { + impl NodeMetadataFetcher for MockVersionFetcher { + async fn fetch(&self) -> Result { let mut version_responses = self.version_responses.lock().await; + let mut network_name_responses = self.network_name_responses.lock().await; if self.repeatable { - return 
version_responses[0].clone(); + let version = version_responses[0].clone()?; + let network_name = network_name_responses[0].clone()?; + return Ok(NodeMetadata { + build_version: version, + network_name, + }); } - version_responses.pop().unwrap() //If we are fetching something that wasn't prepared it should be an error + //If we are fetching something that wasn't prepared it should be an error + let version = version_responses.pop().unwrap()?; + let network_name = network_name_responses.pop().unwrap()?; + Ok(NodeMetadata { + build_version: version, + network_name, + }) } } } diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml new file mode 100644 index 00000000..f89c5af8 --- /dev/null +++ b/metrics/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "metrics" +authors = ["Jakub Zajkowski "] +version = "1.0.0" +edition = "2021" +readme = "README.md" +description = "Lib aggregating metrics for casper-sidecar" +license-file = "../LICENSE" +documentation = "README.md" +homepage = "https://github.com/casper-network/casper-sidecar/" +repository = "https://github.com/casper-network/casper-sidecar/" + +[dependencies] +once_cell = { workspace = true } +prometheus = { version = "0.13.3", features = ["process"] } + +[features] +additional-metrics = [] diff --git a/metrics/README.md b/metrics/README.md new file mode 100644 index 00000000..a1aa861c --- /dev/null +++ b/metrics/README.md @@ -0,0 +1,3 @@ +# `metrics` + +Library to keep metrics for the project. This is kept separately from `types` since not all modules that use metrics need `types`. 
\ No newline at end of file diff --git a/metrics/src/db.rs b/metrics/src/db.rs new file mode 100644 index 00000000..8fc9177e --- /dev/null +++ b/metrics/src/db.rs @@ -0,0 +1,69 @@ +use super::REGISTRY; +use once_cell::sync::Lazy; +#[cfg(feature = "additional-metrics")] +use prometheus::GaugeVec; +use prometheus::{HistogramOpts, HistogramVec, Opts}; + +const RAW_DATA_SIZE_BUCKETS: &[f64; 8] = &[ + 5e+2_f64, 1e+3_f64, 2e+3_f64, 5e+3_f64, 5e+4_f64, 5e+5_f64, 5e+6_f64, 5e+7_f64, +]; + +static FETCHED_RAW_DATA_SIZE: Lazy = Lazy::new(|| { + let counter = HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "db_raw_data_size", + "Size (in bytes) of raw data fetched from the database.", + ), + buckets: Vec::from(RAW_DATA_SIZE_BUCKETS as &'static [f64]), + }, + &["message_type"], + ) + .expect("db_raw_data_size metric can't be created"); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); + +pub fn observe_raw_data_size(message_type: &str, bytes: usize) { + FETCHED_RAW_DATA_SIZE + .with_label_values(&[message_type]) + .observe(bytes as f64); +} + +#[cfg(feature = "additional-metrics")] +const DB_OPERATION_BUCKETS: &[f64; 8] = &[ + 3e+5_f64, 3e+6_f64, 10e+6_f64, 20e+6_f64, 5e+7_f64, 1e+8_f64, 5e+8_f64, 1e+9_f64, +]; + +#[cfg(feature = "additional-metrics")] +pub static DB_OPERATION_TIMES: Lazy = Lazy::new(|| { + let counter = HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "db_operation_times", + "Times (in nanoseconds) it took to perform a database operation.", + ), + buckets: Vec::from(DB_OPERATION_BUCKETS as &'static [f64]), + }, + &["filter"], + ) + .expect("metric can't be created"); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); +#[cfg(feature = "additional-metrics")] +pub static EVENTS_PROCESSED_PER_SECOND: Lazy = Lazy::new(|| { + let counter = GaugeVec::new( + Opts::new("events_processed", "Events processed by sidecar. 
Split by \"module\" which should be either \"inbound\" or \"outbound\". \"Inbound\" means the number of events per second which were read from node endpoints and persisted in DB. \"Outbound\" means number of events pushed to clients."), + &["module"] +) +.expect("metric can't be created"); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); diff --git a/metrics/src/lib.rs b/metrics/src/lib.rs new file mode 100644 index 00000000..bfc3f104 --- /dev/null +++ b/metrics/src/lib.rs @@ -0,0 +1,8 @@ +pub mod metrics; +pub use metrics::{metrics_summary, observe_error, MetricCollectionError}; +pub mod db; +pub mod rest_api; +pub mod rpc; +pub mod sse; + +use metrics::REGISTRY; diff --git a/metrics/src/metrics.rs b/metrics/src/metrics.rs new file mode 100644 index 00000000..694a3f93 --- /dev/null +++ b/metrics/src/metrics.rs @@ -0,0 +1,82 @@ +use std::fmt::{Display, Formatter}; + +use once_cell::sync::Lazy; +use prometheus::{IntCounterVec, Opts, Registry}; + +pub static REGISTRY: Lazy = Lazy::new(Registry::new); + +static ERROR_COUNTS: Lazy = Lazy::new(|| { + let counter = IntCounterVec::new( + Opts::new("error_counts", "Error counts"), + &["category", "description"], + ) + .unwrap(); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); + +pub fn observe_error(category: &str, description: &str) { + ERROR_COUNTS + .with_label_values(&[category, description]) + .inc(); +} + +pub struct MetricCollectionError { + reason: String, +} + +impl Display for MetricCollectionError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "MetricCollectionError: {}", self.reason) + } +} + +impl MetricCollectionError { + fn new(reason: String) -> Self { + MetricCollectionError { reason } + } +} + +pub fn metrics_summary() -> Result { + use prometheus::Encoder; + let encoder = prometheus::TextEncoder::new(); + let mut buffer = Vec::new(); + if let Err(e) = 
encoder.encode(®ISTRY.gather(), &mut buffer) { + return Err(MetricCollectionError::new(format!( + "could not encode custom metrics: {}", + e + ))); + }; + let mut res = match String::from_utf8(buffer.clone()) { + Ok(v) => v, + Err(e) => { + return Err(MetricCollectionError::new(format!( + "custom metrics have a non-utf8 character: {}", + e + ))); + } + }; + buffer.clear(); + + let mut buffer = Vec::new(); + if let Err(e) = encoder.encode(&prometheus::gather(), &mut buffer) { + return Err(MetricCollectionError::new(format!( + "error when encoding default prometheus metrics: {}", + e + ))); + }; + let res_custom = match String::from_utf8(buffer.clone()) { + Ok(v) => v, + Err(e) => { + return Err(MetricCollectionError::new(format!( + "default and custom metrics have a non-utf8 character: {}", + e + ))) + } + }; + buffer.clear(); + res.push_str(&res_custom); + Ok(res) +} diff --git a/metrics/src/rest_api.rs b/metrics/src/rest_api.rs new file mode 100644 index 00000000..61cdce19 --- /dev/null +++ b/metrics/src/rest_api.rs @@ -0,0 +1,50 @@ +use super::REGISTRY; +use once_cell::sync::Lazy; +use prometheus::{HistogramOpts, HistogramVec, IntGauge, Opts}; +use std::time::Duration; + +const RESPONSE_TIME_MS_BUCKETS: &[f64; 8] = &[ + 1_f64, 5_f64, 10_f64, 30_f64, 50_f64, 100_f64, 200_f64, 300_f64, +]; + +static CONNECTED_CLIENTS: Lazy = Lazy::new(|| { + let counter = IntGauge::new("rest_api_connected_clients", "Connected Clients") + .expect("rest_api_connected_clients metric can't be created"); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); + +static RESPONSE_TIMES_MS: Lazy = Lazy::new(|| { + let counter = HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "rest_api_response_times", + "Time it takes the service to produce a response in milliseconds", + ), + buckets: Vec::from(RESPONSE_TIME_MS_BUCKETS as &'static [f64]), + }, + &["label", "status"], + ) + .expect("rest_api_response_times metric can't be 
created"); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); + +pub fn inc_connected_clients() { + CONNECTED_CLIENTS.inc(); +} + +pub fn dec_connected_clients() { + CONNECTED_CLIENTS.dec(); +} + +pub fn observe_response_time(label: &str, status: &str, response_time: Duration) { + let response_time = response_time.as_secs_f64() * 1000.0; + RESPONSE_TIMES_MS + .with_label_values(&[label, status]) + .observe(response_time); +} diff --git a/metrics/src/rpc.rs b/metrics/src/rpc.rs new file mode 100644 index 00000000..df3690b0 --- /dev/null +++ b/metrics/src/rpc.rs @@ -0,0 +1,145 @@ +use std::time::Duration; + +use super::REGISTRY; +use once_cell::sync::Lazy; +use prometheus::{Histogram, HistogramOpts, HistogramVec, IntCounterVec, IntGauge, Opts}; + +const RESPONSE_SIZE_BUCKETS: &[f64; 8] = &[ + 5e+2_f64, 1e+3_f64, 2e+3_f64, 5e+3_f64, 5e+4_f64, 5e+5_f64, 5e+6_f64, 5e+7_f64, +]; + +const RESPONSE_TIME_MS_BUCKETS: &[f64; 9] = &[ + 1_f64, 5_f64, 10_f64, 30_f64, 50_f64, 100_f64, 300_f64, 1000_f64, 3000_f64, +]; + +static ENDPOINT_CALLS: Lazy = Lazy::new(|| { + let counter = IntCounterVec::new( + Opts::new("rpc_server_endpoint_calls", "Endpoint calls"), + &["endpoint_name"], + ) + .unwrap(); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); + +static TIMEOUT_COUNTERS: Lazy = Lazy::new(|| { + let counter = IntCounterVec::new( + Opts::new( + "rpc_server_timeout_counts", + "Counters for how many of the requests failed due to internal timeout", + ), + &["timer"], + ) + .unwrap(); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); + +static RESPONSE_TIMES_MS: Lazy = Lazy::new(|| { + let histogram = HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "rpc_server_endpoint_response_times", + "Time it takes the service to produce a response in milliseconds", + ), + buckets: Vec::from(RESPONSE_TIME_MS_BUCKETS as &'static 
[f64]), + }, + &["method", "status"], + ) + .expect("rpc_server_endpoint_response_times metric can't be created"); + REGISTRY + .register(Box::new(histogram.clone())) + .expect("cannot register metric"); + histogram +}); + +static RECONNECT_TIMES_MS: Lazy = Lazy::new(|| { + let opts = HistogramOpts::new( + "rpc_server_reconnect_time", + "Time it takes the service to reconnect to node binary port in milliseconds", + ) + .buckets(RESPONSE_TIME_MS_BUCKETS.to_vec()); + let histogram = + Histogram::with_opts(opts).expect("rpc_server_reconnect_time metric can't be created"); + REGISTRY + .register(Box::new(histogram.clone())) + .expect("cannot register metric"); + histogram +}); + +static MISMATCHED_IDS: Lazy = Lazy::new(|| { + let counter = IntGauge::new( + "rpc_server_mismatched_ids", + "Number of mismatched ID events observed in responses from binary port", + ) + .expect("rpc_server_mismatched_ids metric can't be created"); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); + +static DISCONNECT_EVENTS: Lazy = Lazy::new(|| { + let counter = IntGauge::new( + "rpc_server_disconnects", + "Number of TCP disconnects between sidecar and nodes binary port", + ) + .expect("rpc_server_disconnects metric can't be created"); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); + +static ENDPOINT_REQUEST_BYTES: Lazy = Lazy::new(|| { + let counter = HistogramVec::new( + HistogramOpts { + common_opts: Opts::new("rpc_server_request_sizes", "Endpoint request sizes"), + buckets: Vec::from(RESPONSE_SIZE_BUCKETS as &'static [f64]), + }, + &["endpoint"], + ) + .unwrap(); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); + +pub fn inc_method_call(method: &str) { + ENDPOINT_CALLS.with_label_values(&[method]).inc(); +} + +pub fn observe_response_time(method: &str, status: &str, response_time: Duration) { + let response_time = 
response_time.as_secs_f64() * 1000.0; + RESPONSE_TIMES_MS + .with_label_values(&[method, status]) + .observe(response_time); +} + +pub fn observe_reconnect_time(response_time: Duration) { + let response_time = response_time.as_secs_f64() * 1000.0; + RECONNECT_TIMES_MS.observe(response_time); +} + +pub fn inc_disconnect() { + DISCONNECT_EVENTS.inc(); +} + +pub fn register_request_size(method: &str, payload_size: f64) { + ENDPOINT_REQUEST_BYTES + .with_label_values(&[method]) + .observe(payload_size); +} + +pub fn register_timeout(timer_name: &str) { + TIMEOUT_COUNTERS.with_label_values(&[timer_name]).inc(); +} + +pub fn register_mismatched_id() { + MISMATCHED_IDS.inc(); +} diff --git a/metrics/src/sse.rs b/metrics/src/sse.rs new file mode 100644 index 00000000..97a9c477 --- /dev/null +++ b/metrics/src/sse.rs @@ -0,0 +1,69 @@ +use once_cell::sync::Lazy; +use prometheus::{GaugeVec, HistogramOpts, HistogramVec, Opts}; + +use super::REGISTRY; + +const BUCKETS: &[f64; 8] = &[ + 5e+2_f64, 1e+3_f64, 2e+3_f64, 5e+3_f64, 5e+4_f64, 5e+5_f64, 5e+6_f64, 5e+7_f64, +]; + +static NODE_STATUSES: Lazy = Lazy::new(|| { + let counter = GaugeVec::new( + Opts::new("node_statuses", "Current status of node to which sidecar is connected. 
Numbers mean: 0 - preparing; 1 - connecting; 2 - connected; 3 - reconnecting; -1 - connections_exhausted -> used up all connection attempts ; -2 - incompatible -> node is in an incompatible version"), + &["node"] + ) + .expect("metric can't be created"); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); + +static RECEIVED_BYTES: Lazy = Lazy::new(|| { + let counter = HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sse_server_received_bytes", + "Received bytes from SSE inbound per message type", + ), + buckets: Vec::from(BUCKETS as &'static [f64]), + }, + &["message_type"], + ) + .unwrap(); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register metric"); + counter +}); + +static NUMBER_OF_RECEIVED_CONTRACT_MESSAGES: Lazy = Lazy::new(|| { + let counter = GaugeVec::new( + Opts::new( + "sse_server_received_contract_messages", + "Number of messages received in TransactionProcessed events.", + ), + &["label"], + ) + .expect("cannot sse_server_received_contract_messages metric"); + REGISTRY + .register(Box::new(counter.clone())) + .expect("cannot register sse_server_received_contract_messages metric"); + counter +}); + +pub fn observe_contract_messages(label: &str, number_of_messages: usize) { + NUMBER_OF_RECEIVED_CONTRACT_MESSAGES + .with_label_values(&[label]) + .add(number_of_messages as f64); +} + +pub fn register_sse_message_size(sse_message_type: &str, payload_size: f64) { + RECEIVED_BYTES + .with_label_values(&[sse_message_type]) + .observe(payload_size); +} + +pub fn store_node_status(node_label: &str, status: f64) { + NODE_STATUSES.with_label_values(&[node_label]).set(status); +} diff --git a/resources/ETC_README.md b/resources/ETC_README.md index 99ba0a1d..799cbaf6 100644 --- a/resources/ETC_README.md +++ b/resources/ETC_README.md @@ -1,237 +1,107 @@ -# Casper Event Sidecar README for Node Operators +# Casper Sidecar README for Node Operators -## Summary of Purpose +This 
page contains specific instructions for node operators. Before proceeding, familiarize yourself with the main [README](../README.md) file, which covers the following: -The Casper Event Sidecar is an application that runs in tandem with the node process. This reduces the load on the node process by allowing subscribers to monitor the event stream through the Sidecar, while the node focuses entirely on the blockchain. Users needing access to the JSON-RPC will still need to query the node directly. +- [Summary of purpose](../README.md#summary-of-purpose) +- [System components and architecture](../README.md#system-components-and-architecture) +- [Configuration options](../README.md#configuring-the-sidecar) +- [Running and testing the Sidecar](../README.md#running-and-testing-the-sidecar) +- [Troubleshooting tips](../README.md#troubleshooting-tips) -While the primary use case for the Sidecar application is running alongside the node on the same machine, it can be run remotely if necessary. +## Configuring the Sidecar -### System Components & Architecture - -Casper Nodes offer a Node Event Stream API returning Server-Sent Events (SSEs) that hold JSON-encoded data. The SSE Sidecar uses this API to achieve the following goals: - -* Build a sidecar middleware service that reads the Event Stream of all connected nodes, acting as a passthrough and replicating the SSE interface of the connected nodes and their filters (i.e., `/main`, `/deploys`, and `/sigs` with support for the use of the `?start_from=` query to allow clients to get previously sent events from the Sidecar's buffer). - -* Provide a new RESTful endpoint that is discoverable to node operators. - -The SSE Sidecar uses one ring buffer for outbound events, providing some robustness against unintended subscriber disconnects. If a disconnected subscriber re-subscribes before the buffer moves past their last received event, there will be no gap in the event history if they use the `start_from` URL query. 
- - -## Configuration - -The file `/etc/casper-event-sidecar/config.toml` holds a default configuration. This should work if installed on a Casper node. +The file `/etc/casper-sidecar/config.toml` holds a default configuration. This should work if installed on a Casper node. If you install the Sidecar on an external server, you must update the `ip-address` values under `node_connections` appropriately. -### Node Connections +For more information, including how to setup the SSE, RPC, REST, and Admin servers, read the [configuration options](../README.md#configuring-the-sidecar) in the main README. -The Sidecar can connect to Casper nodes with versions greater or equal to `1.5.2`. +## Installing the Sidecar on a Node -The `node_connections` option configures the node (or multiple nodes) to which the Sidecar will connect and the parameters under which it will operate with that node. +The following command will install the Debian package for the Casper Sidecar service on various flavors of Linux. -``` -[[connections]] -ip_address = "127.0.0.1" -sse_port = 9999 -rest_port = 8888 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = true -connection_timeout_in_seconds = 3 -no_message_timeout_in_seconds = 60 -sleep_between_keep_alive_checks_in_seconds = 30 -``` - -* `ip_address` - The IP address of the node to monitor. -* `sse_port` - The node's event stream (SSE) port. This [example configuration](../EXAMPLE_NODE_CONFIG.toml) uses port `9999`. -* `rest_port` - The node's REST endpoint for status and metrics. This [example configuration](../EXAMPLE_NODE_CONFIG.toml) uses port `8888`. -* `max_attempts` - The maximum number of attempts the Sidecar will make to connect to the node. If set to `0`, the Sidecar will not attempt to connect. -* `delay_between_retries_in_seconds` - The delay between attempts to connect to the node. 
-* `allow_partial_connection` - Determining whether the sidecar will allow a partial connection to this node. -* `enable_logging` - This enables logging of events from the node in question. -* `connection_timeout_in_seconds` - Number of seconds before the connection request times out. Parameter is optional, defaults to 5 -* `no_message_timeout_in_seconds` - Number of seconds after which the connection will be restarted if no bytes were received. Parameter is optional, defaults to 120 -* `sleep_between_keep_alive_checks_in_seconds` - Optional parameter specifying the time intervals (in seconds) for checking if the connection is still alive. Defaults to 60 + -Connecting to multiple nodes requires multiple `[[connections]]` sections: - -``` -[[connections]] -ip_address = "127.0.0.1" -sse_port = 9999 -rest_port = 8888 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = true - -[[connections]] -ip_address = "18.154.79.193" -sse_port = 1234 -rest_port = 3456 -max_attempts = 10 -delay_between_retries_in_seconds = 5 -allow_partial_connection = false -enable_logging = true +```bash +sudo apt install ./casper-sidecar_0.1.0-0_amd64.deb ``` -### Storage +Check the service status: -This directory stores the SSE cache and an SQLite database if the Sidecar is configured to use SQLite. - -``` -[storage] -storage_path = "/var/lib/casper-event-sidecar" +```bash +systemctl status casper-sidecar ``` -### Database Connectivity - - - -The Sidecar can connect to different types of databases. The current options are `SQLite` or `PostgreSQL`. The following sections show how to configure the database connection for one of these DBs. Note that the Sidecar can only connect to one DB at a time. - -#### SQLite Database - -This section includes configurations for the SQLite database. +Check the logs and make sure the service is running as expected. 
-``` -[storage.sqlite_config] -file_name = "sqlite_database.db3" -max_connections_in_pool = 100 -# https://www.sqlite.org/compile.html#default_wal_autocheckpoint -wal_autocheckpointing_interval = 1000 +```bash +journalctl --no-pager -u casper-sidecar ``` -* `file_name` - The database file path. -* `max_connections_in_pool` - The maximum number of connections to the database. (Should generally be left as is.) -* `wal_autocheckpointing_interval` - This controls how often the system commits pages to the database. The value determines the maximum number of pages before forcing a commit. More information can be found [here](https://www.sqlite.org/compile.html#default_wal_autocheckpoint). +If you see any errors, you may need to [update the configuration](#configuring-the-service) and restart the service with the commands below. -#### PostgreSQL Database +## Running the Sidecar on a Node -The properties listed below are elements of the PostgreSQL database connection that can be configured for the Sidecar. +The `casper-sidecar` service starts after installation, using the systemd service file. -* `database_name` - Name of the database. -* `host` - URL to PostgreSQL instance. -* `database_username` - Username. -* `database_password` - Database password. -* `max_connections_in_pool` - The maximum number of connections to the database. -* `port` - The port for the database connection. +### Stop +`sudo systemctl stop casper-sidecar.service` -To run the Sidecar with PostgreSQL, you can set the following database environment variables to control how the Sidecar connects to the database. This is the suggested method to set the connection information for the PostgreSQL database. 
+### Start -``` -SIDECAR_POSTGRES_USERNAME="your username" -``` +`sudo systemctl start casper-sidecar.service` -``` -SIDECAR_POSTGRES_PASSWORD="your password" -``` +## Sidecar Storage -``` -SIDECAR_POSTGRES_DATABASE_NAME="your database name" -``` +This directory stores the SSE cache and an SQLite database if the Sidecar was configured to use SQLite. -``` -SIDECAR_POSTGRES_HOST="your host" +```toml +[storage] +storage_folder = "./target/storage" ``` -``` -SIDECAR_POSTGRES_MAX_CONNECTIONS="max connections" -``` +Check the service status: -``` -SIDECAR_POSTGRES_PORT="port" +```bash +systemctl status casper-sidecar ``` -However, DB connectivity can also be configured using the Sidecar configuration file. - -If the DB environment variables and the Sidecar's configuration file have the same variable set, the DB environment variables will take precedence. - -It is possible to completely omit the PostgreSQL configuration from the Sidecar's configuration file. In this case, the Sidecar will attempt to connect to the PostgreSQL using the database environment variables or use some default values for non-critical variables. +Check the logs and make sure the service is running as expected. -``` -[storage.postgresql_config] -database_name = "event_sidecar" -host = "localhost" -database_password = "p@$$w0rd" -database_username = "postgres" -max_connections_in_pool = 30 +```bash +journalctl --no-pager -u casper-sidecar ``` -### REST & Event Stream Criteria +If you see any errors, you may need to [update the configuration](#configuring-the-service) and restart the service with the commands below. -This information determines outbound connection criteria for the Sidecar's `rest_server`. +## Running the Sidecar on a Node - +The `casper-sidecar` service starts after installation, using the systemd service file. 
-``` -[rest_server] -port = 18888 -max_concurrent_requests = 50 -max_requests_per_second = 50 -request_timeout_in_seconds = 10 -``` - -* `port` - The port for accessing the Sidecar's `rest_server`. `18888` is the default, but operators are free to choose their own port as needed. -* `max_concurrent_requests` - The maximum total number of simultaneous requests that can be made to the REST server. -* `max_requests_per_second` - The maximum total number of requests that can be made per second. -* `request_timeout_in_seconds` - The total time before a request times out. - -``` -[event_stream_server] -port = 19999 -max_concurrent_subscribers = 100 -event_stream_buffer_length = 5000 -``` - -The `event_stream_server` section specifies a port for the Sidecar's event stream. +### Stop -Additionally, there are the following two options: +`sudo systemctl stop casper-sidecar.service` -* `max_concurrent_subscribers` - The maximum number of subscribers that can monitor the Sidecar's event stream. -* `event_stream_buffer_length` - The number of events that the stream will hold in its buffer for reference when a subscriber reconnects. +### Start -### Admin Server +`sudo systemctl start casper-sidecar.service` - +## Sidecar Storage -This optional section configures the Sidecar's administrative REST server. If this section is not specified, the Sidecar will not start an admin server. +This directory stores the SSE cache and a database if the Sidecar was configured to use one. +```toml +[storage] +storage_path = "/var/lib/casper-sidecar" ``` -[admin_server] -port = 18887 -max_concurrent_requests = 1 -max_requests_per_second = 1 -``` - -* `port` - The port for accessing the Sidecar's admin REST server. -* `max_concurrent_requests` - The maximum total number of simultaneous requests that can be sent to the admin server. -* `max_requests_per_second` - The maximum total number of requests that can be sent per second to the admin server. 
-Access the admin server at `http://localhost:18887/metrics/`. +The DB setup is described [here](../README#database-connectivity-setup). ## Swagger Documentation -Once the Sidecar is running, access the Swagger documentation at `http://localhost:18888/swagger-ui/`. +If the Sidecar is running locally, access the Swagger documentation at `http://localhost:18888/swagger-ui/`. ## OpenAPI Specification An OpenAPI schema is available at `http://localhost:18888/api-doc.json/`. - -## Running the Event Sidecar - -The `casper-event-sidecar` service starts after installation, using the systemd service file. - -### Stop - -`sudo systemctl stop casper-event-sidecar.service` - -### Start - -`sudo systemctl start casper-event-sidecar.service` - -### Logs - -`journalctl --no-pager -u casper-event-sidecar` \ No newline at end of file diff --git a/resources/example_configs/EXAMPLE_NCTL_CONFIG.toml b/resources/example_configs/EXAMPLE_NCTL_CONFIG.toml new file mode 100644 index 00000000..c8968d0e --- /dev/null +++ b/resources/example_configs/EXAMPLE_NCTL_CONFIG.toml @@ -0,0 +1,94 @@ +[rpc_server.main_server] +enable_server = true +ip_address = "0.0.0.0" +port = 11102 +qps_limit = 100 +max_body_bytes = 2621440 +cors_origin = "" + +[rpc_server.speculative_exec_server] +enable_server = true +ip_address = "0.0.0.0" +port = 25102 +qps_limit = 1 +max_body_bytes = 2621440 +cors_origin = "" + +[rpc_server.node_client] +ip_address = "0.0.0.0" +port = 28102 +max_message_size_bytes = 4194304 +message_timeout_secs = 10 +client_access_timeout_secs = 10 +keepalive_timeout_ms = 4_000 + +[rpc_server.node_client.exponential_backoff] +initial_delay_ms = 1000 +max_delay_ms = 32000 +coefficient = 2 +max_attempts = 30 + +[sse_server] +enable_server = true +disable_event_persistence = false + +[[sse_server.connections]] +ip_address = "127.0.0.1" +sse_port = 18101 +rest_port = 14101 +max_attempts = 10 +delay_between_retries_in_seconds = 5 +allow_partial_connection = false +enable_logging = true 
+no_message_timeout_in_seconds = 10 +sleep_between_keep_alive_checks_in_seconds = 5 + +[[sse_server.connections]] +ip_address = "127.0.0.1" +sse_port = 18102 +rest_port = 14102 +max_attempts = 10 +delay_between_retries_in_seconds = 5 +allow_partial_connection = false +enable_logging = false +no_message_timeout_in_seconds = 10 +sleep_between_keep_alive_checks_in_seconds = 5 + +[[sse_server.connections]] +ip_address = "127.0.0.1" +sse_port = 18103 +rest_port = 14103 +max_attempts = 10 +delay_between_retries_in_seconds = 5 +allow_partial_connection = false +enable_logging = false +connection_timeout_in_seconds = 3 +no_message_timeout_in_seconds = 10 +sleep_between_keep_alive_checks_in_seconds = 5 + +[sse_server.event_stream_server] +port = 19999 +max_concurrent_subscribers = 100 +event_stream_buffer_length = 5000 + +[storage] +storage_folder = "/var/lib/casper-sidecar" + +[storage.sqlite_config] +enabled = true +file_name = "sqlite_database.db3" +max_connections_in_pool = 100 +# https://www.sqlite.org/compile.html#default_wal_autocheckpoint +wal_autocheckpointing_interval = 1000 + +[rest_api_server] +enable_server = true +port = 18888 +max_concurrent_requests = 50 +max_requests_per_second = 50 + +[admin_api_server] +enable_server = true +port = 18887 +max_concurrent_requests = 1 +max_requests_per_second = 1 diff --git a/resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml b/resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml new file mode 100644 index 00000000..efc303df --- /dev/null +++ b/resources/example_configs/EXAMPLE_NCTL_POSTGRES_CONFIG.toml @@ -0,0 +1,94 @@ +[rpc_server.main_server] +enable_server = true +ip_address = "0.0.0.0" +port = 11102 +qps_limit = 100 +max_body_bytes = 2621440 +cors_origin = "" + +[rpc_server.speculative_exec_server] +enable_server = true +ip_address = "0.0.0.0" +port = 25102 +qps_limit = 1 +max_body_bytes = 2621440 +cors_origin = "" + +[rpc_server.node_client] +ip_address = "0.0.0.0" +port = 28102 +max_message_size_bytes 
= 4194304 +message_timeout_secs = 10 +client_access_timeout_secs = 10 +keepalive_timeout_ms = 4_000 + +[rpc_server.node_client.exponential_backoff] +initial_delay_ms = 1000 +max_delay_ms = 32000 +coefficient = 2 +max_attempts = 30 + +[sse_server] +enable_server = true +disable_event_persistence = false + +[[sse_server.connections]] +ip_address = "127.0.0.1" +sse_port = 18101 +rest_port = 14101 +max_attempts = 10 +delay_between_retries_in_seconds = 5 +allow_partial_connection = false +enable_logging = true +no_message_timeout_in_seconds = 10 +sleep_between_keep_alive_checks_in_seconds = 5 + +[[sse_server.connections]] +ip_address = "127.0.0.1" +sse_port = 18102 +rest_port = 14102 +max_attempts = 10 +delay_between_retries_in_seconds = 5 +allow_partial_connection = false +enable_logging = false +no_message_timeout_in_seconds = 10 +sleep_between_keep_alive_checks_in_seconds = 5 + +[[sse_server.connections]] +ip_address = "127.0.0.1" +sse_port = 18103 +rest_port = 14103 +max_attempts = 10 +delay_between_retries_in_seconds = 5 +allow_partial_connection = false +enable_logging = false +connection_timeout_in_seconds = 3 +no_message_timeout_in_seconds = 10 +sleep_between_keep_alive_checks_in_seconds = 5 + +[sse_server.event_stream_server] +port = 19999 +max_concurrent_subscribers = 100 +event_stream_buffer_length = 5000 + +[storage] +storage_folder = "/var/lib/casper-sidecar" + +[storage.postgresql_config] +enabled = true +database_name = "event_sidecar" +host = "localhost" +database_password = "p@$$w0rd" +database_username = "postgres" +max_connections_in_pool = 30 + +[rest_api_server] +enable_server = true +port = 18888 +max_concurrent_requests = 50 +max_requests_per_second = 50 + +[admin_api_server] +port = 18887 +max_concurrent_requests = 1 +max_requests_per_second = 1 diff --git a/resources/example_configs/EXAMPLE_NODE_CONFIG.toml b/resources/example_configs/EXAMPLE_NODE_CONFIG.toml new file mode 100644 index 00000000..b011899b --- /dev/null +++ 
b/resources/example_configs/EXAMPLE_NODE_CONFIG.toml @@ -0,0 +1,93 @@ +[rpc_server.main_server] +enable_server = true +ip_address = "0.0.0.0" +port = 7777 +qps_limit = 100 +max_body_bytes = 2621440 +cors_origin = "" + +[rpc_server.speculative_exec_server] +enable_server = true +ip_address = "0.0.0.0" +port = 7778 +qps_limit = 1 +max_body_bytes = 2621440 +cors_origin = "" + +[rpc_server.node_client] +ip_address = "3.20.57.210" +port = 7777 +max_message_size_bytes = 4194304 +message_timeout_secs = 10 +client_access_timeout_secs = 10 +keepalive_timeout_ms = 4_000 + +[rpc_server.node_client.exponential_backoff] +initial_delay_ms = 1000 +max_delay_ms = 32000 +coefficient = 2 +max_attempts = 30 + +[sse_server] +enable_server = true +disable_event_persistence = false + +[[sse_server.connections]] +ip_address = "168.254.51.1" +sse_port = 9999 +rest_port = 8888 +max_attempts = 100 +delay_between_retries_in_seconds = 10 +allow_partial_connection = true +enable_logging = false +no_message_timeout_in_seconds = 20 +sleep_between_keep_alive_checks_in_seconds = 10 + +[[sse_server.connections]] +ip_address = "168.254.51.2" +sse_port = 9999 +rest_port = 8888 +max_attempts = 100 +delay_between_retries_in_seconds = 10 +allow_partial_connection = false +enable_logging = false +no_message_timeout_in_seconds = 20 +sleep_between_keep_alive_checks_in_seconds = 10 + +[[sse_server.connections]] +ip_address = "168.254.51.3" +sse_port = 9999 +rest_port = 8888 +max_attempts = 100 +delay_between_retries_in_seconds = 10 +allow_partial_connection = false +enable_logging = false +no_message_timeout_in_seconds = 20 +sleep_between_keep_alive_checks_in_seconds = 10 + +[sse_server.event_stream_server] +port = 19999 +max_concurrent_subscribers = 100 +event_stream_buffer_length = 5000 + +[storage] +storage_folder = "/var/lib/casper-sidecar" + +[storage.sqlite_config] +enabled = true +file_name = "sqlite_database.db3" +max_connections_in_pool = 100 +# 
https://www.sqlite.org/compile.html#default_wal_autocheckpoint +wal_autocheckpointing_interval = 1000 + +[rest_api_server] +enable_server = true +port = 18888 +max_concurrent_requests = 50 +max_requests_per_second = 50 + +[admin_api_server] +enable_server = true +port = 18887 +max_concurrent_requests = 1 +max_requests_per_second = 1 diff --git a/resources/example_configs/default_debian_config.toml b/resources/example_configs/default_debian_config.toml new file mode 100644 index 00000000..8af67049 --- /dev/null +++ b/resources/example_configs/default_debian_config.toml @@ -0,0 +1,89 @@ +# ================================================== +# Configuration options for the JSON-RPC HTTP server +# ================================================== +[rpc_server.main_server] +# Enables the JSON-RPC HTTP server. +enable_server = true + +# Listening address for JSON-RPC HTTP server. If the port is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. If binding fails, +# the JSON-RPC HTTP server will not run, but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +ip_address = '0.0.0.0' +port= 7777 + +# The global max rate of requests (per second) before they are limited. +# Request will be delayed to the next 1 second bucket once limited. +qps_limit = 100 + +# Maximum number of bytes to accept in a single request body. +max_body_bytes = 2_621_440 + +# Specifies which origin will be reported as allowed by RPC server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). 
+cors_origin = '' + + +# ======================================================================== +# Configuration options for the speculative execution JSON-RPC HTTP server +# ======================================================================== +[rpc_server.speculative_exec_server] + +# Enables the speculative execution JSON-RPC HTTP server. +enable_server = true + +# Listening address for speculative execution JSON-RPC HTTP server. If the port +# is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. +# If binding fails, the speculative execution JSON-RPC HTTP server will not run, +# but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +ip_address = '0.0.0.0' +port = 7778 + +# The global max rate of requests (per second) before they are limited. +# Request will be delayed to the next 1 second bucket once limited. +qps_limit = 1 + +# Maximum number of bytes to accept in a single request body. +max_body_bytes = 2_621_440 + +# Specifies which origin will be reported as allowed by speculative execution server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' + +# ========================================= +# Configuration options for the node client +# ========================================= +[rpc_server.node_client] +# The address of the node to connect to. +ip_address = '127.0.0.1' +port = 7779 +# Maximum size of a message in bytes. +max_message_size_bytes = 4_194_304 +# Timeout for a node request in seconds. +message_timeout_secs = 10 +# Timeout specifying how long to wait for binary port client to be available. 
+client_access_timeout_secs = 10 +# The amount of time in milliseconds to wait between sending keepalive requests. +keepalive_timeout_ms = 4_000 + +[rpc_server.node_client.exponential_backoff] +# The initial delay in milliseconds before the first retry. +initial_delay_ms = 1000 +# The maximum delay in milliseconds before a retry. +max_delay_ms = 32_000 +# The multiplier to apply to the previous delay to get the next delay. +coefficient = 2 +# Maximum number of connection attempts. +max_attempts = 30 diff --git a/resources/example_configs/default_rpc_only_config.toml b/resources/example_configs/default_rpc_only_config.toml new file mode 100644 index 00000000..7b277b38 --- /dev/null +++ b/resources/example_configs/default_rpc_only_config.toml @@ -0,0 +1,89 @@ +# ================================================== +# Configuration options for the JSON-RPC HTTP server +# ================================================== +[rpc_server.main_server] +# Enables the JSON-RPC HTTP server. +enable_server = true + +# Listening address for JSON-RPC HTTP server. If the port is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. If binding fails, +# the JSON-RPC HTTP server will not run, but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +ip_address = '0.0.0.0' +port = 7777 + +# The global max rate of requests (per second) before they are limited. +# Request will be delayed to the next 1 second bucket once limited. +qps_limit = 100 + +# Maximum number of bytes to accept in a single request body. +max_body_bytes = 2_621_440 + +# Specifies which origin will be reported as allowed by RPC server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. 
The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' + + +# ======================================================================== +# Configuration options for the speculative execution JSON-RPC HTTP server +# ======================================================================== +[rpc_server.speculative_exec_server] + +# Enables the speculative execution JSON-RPC HTTP server. +enable_server = true + +# Listening address for speculative execution JSON-RPC HTTP server. If the port +# is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. +# If binding fails, the speculative execution JSON-RPC HTTP server will not run, +# but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +ip_address = '0.0.0.0' +port = 7778 + +# The global max rate of requests (per second) before they are limited. +# Request will be delayed to the next 1 second bucket once limited. +qps_limit = 1 + +# Maximum number of bytes to accept in a single request body. +max_body_bytes = 2_621_440 + +# Specifies which origin will be reported as allowed by speculative execution server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' + +# ========================================= +# Configuration options for the node client +# ========================================= +[rpc_server.node_client] +# The address of the node to connect to. +ip_address = '127.0.0.1' +port = 7779 +# Maximum size of a message in bytes. +max_message_size_bytes = 4_194_304 +# Timeout for a node request in seconds. 
+message_timeout_secs = 10 +# Timeout specifying how long to wait for binary port client to be available. +client_access_timeout_secs = 10 +# The amount of time in milliseconds to wait between sending keepalive requests. +keepalive_timeout_ms = 4_000 + +[rpc_server.node_client.exponential_backoff] +# The initial delay in milliseconds before the first retry. +initial_delay_ms = 1000 +# The maximum delay in milliseconds before a retry. +max_delay_ms = 32_000 +# The multiplier to apply to the previous delay to get the next delay. +coefficient = 2 +# Maximum number of connection attempts. +max_attempts = 30 diff --git a/resources/default_config.toml b/resources/example_configs/default_sse_only_config.toml similarity index 69% rename from resources/default_config.toml rename to resources/example_configs/default_sse_only_config.toml index b38ae9f3..5ab0b882 100644 --- a/resources/default_config.toml +++ b/resources/example_configs/default_sse_only_config.toml @@ -1,4 +1,8 @@ -[[connections]] +[sse_server] +enable_server = true +disable_event_persistence = false + +[[sse_server.connections]] ip_address = "127.0.0.1" sse_port = 9999 rest_port = 8888 @@ -7,26 +11,29 @@ delay_between_retries_in_seconds = 5 allow_partial_connection = false enable_logging = true +[sse_server.event_stream_server] +port = 19999 +max_concurrent_subscribers = 100 +event_stream_buffer_length = 5000 + [storage] -storage_path = "/var/lib/casper-event-sidecar" +storage_folder = "/var/lib/casper-sidecar" [storage.sqlite_config] +enabled = true file_name = "sqlite_database.db3" max_connections_in_pool = 100 # https://www.sqlite.org/compile.html#default_wal_autocheckpoint wal_autocheckpointing_interval = 1000 -[rest_server] +[rest_api_server] +enable_server = true port = 18888 max_concurrent_requests = 50 max_requests_per_second = 50 -[event_stream_server] -port = 19999 -max_concurrent_subscribers = 100 -event_stream_buffer_length = 5000 - -[admin_server] +[admin_api_server] +enable_server = true port = 
18887 max_concurrent_requests = 1 max_requests_per_second = 1 \ No newline at end of file diff --git a/resources/maintainer_scripts/casper_event_sidecar/casper-event-sidecar.service b/resources/maintainer_scripts/casper_sidecar/casper-sidecar.service similarity index 69% rename from resources/maintainer_scripts/casper_event_sidecar/casper-event-sidecar.service rename to resources/maintainer_scripts/casper_sidecar/casper-sidecar.service index d39bc87d..0e34b74e 100644 --- a/resources/maintainer_scripts/casper_event_sidecar/casper-event-sidecar.service +++ b/resources/maintainer_scripts/casper_sidecar/casper-sidecar.service @@ -1,5 +1,5 @@ [Unit] -Description=Casper Event Sidecar +Description=Casper Sidecar Documentation=https://docs.casperlabs.io After=network-online.target # Stop restarting after 3 failures in 15 seconds @@ -8,7 +8,7 @@ StartLimitIntervalSec=15 [Service] Type=simple -ExecStart=/usr/bin/casper-event-sidecar --path-to-config /etc/casper-event-sidecar/config.toml +ExecStart=/usr/bin/casper-sidecar --path-to-config /etc/casper-sidecar/config.toml User=csidecar Group=csidecar Restart=on-failure diff --git a/resources/maintainer_scripts/debian/postinst b/resources/maintainer_scripts/debian/postinst index 6de3413c..7a4de836 100644 --- a/resources/maintainer_scripts/debian/postinst +++ b/resources/maintainer_scripts/debian/postinst @@ -4,8 +4,8 @@ set -e # Default Variables # --- DEFAULT_USERNAME="csidecar" -DEFAULT_CONFIG_DIRECTORY="/etc/casper-event-sidecar" -DEFAULT_DATA_DIRECTORY="/var/lib/casper-event-sidecar" +DEFAULT_CONFIG_DIRECTORY="/etc/casper-sidecar" +DEFAULT_DATA_DIRECTORY="/var/lib/casper-sidecar" # User Creation # --- diff --git a/resources/maintainer_scripts/debian/preinst b/resources/maintainer_scripts/debian/preinst index c223b452..6fa20475 100644 --- a/resources/maintainer_scripts/debian/preinst +++ b/resources/maintainer_scripts/debian/preinst @@ -4,8 +4,8 @@ set -e # Default Variables # --- DEFAULT_USERNAME="csidecar" 
-DEFAULT_CONFIG_DIRECTORY="/etc/casper-event-sidecar" -DEFAULT_DATA_DIRECTORY="/var/lib/casper-event-sidecar" +DEFAULT_CONFIG_DIRECTORY="/etc/casper-sidecar" +DEFAULT_DATA_DIRECTORY="/var/lib/casper-sidecar" # Creation of Files/Directories # --- diff --git a/resources/test/rpc_schema.json b/resources/test/rpc_schema.json new file mode 100644 index 00000000..6408a5dd --- /dev/null +++ b/resources/test/rpc_schema.json @@ -0,0 +1,8736 @@ +{ + "openrpc": "1.0.0-rc1", + "info": { + "version": "2.0.0", + "title": "Client API of Casper Node", + "description": "This describes the JSON-RPC 2.0 API of a node on the Casper network.", + "contact": { + "name": "Casper Labs", + "url": "https://casperlabs.io" + }, + "license": { + "name": "APACHE LICENSE, VERSION 2.0", + "url": "https://www.apache.org/licenses/LICENSE-2.0" + } + }, + "servers": [ + { + "name": "any Sidecar with JSON RPC API enabled", + "url": "http://IP:PORT/rpc/" + } + ], + "methods": [ + { + "name": "account_put_deploy", + "summary": "receives a Deploy to be executed by the network (DEPRECATED: use `account_put_transaction` instead)", + "params": [ + { + "name": "deploy", + "schema": { + "description": "The `Deploy`.", + "$ref": "#/components/schemas/Deploy" + }, + "required": true + } + ], + "result": { + "name": "account_put_deploy_result", + "schema": { + "description": "Result for \"account_put_deploy\" RPC response.", + "type": "object", + "required": [ + "api_version", + "deploy_hash" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "deploy_hash": { + "description": "The deploy hash.", + "$ref": "#/components/schemas/DeployHash" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "account_put_deploy_example", + "params": [ + { + "name": "deploy", + "value": { + "hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa", + "header": { + "account": 
"01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "gas_price": 1, + "body_hash": "d53cf72d17278fd47d399013ca389c50d589352f1a12593c0b8e01872a641b50", + "dependencies": [ + "0101010101010101010101010101010101010101010101010101010101010101" + ], + "chain_name": "casper-example" + }, + "payment": { + "StoredContractByName": { + "name": "casper-example", + "entry_point": "example-entry-point", + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] + } + }, + "session": { + "Transfer": { + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] + } + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "014c1a89f92e29dd74fc648f741137d9caf4edba97c5f9799ce0c9aa6b0c9b58db368c64098603dbecef645774c05dff057cb1f91f2cf390bbacce78aa6f084007" + } + ] + } + } + ], + "result": { + "name": "account_put_deploy_example_result", + "value": { + "api_version": "2.0.0", + "deploy_hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa" + } + } + } + ] + }, + { + "name": "account_put_transaction", + "summary": "receives a Transaction to be executed by the network", + "params": [ + { + "name": "transaction", + "schema": { + "description": "The `Transaction`.", + "$ref": "#/components/schemas/Transaction" + }, + "required": true + } + ], + "result": { + "name": "account_put_transaction_result", + "schema": { + "description": "Result for \"account_put_transaction\" RPC response.", + "type": "object", + "required": [ + "api_version", + "transaction_hash" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "transaction_hash": { + "description": "The transaction hash.", + "$ref": "#/components/schemas/TransactionHash" + } + }, + "additionalProperties": false + } + }, + "examples": [ + 
{ + "name": "account_put_transaction_example", + "params": [ + { + "name": "transaction", + "value": { + "Version1": { + "hash": "e6bc93a30584fefced99be08c1861164b357c74e4904b5d2e7146896aaaaf843", + "payload": { + "initiator_addr": { + "PublicKey": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + }, + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "chain_name": "casper-example", + "pricing_mode": { + "Fixed": { + "additional_computation_factor": 0, + "gas_price_tolerance": 5 + } + }, + "fields": { + "args": { + "Named": [ + [ + "source", + { + "cl_type": "URef", + "bytes": "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a07", + "parsed": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007" + } + ], + [ + "target", + { + "cl_type": "URef", + "bytes": "1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b00", + "parsed": "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000" + } + ], + [ + "amount", + { + "cl_type": "U512", + "bytes": "0500ac23fc06", + "parsed": "30000000000" + } + ], + [ + "id", + { + "cl_type": { + "Option": "U64" + }, + "bytes": "01e703000000000000", + "parsed": 999 + } + ] + ] + }, + "entry_point": "Transfer", + "scheduling": "Standard", + "target": "Native" + } + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "0124fd45fa4ad49f048d116ba86fa84cecc8e4a1c49cbae824db97472a9f5de6c63e6e77723e55d00b78605ee9a69a27ea23a73e47d29d0778534e053f18ca4101" + } + ] + } + } + } + ], + "result": { + "name": "account_put_transaction_example_result", + "value": { + "api_version": "2.0.0", + "transaction_hash": { + "Version1": "e6bc93a30584fefced99be08c1861164b357c74e4904b5d2e7146896aaaaf843" + } + } + } + } + ] + }, + { + "name": "info_get_deploy", + "summary": "returns a Deploy from the network (DEPRECATED: use `info_get_transaction` instead)", + "params": [ + { + "name": "deploy_hash", + 
"schema": { + "description": "The deploy hash.", + "$ref": "#/components/schemas/DeployHash" + }, + "required": true + }, + { + "name": "finalized_approvals", + "schema": { + "description": "Whether to return the deploy with the finalized approvals substituted. If `false` or omitted, returns the deploy with the approvals that were originally received by the node.", + "default": false, + "type": "boolean" + }, + "required": false + } + ], + "result": { + "name": "info_get_deploy_result", + "schema": { + "description": "Result for \"info_get_deploy\" RPC response.", + "type": "object", + "required": [ + "api_version", + "deploy" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "deploy": { + "description": "The deploy.", + "$ref": "#/components/schemas/Deploy" + }, + "execution_info": { + "description": "Execution info, if available.", + "anyOf": [ + { + "$ref": "#/components/schemas/ExecutionInfo" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_deploy_example", + "params": [ + { + "name": "deploy_hash", + "value": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa" + }, + { + "name": "finalized_approvals", + "value": true + } + ], + "result": { + "name": "info_get_deploy_example_result", + "value": { + "api_version": "2.0.0", + "deploy": { + "hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa", + "header": { + "account": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "gas_price": 1, + "body_hash": "d53cf72d17278fd47d399013ca389c50d589352f1a12593c0b8e01872a641b50", + "dependencies": [ + "0101010101010101010101010101010101010101010101010101010101010101" + ], + "chain_name": "casper-example" + }, + "payment": { + "StoredContractByName": { + "name": "casper-example", + "entry_point": "example-entry-point", + "args": [ + 
[ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] + } + }, + "session": { + "Transfer": { + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] + } + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "014c1a89f92e29dd74fc648f741137d9caf4edba97c5f9799ce0c9aa6b0c9b58db368c64098603dbecef645774c05dff057cb1f91f2cf390bbacce78aa6f084007" + } + ] + }, + "execution_info": { + "block_hash": "1f8187a82bd4b28948add6f8c0ea15a8aac6160088dbc281499fdcf2f0d95a79", + "block_height": 10, + "execution_result": { + "Version2": { + "initiator": { + "PublicKey": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + }, + "error_message": null, + "limit": "123456", + "consumed": "100000", + "cost": "246912", + "transfers": [ + { + "Version2": { + "transaction_hash": { + "Version1": "0101010101010101010101010101010101010101010101010101010101010101" + }, + "from": { + "AccountHash": "account-hash-0202020202020202020202020202020202020202020202020202020202020202" + }, + "to": "account-hash-0303030303030303030303030303030303030303030303030303030303030303", + "source": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007", + "target": "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000", + "amount": "1000000000000", + "gas": "2500000000", + "id": 999 + } + } + ], + "size_estimate": 206, + "effects": [ + { + "key": "account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb", + "kind": { + "AddUInt64": 8 + } + }, + { + "key": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + "kind": "Identity" + } + ] + } + } + } + } + } + } + ] + }, + { + "name": "info_get_transaction", + "summary": "returns a Transaction from the network", + "params": [ + { + "name": "transaction_hash", + "schema": { + "description": "The transaction hash.", + 
"$ref": "#/components/schemas/TransactionHash" + }, + "required": true + }, + { + "name": "finalized_approvals", + "schema": { + "description": "Whether to return the transaction with the finalized approvals substituted. If `false` or omitted, returns the transaction with the approvals that were originally received by the node.", + "default": false, + "type": "boolean" + }, + "required": false + } + ], + "result": { + "name": "info_get_transaction_result", + "schema": { + "description": "Result for \"info_get_transaction\" RPC response.", + "type": "object", + "required": [ + "api_version", + "transaction" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "transaction": { + "description": "The transaction.", + "$ref": "#/components/schemas/Transaction" + }, + "execution_info": { + "description": "Execution info, if available.", + "anyOf": [ + { + "$ref": "#/components/schemas/ExecutionInfo" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_transaction_example", + "params": [ + { + "name": "transaction_hash", + "value": { + "Version1": "e6bc93a30584fefced99be08c1861164b357c74e4904b5d2e7146896aaaaf843" + } + }, + { + "name": "finalized_approvals", + "value": true + } + ], + "result": { + "name": "info_get_transaction_example_result", + "value": { + "api_version": "2.0.0", + "transaction": { + "Version1": { + "hash": "e6bc93a30584fefced99be08c1861164b357c74e4904b5d2e7146896aaaaf843", + "payload": { + "initiator_addr": { + "PublicKey": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + }, + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "chain_name": "casper-example", + "pricing_mode": { + "Fixed": { + "additional_computation_factor": 0, + "gas_price_tolerance": 5 + } + }, + "fields": { + "args": { + "Named": [ + [ + "source", + { + "cl_type": "URef", + "bytes": 
"0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a07", + "parsed": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007" + } + ], + [ + "target", + { + "cl_type": "URef", + "bytes": "1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b00", + "parsed": "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000" + } + ], + [ + "amount", + { + "cl_type": "U512", + "bytes": "0500ac23fc06", + "parsed": "30000000000" + } + ], + [ + "id", + { + "cl_type": { + "Option": "U64" + }, + "bytes": "01e703000000000000", + "parsed": 999 + } + ] + ] + }, + "entry_point": "Transfer", + "scheduling": "Standard", + "target": "Native" + } + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "0124fd45fa4ad49f048d116ba86fa84cecc8e4a1c49cbae824db97472a9f5de6c63e6e77723e55d00b78605ee9a69a27ea23a73e47d29d0778534e053f18ca4101" + } + ] + } + }, + "execution_info": { + "block_hash": "1f8187a82bd4b28948add6f8c0ea15a8aac6160088dbc281499fdcf2f0d95a79", + "block_height": 10, + "execution_result": { + "Version2": { + "initiator": { + "PublicKey": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + }, + "error_message": null, + "limit": "123456", + "consumed": "100000", + "cost": "246912", + "transfers": [ + { + "Version2": { + "transaction_hash": { + "Version1": "0101010101010101010101010101010101010101010101010101010101010101" + }, + "from": { + "AccountHash": "account-hash-0202020202020202020202020202020202020202020202020202020202020202" + }, + "to": "account-hash-0303030303030303030303030303030303030303030303030303030303030303", + "source": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007", + "target": "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000", + "amount": "1000000000000", + "gas": "2500000000", + "id": 999 + } + } + ], + "size_estimate": 206, + "effects": [ + { + "key": 
"account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb", + "kind": { + "AddUInt64": 8 + } + }, + { + "key": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + "kind": "Identity" + } + ] + } + } + } + } + } + } + ] + }, + { + "name": "state_get_account_info", + "summary": "returns an Account from the network", + "params": [ + { + "name": "account_identifier", + "schema": { + "description": "The public key of the Account.", + "$ref": "#/components/schemas/AccountIdentifier" + }, + "required": true + }, + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockIdentifier" + }, + { + "type": "null" + } + ] + }, + "required": false + } + ], + "result": { + "name": "state_get_account_info_result", + "schema": { + "description": "Result for \"state_get_account_info\" RPC response.", + "type": "object", + "required": [ + "account", + "api_version", + "merkle_proof" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "account": { + "description": "The account.", + "$ref": "#/components/schemas/Account" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_account_info_example", + "params": [ + { + "name": "account_identifier", + "value": "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29" + }, + { + "name": "block_identifier", + "value": { + "Hash": "0707070707070707070707070707070707070707070707070707070707070707" + } + } + ], + "result": { + "name": "state_get_account_info_example_result", + "value": { + "api_version": "2.0.0", + "account": { + "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", + "named_keys": [ + { + "name": "main_purse", + "key": 
"uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" + } + ], + "main_purse": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + "associated_keys": [ + { + "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", + "weight": 1 + } + ], + "action_thresholds": { + "deployment": 1, + "key_management": 1 + } + }, + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + ] + }, + { + "name": "state_get_entity", + "summary": "returns an AddressableEntity from the network", + "params": [ + { + "name": "entity_identifier", + "schema": { + "description": "The identifier of the entity.", + "$ref": "#/components/schemas/EntityIdentifier" + }, + "required": true + }, + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockIdentifier" + }, + { + "type": "null" + } + ] + }, + "required": false + }, + { + "name": "include_bytecode", + "schema": { + "description": "Whether to include the entity's bytecode in 
the response.", + "type": [ + "boolean", + "null" + ] + }, + "required": false + } + ], + "result": { + "name": "state_get_entity_result", + "schema": { + "description": "Result for \"state_get_entity\" RPC response.", + "type": "object", + "required": [ + "api_version", + "entity", + "merkle_proof" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "entity": { + "description": "The addressable entity or a legacy account.", + "$ref": "#/components/schemas/EntityWithBackwardCompat" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_entity_example", + "params": [ + { + "name": "entity_identifier", + "value": { + "EntityAddr": "entity-account-0000000000000000000000000000000000000000000000000000000000000000" + } + }, + { + "name": "block_identifier", + "value": { + "Hash": "0707070707070707070707070707070707070707070707070707070707070707" + } + }, + { + "name": "include_bytecode", + "value": null + } + ], + "result": { + "name": "state_get_entity_example_result", + "value": { + "api_version": "2.0.0", + "entity": { + "AddressableEntity": { + "entity": { + "protocol_version": "2.0.0", + "entity_kind": { + "Account": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c" + }, + "package_hash": "package-0000000000000000000000000000000000000000000000000000000000000000", + "byte_code_hash": "byte-code-0000000000000000000000000000000000000000000000000000000000000000", + "main_purse": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + "associated_keys": [ + { + "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", + "weight": 1 + } + ], + "action_thresholds": { + "deployment": 1, + "upgrade_management": 1, + "key_management": 1 + } + }, + "named_keys": [ + { + "name": "key", + "key": 
"hash-0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "entry_points": [ + { + "V1CasperVm": { + "name": "entry_point", + "args": [], + "ret": "Unit", + "access": "Public", + "entry_point_type": "Caller", + "entry_point_payment": "Caller" + } + } + ], + "bytecode": null + } + }, + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + ] + }, + { + "name": "state_get_package", + "summary": "returns a Package from the network", + "params": [ + { + "name": "package_identifier", + "schema": { + "description": "The identifier of the package.", + "$ref": "#/components/schemas/PackageIdentifier" + }, + "required": true + }, + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockIdentifier" + }, + { + "type": "null" + } + ] + }, + "required": false + } + ], + "result": { + "name": "state_get_package_result", + "schema": { + "description": "Result for \"state_get_entity\" RPC response.", + "type": "object", + "required": [ + "api_version", + "merkle_proof", + 
"package" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "package": { + "description": "The addressable entity or a legacy account.", + "$ref": "#/components/schemas/PackageWithBackwardCompat" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_package_example", + "params": [ + { + "name": "package_identifier", + "value": { + "ContractPackageHash": "contract-package-0000000000000000000000000000000000000000000000000000000000000000" + } + }, + { + "name": "block_identifier", + "value": { + "Hash": "0707070707070707070707070707070707070707070707070707070707070707" + } + } + ], + "result": { + "name": "state_get_package_example_result", + "value": { + "api_version": "2.0.0", + "package": { + "Package": { + "versions": [], + "disabled_versions": [], + "groups": [], + "lock_status": "Unlocked" + } + }, + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + ] + }, + { + "name": 
"state_get_dictionary_item", + "summary": "returns an item from a Dictionary", + "params": [ + { + "name": "state_root_hash", + "schema": { + "description": "Hash of the state root", + "$ref": "#/components/schemas/Digest" + }, + "required": true + }, + { + "name": "dictionary_identifier", + "schema": { + "description": "The Dictionary query identifier.", + "$ref": "#/components/schemas/DictionaryIdentifier" + }, + "required": true + } + ], + "result": { + "name": "state_get_dictionary_item_result", + "schema": { + "description": "Result for \"state_get_dictionary_item\" RPC response.", + "type": "object", + "required": [ + "api_version", + "dictionary_key", + "merkle_proof", + "stored_value" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "dictionary_key": { + "description": "The key under which the value is stored.", + "type": "string" + }, + "stored_value": { + "description": "The stored value.", + "$ref": "#/components/schemas/StoredValue" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_dictionary_item_example", + "params": [ + { + "name": "state_root_hash", + "value": "0808080808080808080808080808080808080808080808080808080808080808" + }, + { + "name": "dictionary_identifier", + "value": { + "URef": { + "seed_uref": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + "dictionary_item_key": "a_unique_entry_identifier" + } + } + } + ], + "result": { + "name": "state_get_dictionary_item_example_result", + "value": { + "api_version": "2.0.0", + "dictionary_key": "dictionary-67518854aa916c97d4e53df8570c8217ccc259da2721b692102d76acd0ee8d1f", + "stored_value": { + "CLValue": { + "cl_type": "U64", + "bytes": "0100000000000000", + "parsed": 1 + } + }, + "merkle_proof": 
"01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + ] + }, + { + "name": "query_global_state", + "summary": "a query to global state using either a Block hash or state root hash", + "params": [ + { + "name": "key", + "schema": { + "description": "The key under which to query.", + "$ref": "#/components/schemas/Key" + }, + "required": true + }, + { + "name": "state_identifier", + "schema": { + "description": "The identifier used for the query. 
If not provided, the tip of the chain will be used.", + "anyOf": [ + { + "$ref": "#/components/schemas/GlobalStateIdentifier" + }, + { + "type": "null" + } + ] + }, + "required": false + }, + { + "name": "path", + "schema": { + "description": "The path components starting from the key as base.", + "default": [], + "type": "array", + "items": { + "type": "string" + } + }, + "required": false + } + ], + "result": { + "name": "query_global_state_result", + "schema": { + "description": "Result for \"query_global_state\" RPC response.", + "type": "object", + "required": [ + "api_version", + "merkle_proof", + "stored_value" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "block_header": { + "description": "The block header if a Block hash was provided.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockHeader" + }, + { + "type": "null" + } + ] + }, + "stored_value": { + "description": "The stored value.", + "$ref": "#/components/schemas/StoredValue" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "query_global_state_example", + "params": [ + { + "name": "state_identifier", + "value": { + "BlockHash": "1f8187a82bd4b28948add6f8c0ea15a8aac6160088dbc281499fdcf2f0d95a79" + } + }, + { + "name": "key", + "value": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1" + }, + { + "name": "path", + "value": [] + } + ], + "result": { + "name": "query_global_state_example_result", + "value": { + "api_version": "2.0.0", + "block_header": { + "Version2": { + "parent_hash": "0707070707070707070707070707070707070707070707070707070707070707", + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "body_hash": "0505050505050505050505050505050505050505050505050505050505050505", + "random_bit": true, + "accumulated_seed": 
"ac979f51525cfd979b14aa7dc0737c5154eabe0db9280eceaa8dc8d2905b20d5", + "era_end": { + "equivocators": [ + "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29" + ], + "inactive_validators": [ + "018139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394" + ], + "next_era_validator_weights": [ + { + "validator": "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29", + "weight": "123" + }, + { + "validator": "016e7a1cdd29b0b78fd13af4c5598feff4ef2a97166e3ca6f2e4fbfccd80505bf1", + "weight": "456" + }, + { + "validator": "018a875fff1eb38451577acd5afee405456568dd7c89e090863a0557bc7af49f17", + "weight": "789" + } + ], + "rewards": {}, + "next_era_gas_price": 1 + }, + "timestamp": "2020-11-17T00:39:24.072Z", + "era_id": 1, + "height": 10, + "protocol_version": "1.0.0", + "proposer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "current_gas_price": 1, + "last_switch_block_hash": "0909090909090909090909090909090909090909090909090909090909090909" + } + }, + "stored_value": { + "Account": { + "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", + "named_keys": [ + { + "name": "main_purse", + "key": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" + } + ], + "main_purse": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + "associated_keys": [ + { + "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", + "weight": 1 + } + ], + "action_thresholds": { + "deployment": 1, + "key_management": 1 + } + } + }, + "merkle_proof": 
"01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + ] + }, + { + "name": "query_balance", + "summary": "query for a balance using a purse identifier and a state identifier", + "params": [ + { + "name": "purse_identifier", + "schema": { + "description": "The identifier to obtain the purse corresponding to balance query.", + "$ref": "#/components/schemas/PurseIdentifier" + }, + "required": true + }, + { + "name": "state_identifier", + "schema": { + "description": "The state identifier used for the query, if none is passed the tip of the chain will be used.", + "anyOf": [ + { + "$ref": "#/components/schemas/GlobalStateIdentifier" + }, + { + "type": "null" + } + ] + }, + "required": false + } + ], + "result": { + "name": "query_balance_result", + "schema": { + "description": "Result for \"query_balance\" RPC response.", + "type": "object", + "required": [ + "api_version", + "balance" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "balance": { + "description": "The available balance in motes (total balance - sum of all active 
holds).", + "$ref": "#/components/schemas/U512" + } + } + } + }, + "examples": [ + { + "name": "query_balance_example", + "params": [ + { + "name": "state_identifier", + "value": { + "BlockHash": "0707070707070707070707070707070707070707070707070707070707070707" + } + }, + { + "name": "purse_identifier", + "value": { + "main_purse_under_account_hash": "account-hash-0909090909090909090909090909090909090909090909090909090909090909" + } + } + ], + "result": { + "name": "query_balance_example_result", + "value": { + "api_version": "2.0.0", + "balance": "123456" + } + } + } + ] + }, + { + "name": "query_balance_details", + "summary": "query for full balance information using a purse identifier and a state identifier", + "params": [ + { + "name": "purse_identifier", + "schema": { + "description": "The identifier to obtain the purse corresponding to balance query.", + "$ref": "#/components/schemas/PurseIdentifier" + }, + "required": true + }, + { + "name": "state_identifier", + "schema": { + "description": "The identifier for the state used for the query, if none is passed, the latest block will be used.", + "anyOf": [ + { + "$ref": "#/components/schemas/GlobalStateIdentifier" + }, + { + "type": "null" + } + ] + }, + "required": false + } + ], + "result": { + "name": "query_balance_details_result", + "schema": { + "description": "Result for \"query_balance_details\" RPC response.", + "type": "object", + "required": [ + "api_version", + "available_balance", + "holds", + "total_balance", + "total_balance_proof" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "total_balance": { + "description": "The purses total balance, not considering holds.", + "$ref": "#/components/schemas/U512" + }, + "available_balance": { + "description": "The available balance in motes (total balance - sum of all active holds).", + "$ref": "#/components/schemas/U512" + }, + "total_balance_proof": { + "description": "A proof that the given 
value is present in the Merkle trie.", + "type": "string" + }, + "holds": { + "description": "Holds active at the requested point in time.", + "type": "array", + "items": { + "$ref": "#/components/schemas/BalanceHoldWithProof" + } + } + } + } + }, + "examples": [ + { + "name": "query_balance_details_example", + "params": [ + { + "name": "state_identifier", + "value": { + "BlockHash": "0707070707070707070707070707070707070707070707070707070707070707" + } + }, + { + "name": "purse_identifier", + "value": { + "main_purse_under_account_hash": "account-hash-0909090909090909090909090909090909090909090909090909090909090909" + } + } + ], + "result": { + "name": "query_balance_details_example_result", + "value": { + "api_version": "2.0.0", + "total_balance": "123456", + "available_balance": "123456", + "total_balance_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3", + "holds": [ + { + "time": 0, + "amount": "123456", + "proof": 
"01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + ] + } + } + } + ] + }, + { + "name": "info_get_peers", + "summary": "returns a list of peers connected to the node", + "params": [], + "result": { + "name": "info_get_peers_result", + "schema": { + "description": "Result for \"info_get_peers\" RPC response.", + "type": "object", + "required": [ + "api_version", + "peers" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "peers": { + "description": "The node ID and network address of each connected peer.", + "$ref": "#/components/schemas/Peers" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_peers_example", + "params": [], + "result": { + "name": "info_get_peers_example_result", + "value": { + "api_version": "2.0.0", + "peers": [ + { + "node_id": "tls:0101..0101", + "address": "127.0.0.1:54321" + } + ] + } + } + } + ] + }, + { + "name": "info_get_status", + "summary": "returns the current status of the node", + "params": [], + "result": { + "name": "info_get_status_result", + "schema": { + 
"description": "Result for \"info_get_status\" RPC response.", + "type": "object", + "required": [ + "api_version", + "available_block_range", + "block_sync", + "build_version", + "chainspec_name", + "last_progress", + "peers", + "protocol_version", + "reactor_state", + "starting_state_root_hash", + "uptime" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "protocol_version": { + "description": "The current Casper protocol version.", + "$ref": "#/components/schemas/ProtocolVersion" + }, + "peers": { + "description": "The node ID and network address of each connected peer.", + "$ref": "#/components/schemas/Peers" + }, + "build_version": { + "description": "The compiled node version.", + "type": "string" + }, + "chainspec_name": { + "description": "The chainspec name.", + "type": "string" + }, + "starting_state_root_hash": { + "description": "The state root hash of the lowest block in the available block range.", + "$ref": "#/components/schemas/Digest" + }, + "last_added_block_info": { + "description": "The minimal info of the last block from the linear chain.", + "anyOf": [ + { + "$ref": "#/components/schemas/MinimalBlockInfo" + }, + { + "type": "null" + } + ] + }, + "our_public_signing_key": { + "description": "Our public signing key.", + "anyOf": [ + { + "$ref": "#/components/schemas/PublicKey" + }, + { + "type": "null" + } + ] + }, + "round_length": { + "description": "The next round length if this node is a validator.", + "anyOf": [ + { + "$ref": "#/components/schemas/TimeDiff" + }, + { + "type": "null" + } + ] + }, + "next_upgrade": { + "description": "Information about the next scheduled upgrade.", + "anyOf": [ + { + "$ref": "#/components/schemas/NextUpgrade" + }, + { + "type": "null" + } + ] + }, + "uptime": { + "description": "Time that passed since the node has started.", + "$ref": "#/components/schemas/TimeDiff" + }, + "reactor_state": { + "description": "The name of the current state of node 
reactor.", + "type": "string" + }, + "last_progress": { + "description": "Timestamp of the last recorded progress in the reactor.", + "$ref": "#/components/schemas/Timestamp" + }, + "available_block_range": { + "description": "The available block range in storage.", + "$ref": "#/components/schemas/AvailableBlockRange" + }, + "block_sync": { + "description": "The status of the block synchronizer builders.", + "$ref": "#/components/schemas/BlockSynchronizerStatus" + }, + "latest_switch_block_hash": { + "description": "The hash of the latest switch block.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockHash" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_status_example", + "params": [], + "result": { + "name": "info_get_status_example_result", + "value": { + "api_version": "2.0.0", + "protocol_version": "2.0.0", + "peers": [ + { + "node_id": "tls:0101..0101", + "address": "127.0.0.1:54321" + } + ], + "build_version": "1.0.0-xxxxxxxxx@DEBUG", + "chainspec_name": "casper-example", + "starting_state_root_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_added_block_info": { + "hash": "1f8187a82bd4b28948add6f8c0ea15a8aac6160088dbc281499fdcf2f0d95a79", + "timestamp": "2020-11-17T00:39:24.072Z", + "era_id": 1, + "height": 10, + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "creator": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + }, + "our_public_signing_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "round_length": "1m 5s 536ms", + "next_upgrade": { + "activation_point": 42, + "protocol_version": "2.0.1" + }, + "uptime": "13s", + "reactor_state": "Initialize", + "last_progress": "1970-01-01T00:00:00.000Z", + "available_block_range": { + "low": 0, + "high": 0 + }, + "block_sync": { + "historical": { + "block_hash": 
"16ddf28e2b3d2e17f4cef36f8b58827eca917af225d139b0c77df3b4a67dc55e", + "block_height": 40, + "acquisition_state": "have strict finality(40) for: block hash 16dd..c55e" + }, + "forward": { + "block_hash": "59907b1e32a9158169c4d89d9ce5ac9164fc31240bfcfb0969227ece06d74983", + "block_height": 6701, + "acquisition_state": "have block body(6701) for: block hash 5990..4983" + } + }, + "latest_switch_block_hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + } + } + ] + }, + { + "name": "info_get_reward", + "summary": "returns the reward for a given era and a validator or a delegator", + "params": [ + { + "name": "validator", + "schema": { + "description": "The public key of the validator.", + "$ref": "#/components/schemas/PublicKey" + }, + "required": true + }, + { + "name": "era_identifier", + "schema": { + "description": "The era identifier. If `None`, the last finalized era is used.", + "anyOf": [ + { + "$ref": "#/components/schemas/EraIdentifier" + }, + { + "type": "null" + } + ] + }, + "required": false + }, + { + "name": "delegator", + "schema": { + "description": "The public key of the delegator. If `Some`, the rewards for the delegator are returned. 
If `None`, the rewards for the validator are returned.", + "anyOf": [ + { + "$ref": "#/components/schemas/PublicKey" + }, + { + "type": "null" + } + ] + }, + "required": false + } + ], + "result": { + "name": "info_get_reward_result", + "schema": { + "description": "Result for \"info_get_reward\" RPC response.", + "type": "object", + "required": [ + "api_version", + "delegation_rate", + "era_id", + "reward_amount", + "switch_block_hash" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "reward_amount": { + "description": "The total reward amount in the requested era.", + "$ref": "#/components/schemas/U512" + }, + "era_id": { + "description": "The era for which the reward was calculated.", + "$ref": "#/components/schemas/EraId" + }, + "delegation_rate": { + "description": "The delegation rate of the validator.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "switch_block_hash": { + "description": "The switch block hash at which the reward was distributed.", + "$ref": "#/components/schemas/BlockHash" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_reward_example", + "params": [ + { + "name": "era_identifier", + "value": { + "Era": 1 + } + }, + { + "name": "validator", + "value": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + }, + { + "name": "delegator", + "value": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + } + ], + "result": { + "name": "info_get_reward_example_result", + "value": { + "api_version": "2.0.0", + "reward_amount": "42", + "era_id": 1, + "delegation_rate": 20, + "switch_block_hash": "0000000000000000000000000000000000000000000000000000000000000000" + } + } + } + ] + }, + { + "name": "info_get_validator_changes", + "summary": "returns status changes of active validators", + "params": [], + "result": { + "name": "info_get_validator_changes_result", + "schema": { + "description": "Result 
for the \"info_get_validator_changes\" RPC.", + "type": "object", + "required": [ + "api_version", + "changes" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "changes": { + "description": "The validators' status changes.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonValidatorChanges" + } + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_validator_changes_example", + "params": [], + "result": { + "name": "info_get_validator_changes_example_result", + "value": { + "api_version": "2.0.0", + "changes": [ + { + "public_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "status_changes": [ + { + "era_id": 1, + "validator_change": "Added" + } + ] + } + ] + } + } + } + ] + }, + { + "name": "info_get_chainspec", + "summary": "returns the raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files", + "params": [], + "result": { + "name": "info_get_chainspec_result", + "schema": { + "description": "Result for the \"info_get_chainspec\" RPC.", + "type": "object", + "required": [ + "api_version", + "chainspec_bytes" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "chainspec_bytes": { + "description": "The chainspec file bytes.", + "$ref": "#/components/schemas/ChainspecRawBytes" + } + } + } + }, + "examples": [ + { + "name": "info_get_chainspec_example", + "params": [], + "result": { + "name": "info_get_chainspec_example_result", + "value": { + "api_version": "2.0.0", + "chainspec_bytes": { + "chainspec_bytes": "2a2a", + "maybe_genesis_accounts_bytes": null, + "maybe_global_state_bytes": null + } + } + } + } + ] + }, + { + "name": "chain_get_block", + "summary": "returns a Block from the network", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": 
"#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_block_result", + "schema": { + "description": "Result for \"chain_get_block\" RPC response.", + "type": "object", + "required": [ + "api_version" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "block_with_signatures": { + "description": "The block, if found.", + "anyOf": [ + { + "$ref": "#/components/schemas/JsonBlockWithSignatures" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "chain_get_block_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Hash": "1f8187a82bd4b28948add6f8c0ea15a8aac6160088dbc281499fdcf2f0d95a79" + } + } + ], + "result": { + "name": "chain_get_block_example_result", + "value": { + "api_version": "2.0.0", + "block_with_signatures": { + "block": { + "Version2": { + "hash": "1f8187a82bd4b28948add6f8c0ea15a8aac6160088dbc281499fdcf2f0d95a79", + "header": { + "parent_hash": "0707070707070707070707070707070707070707070707070707070707070707", + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "body_hash": "a5fe75ebf2edca0b9156bb968b66936325c7b59ae96d34027c132fd09555af8b", + "random_bit": true, + "accumulated_seed": "ac979f51525cfd979b14aa7dc0737c5154eabe0db9280eceaa8dc8d2905b20d5", + "era_end": { + "equivocators": [ + "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29" + ], + "inactive_validators": [ + "018139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394" + ], + "next_era_validator_weights": [ + { + "validator": "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29", + "weight": "123" + }, + { + "validator": "016e7a1cdd29b0b78fd13af4c5598feff4ef2a97166e3ca6f2e4fbfccd80505bf1", + "weight": "456" + }, + { + "validator": "018a875fff1eb38451577acd5afee405456568dd7c89e090863a0557bc7af49f17", + "weight": "789" + } + 
], + "rewards": {}, + "next_era_gas_price": 1 + }, + "timestamp": "2020-11-17T00:39:24.072Z", + "era_id": 1, + "height": 10, + "protocol_version": "1.0.0", + "proposer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "current_gas_price": 1, + "last_switch_block_hash": "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a" + }, + "body": { + "transactions": { + "0": [ + { + "Version1": "1414141414141414141414141414141414141414141414141414141414141414" + } + ], + "1": [ + { + "Version1": "1515151515151515151515151515151515151515151515151515151515151515" + } + ], + "2": [ + { + "Version1": "1616161616161616161616161616161616161616161616161616161616161616" + } + ] + }, + "rewarded_signatures": [] + } + } + }, + "proofs": [ + { + "public_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "01cf38841e5eebba855671fcf9ea88524cb289c67bd63b14ddc97366554f95580b59ae2f4b9bcba449bed91a1502429ca0dca8a74d37b9d32e8070b00a21b00d0e" + } + ] + } + } + } + } + ] + }, + { + "name": "chain_get_block_transfers", + "summary": "returns all transfers for a Block from the network", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block hash.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_block_transfers_result", + "schema": { + "description": "Result for \"chain_get_block_transfers\" RPC response.", + "type": "object", + "required": [ + "api_version" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "block_hash": { + "description": "The block hash, if found.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockHash" + }, + { + "type": "null" + } + ] + }, + "transfers": { + "description": "The block's transfers, if found.", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/components/schemas/Transfer" + } + } + }, + "additionalProperties": 
false + } + }, + "examples": [ + { + "name": "chain_get_block_transfers_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Hash": "0707070707070707070707070707070707070707070707070707070707070707" + } + } + ], + "result": { + "name": "chain_get_block_transfers_example_result", + "value": { + "api_version": "2.0.0", + "block_hash": "0707070707070707070707070707070707070707070707070707070707070707", + "transfers": [ + { + "Version2": { + "transaction_hash": { + "Version1": "0101010101010101010101010101010101010101010101010101010101010101" + }, + "from": { + "AccountHash": "account-hash-0202020202020202020202020202020202020202020202020202020202020202" + }, + "to": "account-hash-0303030303030303030303030303030303030303030303030303030303030303", + "source": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007", + "target": "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000", + "amount": "1000000000000", + "gas": "2500000000", + "id": 999 + } + } + ] + } + } + } + ] + }, + { + "name": "chain_get_state_root_hash", + "summary": "returns a state root hash at a given Block", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block hash.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_state_root_hash_result", + "schema": { + "description": "Result for \"chain_get_state_root_hash\" RPC response.", + "type": "object", + "required": [ + "api_version" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "state_root_hash": { + "description": "Hex-encoded hash of the state root.", + "anyOf": [ + { + "$ref": "#/components/schemas/Digest" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "chain_get_state_root_hash_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Height": 
10 + } + } + ], + "result": { + "name": "chain_get_state_root_hash_example_result", + "value": { + "api_version": "2.0.0", + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808" + } + } + } + ] + }, + { + "name": "state_get_item", + "summary": "returns a stored value from the network. This RPC is deprecated, use `query_global_state` instead.", + "params": [ + { + "name": "state_root_hash", + "schema": { + "description": "Hash of the state root.", + "$ref": "#/components/schemas/Digest" + }, + "required": true + }, + { + "name": "key", + "schema": { + "description": "The key under which to query.", + "$ref": "#/components/schemas/Key" + }, + "required": true + }, + { + "name": "path", + "schema": { + "description": "The path components starting from the key as base.", + "default": [], + "type": "array", + "items": { + "type": "string" + } + }, + "required": false + } + ], + "result": { + "name": "state_get_item_result", + "schema": { + "description": "Result for \"state_get_item\" RPC response.", + "type": "object", + "required": [ + "api_version", + "merkle_proof", + "stored_value" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "stored_value": { + "description": "The stored value.", + "$ref": "#/components/schemas/StoredValue" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_item_example", + "params": [ + { + "name": "state_root_hash", + "value": "0808080808080808080808080808080808080808080808080808080808080808" + }, + { + "name": "key", + "value": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1" + }, + { + "name": "path", + "value": [ + "inner" + ] + } + ], + "result": { + "name": "state_get_item_example_result", + "value": { + "api_version": "2.0.0", + "stored_value": { + "CLValue": { + "cl_type": "U64", + "bytes": 
"0100000000000000", + "parsed": 1 + } + }, + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + ] + }, + { + "name": "state_get_balance", + "summary": "returns a purse's balance from the network", + "params": [ + { + "name": "state_root_hash", + "schema": { + "description": "The hash of state root.", + "$ref": "#/components/schemas/Digest" + }, + "required": true + }, + { + "name": "purse_uref", + "schema": { + "description": "Formatted URef.", + "type": "string" + }, + "required": true + } + ], + "result": { + "name": "state_get_balance_result", + "schema": { + "description": "Result for \"state_get_balance\" RPC response.", + "type": "object", + "required": [ + "api_version", + "balance_value", + "merkle_proof" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "balance_value": { + "description": "The available balance in motes (total balance - sum of all active holds).", + "$ref": "#/components/schemas/U512" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + 
"additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_balance_example", + "params": [ + { + "name": "state_root_hash", + "value": "0808080808080808080808080808080808080808080808080808080808080808" + }, + { + "name": "purse_uref", + "value": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" + } + ], + "result": { + "name": "state_get_balance_example_result", + "value": { + "api_version": "2.0.0", + "balance_value": "123456", + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + ] + }, + { + "name": "chain_get_era_info_by_switch_block", + "summary": "returns an EraInfo from the network", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_era_info_by_switch_block_result", + "schema": { + "description": "Result for \"chain_get_era_info\" RPC response.", + "type": "object", + "required": [ + "api_version" + ], + "properties": { + "api_version": { + 
"description": "The RPC API version.", + "type": "string" + }, + "era_summary": { + "description": "The era summary.", + "anyOf": [ + { + "$ref": "#/components/schemas/EraSummary" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "chain_get_era_info_by_switch_block_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Hash": "1f8187a82bd4b28948add6f8c0ea15a8aac6160088dbc281499fdcf2f0d95a79" + } + } + ], + "result": { + "name": "chain_get_era_info_by_switch_block_example_result", + "value": { + "api_version": "2.0.0", + "era_summary": { + "block_hash": "1f8187a82bd4b28948add6f8c0ea15a8aac6160088dbc281499fdcf2f0d95a79", + "era_id": 42, + "stored_value": { + "EraInfo": { + "seigniorage_allocations": [ + { + "DelegatorKind": { + "delegator_kind": { + "PublicKey": "01e1b46a25baa8a5c28beb3c9cfb79b572effa04076f00befa57eb70b016153f18" + }, + "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + "amount": "1000" + } + }, + { + "Validator": { + "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + "amount": "2000" + } + } + ] + } + }, + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "merkle_proof": 
"01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + } + ] + }, + { + "name": "state_get_auction_info", + "summary": "returns the bids and validators as of either a specific block (by height or hash), or the most recently added block. This is a casper 1.x retro-compatibility endpoint. For blocks created in 1.x protocol it will work exactly the same as it used to. \n For 2.x blocks it will try to retrofit the changed data structure into previous schema - but it is a lossy process. Use `state_get_auction_info_v2` endpoint to get data in new format. 
*IMPORTANT* This method is deprecated, has been added only for compatibility with retired nodes json-rpc API and will be removed in a future release of sidecar.", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "state_get_auction_info_result", + "schema": { + "description": "Result for \"state_get_auction_info\" RPC response.", + "type": "object", + "required": [ + "api_version", + "auction_state" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "auction_state": { + "description": "The auction state.", + "$ref": "#/components/schemas/AuctionState" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_auction_info_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Hash": "0707070707070707070707070707070707070707070707070707070707070707" + } + } + ], + "result": { + "name": "state_get_auction_info_example_result", + "value": { + "api_version": "2.0.0", + "auction_state": { + "state_root_hash": "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", + "block_height": 10, + "era_validators": [ + { + "era_id": 10, + "validator_weights": [ + { + "public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "weight": "10" + } + ] + } + ], + "bids": [ + { + "public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "bid": { + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "bonding_purse": "uref-fafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafa-007", + "staked_amount": "20", + "delegation_rate": 0, + "vesting_schedule": null, + "delegators": [ + { + "delegator_public_key": "014508a07aa941707f3eb2db94c8897a80b2c1197476b6de213ac273df7d86c4ff", + "delegator": { + 
"delegator_public_key": "014508a07aa941707f3eb2db94c8897a80b2c1197476b6de213ac273df7d86c4ff", + "staked_amount": "10", + "bonding_purse": "uref-fbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfb-007", + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "vesting_schedule": null + } + } + ], + "inactive": false + } + } + ] + } + } + } + } + ] + }, + { + "name": "state_get_auction_info_v2", + "summary": "returns the bids and validators as of either a specific block (by height or hash), or the most recently added block. It works for blocks created in 1.x and 2.x", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "state_get_auction_info_v2_result", + "schema": { + "description": "Result for \"state_get_auction_info\" RPC response.", + "type": "object", + "required": [ + "api_version", + "auction_state" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "auction_state": { + "description": "The auction state.", + "$ref": "#/components/schemas/AuctionStateV2" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_auction_info_v2_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Hash": "0707070707070707070707070707070707070707070707070707070707070707" + } + } + ], + "result": { + "name": "state_get_auction_info_v2_example_result", + "value": { + "api_version": "2.0.0", + "auction_state": { + "state_root_hash": "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", + "block_height": 10, + "era_validators": [ + { + "era_id": 10, + "validator_weights": [ + { + "public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "weight": "10" + } + ] + } + ], + "bids": [ + { + "public_key": 
"01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "bid": { + "Validator": { + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "bonding_purse": "uref-fafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafa-007", + "staked_amount": "20", + "delegation_rate": 0, + "vesting_schedule": null, + "inactive": false, + "minimum_delegation_amount": 0, + "maximum_delegation_amount": 18446744073709551615, + "reserved_slots": 0 + } + } + }, + { + "public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "bid": { + "Delegator": { + "delegator_kind": { + "PublicKey": "014508a07aa941707f3eb2db94c8897a80b2c1197476b6de213ac273df7d86c4ff" + }, + "staked_amount": "10", + "bonding_purse": "uref-fbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfbfb-007", + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "vesting_schedule": null + } + } + } + ] + } + } + } + } + ] + }, + { + "name": "chain_get_era_summary", + "summary": "returns the era summary at either a specific block (by height or hash), or the most recently added block", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_era_summary_result", + "schema": { + "description": "Result for \"chain_get_era_summary\" RPC response.", + "type": "object", + "required": [ + "api_version", + "era_summary" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "era_summary": { + "description": "The era summary.", + "$ref": "#/components/schemas/EraSummary" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "chain_get_era_summary_example", + "params": [ + { + "name": "block_identifier", + "value": { + "Hash": 
"1f8187a82bd4b28948add6f8c0ea15a8aac6160088dbc281499fdcf2f0d95a79" + } + } + ], + "result": { + "name": "chain_get_era_summary_example_result", + "value": { + "api_version": "2.0.0", + "era_summary": { + "block_hash": "1f8187a82bd4b28948add6f8c0ea15a8aac6160088dbc281499fdcf2f0d95a79", + "era_id": 42, + "stored_value": { + "EraInfo": { + "seigniorage_allocations": [ + { + "DelegatorKind": { + "delegator_kind": { + "PublicKey": "01e1b46a25baa8a5c28beb3c9cfb79b572effa04076f00befa57eb70b016153f18" + }, + "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + "amount": "1000" + } + }, + { + "Validator": { + "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + "amount": "2000" + } + } + ] + } + }, + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + } + } + } + } + ] + } + ], + "components": { + "schemas": { + "Deploy": { + "description": "A signed smart contract.", + "type": "object", + "required": [ + 
"approvals", + "hash", + "header", + "payment", + "session" + ], + "properties": { + "hash": { + "$ref": "#/components/schemas/DeployHash" + }, + "header": { + "$ref": "#/components/schemas/DeployHeader" + }, + "payment": { + "$ref": "#/components/schemas/ExecutableDeployItem" + }, + "session": { + "$ref": "#/components/schemas/ExecutableDeployItem" + }, + "approvals": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Approval" + }, + "uniqueItems": true + } + }, + "additionalProperties": false + }, + "DeployHash": { + "description": "Hex-encoded deploy hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "Digest": { + "description": "Hex-encoded hash digest.", + "type": "string" + }, + "DeployHeader": { + "description": "The header portion of a [`Deploy`].", + "type": "object", + "required": [ + "account", + "body_hash", + "chain_name", + "dependencies", + "gas_price", + "timestamp", + "ttl" + ], + "properties": { + "account": { + "$ref": "#/components/schemas/PublicKey" + }, + "timestamp": { + "$ref": "#/components/schemas/Timestamp" + }, + "ttl": { + "$ref": "#/components/schemas/TimeDiff" + }, + "gas_price": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "body_hash": { + "$ref": "#/components/schemas/Digest" + }, + "dependencies": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DeployHash" + } + }, + "chain_name": { + "type": "string" + } + }, + "additionalProperties": false + }, + "PublicKey": { + "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. 
Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], + "type": "string" + }, + "Timestamp": { + "description": "Timestamp formatted as per RFC 3339", + "type": "string" + }, + "TimeDiff": { + "description": "Human-readable duration.", + "type": "string" + }, + "ExecutableDeployItem": { + "description": "The executable component of a [`Deploy`].", + "oneOf": [ + { + "description": "Executable specified as raw bytes that represent Wasm code and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "ModuleBytes" + ], + "properties": { + "ModuleBytes": { + "type": "object", + "required": [ + "args", + "module_bytes" + ], + "properties": { + "module_bytes": { + "description": "Hex-encoded raw Wasm bytes.", + "allOf": [ + { + "$ref": "#/components/schemas/Bytes" + } + ] + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored contract referenced by its [`AddressableEntityHash`], entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredContractByHash" + ], + "properties": { + "StoredContractByHash": { + "type": "object", + "required": [ + "args", + "entry_point", + "hash" + ], + "properties": { + "hash": { + "description": "Hex-encoded contract hash.", + "allOf": [ + { + "$ref": "#/components/schemas/ContractHash" + } + ] + }, + "entry_point": { + "description": "Name of an entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": 
"#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredContractByName" + ], + "properties": { + "StoredContractByName": { + "type": "object", + "required": [ + "args", + "entry_point", + "name" + ], + "properties": { + "name": { + "description": "Named key.", + "type": "string" + }, + "entry_point": { + "description": "Name of an entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored versioned contract referenced by its [`PackageHash`], entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredVersionedContractByHash" + ], + "properties": { + "StoredVersionedContractByHash": { + "type": "object", + "required": [ + "args", + "entry_point", + "hash" + ], + "properties": { + "hash": { + "description": "Hex-encoded contract package hash.", + "allOf": [ + { + "$ref": "#/components/schemas/ContractPackageHash" + } + ] + }, + "version": { + "description": "An optional version of the contract to call. 
It will default to the highest enabled version if no value is specified.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "entry_point": { + "description": "Entry point name.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored versioned contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredVersionedContractByName" + ], + "properties": { + "StoredVersionedContractByName": { + "type": "object", + "required": [ + "args", + "entry_point", + "name" + ], + "properties": { + "name": { + "description": "Named key.", + "type": "string" + }, + "version": { + "description": "An optional version of the contract to call. It will default to the highest enabled version if no value is specified.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "entry_point": { + "description": "Entry point name.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "A native transfer which does not contain or reference a Wasm code.", + "type": "object", + "required": [ + "Transfer" + ], + "properties": { + "Transfer": { + "type": "object", + "required": [ + "args" + ], + "properties": { + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "Bytes": { + "description": "Hex-encoded bytes.", + "type": "string" + }, + "RuntimeArgs": { + 
"description": "Represents a collection of arguments passed to a smart contract.", + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedArg" + } + }, + "NamedArg": { + "description": "Named arguments to a contract.", + "type": "array", + "items": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/CLValue" + } + ], + "maxItems": 2, + "minItems": 2 + }, + "CLValue": { + "description": "A Casper value, i.e. a value which can be stored and manipulated by smart contracts.\n\nIt holds the underlying data as a type-erased, serialized `Vec` and also holds the CLType of the underlying data as a separate member.\n\nThe `parsed` field, representing the original value, is a convenience only available when a CLValue is encoded to JSON, and can always be set to null if preferred.", + "type": "object", + "required": [ + "bytes", + "cl_type" + ], + "properties": { + "cl_type": { + "$ref": "#/components/schemas/CLType" + }, + "bytes": { + "type": "string" + }, + "parsed": true + }, + "additionalProperties": false + }, + "CLType": { + "description": "Casper types, i.e. 
types which can be stored and manipulated by smart contracts.\n\nProvides a description of the underlying data type of a [`CLValue`](crate::CLValue).", + "oneOf": [ + { + "description": "`bool` primitive.", + "type": "string", + "enum": [ + "Bool" + ] + }, + { + "description": "`i32` primitive.", + "type": "string", + "enum": [ + "I32" + ] + }, + { + "description": "`i64` primitive.", + "type": "string", + "enum": [ + "I64" + ] + }, + { + "description": "`u8` primitive.", + "type": "string", + "enum": [ + "U8" + ] + }, + { + "description": "`u32` primitive.", + "type": "string", + "enum": [ + "U32" + ] + }, + { + "description": "`u64` primitive.", + "type": "string", + "enum": [ + "U64" + ] + }, + { + "description": "[`U128`] large unsigned integer type.", + "type": "string", + "enum": [ + "U128" + ] + }, + { + "description": "[`U256`] large unsigned integer type.", + "type": "string", + "enum": [ + "U256" + ] + }, + { + "description": "[`U512`] large unsigned integer type.", + "type": "string", + "enum": [ + "U512" + ] + }, + { + "description": "`()` primitive.", + "type": "string", + "enum": [ + "Unit" + ] + }, + { + "description": "`String` primitive.", + "type": "string", + "enum": [ + "String" + ] + }, + { + "description": "[`Key`] system type.", + "type": "string", + "enum": [ + "Key" + ] + }, + { + "description": "[`URef`] system type.", + "type": "string", + "enum": [ + "URef" + ] + }, + { + "description": "[`PublicKey`](crate::PublicKey) system type.", + "type": "string", + "enum": [ + "PublicKey" + ] + }, + { + "description": "`Option` of a `CLType`.", + "type": "object", + "required": [ + "Option" + ], + "properties": { + "Option": { + "$ref": "#/components/schemas/CLType" + } + }, + "additionalProperties": false + }, + { + "description": "Variable-length list of a single `CLType` (comparable to a `Vec`).", + "type": "object", + "required": [ + "List" + ], + "properties": { + "List": { + "$ref": "#/components/schemas/CLType" + } + }, + 
"additionalProperties": false + }, + { + "description": "Fixed-length list of a single `CLType` (comparable to a Rust array).", + "type": "object", + "required": [ + "ByteArray" + ], + "properties": { + "ByteArray": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "`Result` with `Ok` and `Err` variants of `CLType`s.", + "type": "object", + "required": [ + "Result" + ], + "properties": { + "Result": { + "type": "object", + "required": [ + "err", + "ok" + ], + "properties": { + "ok": { + "$ref": "#/components/schemas/CLType" + }, + "err": { + "$ref": "#/components/schemas/CLType" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Map with keys of a single `CLType` and values of a single `CLType`.", + "type": "object", + "required": [ + "Map" + ], + "properties": { + "Map": { + "type": "object", + "required": [ + "key", + "value" + ], + "properties": { + "key": { + "$ref": "#/components/schemas/CLType" + }, + "value": { + "$ref": "#/components/schemas/CLType" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "1-ary tuple of a `CLType`.", + "type": "object", + "required": [ + "Tuple1" + ], + "properties": { + "Tuple1": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" + }, + "maxItems": 1, + "minItems": 1 + } + }, + "additionalProperties": false + }, + { + "description": "2-ary tuple of `CLType`s.", + "type": "object", + "required": [ + "Tuple2" + ], + "properties": { + "Tuple2": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" + }, + "maxItems": 2, + "minItems": 2 + } + }, + "additionalProperties": false + }, + { + "description": "3-ary tuple of `CLType`s.", + "type": "object", + "required": [ + "Tuple3" + ], + "properties": { + "Tuple3": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" + }, + 
"maxItems": 3, + "minItems": 3 + } + }, + "additionalProperties": false + }, + { + "description": "Unspecified type.", + "type": "string", + "enum": [ + "Any" + ] + } + ] + }, + "ContractHash": { + "description": "The hash address of the contract", + "type": "string" + }, + "ContractPackageHash": { + "description": "The hash address of the contract package", + "type": "string" + }, + "Approval": { + "description": "A struct containing a signature of a transaction hash and the public key of the signer.", + "type": "object", + "required": [ + "signature", + "signer" + ], + "properties": { + "signer": { + "$ref": "#/components/schemas/PublicKey" + }, + "signature": { + "$ref": "#/components/schemas/Signature" + } + }, + "additionalProperties": false + }, + "Signature": { + "description": "Hex-encoded cryptographic signature, including the algorithm tag prefix.", + "type": "string" + }, + "Transaction": { + "description": "A versioned wrapper for a transaction or deploy.", + "oneOf": [ + { + "description": "A deploy.", + "type": "object", + "required": [ + "Deploy" + ], + "properties": { + "Deploy": { + "$ref": "#/components/schemas/Deploy" + } + }, + "additionalProperties": false + }, + { + "description": "A version 1 transaction.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/TransactionV1" + } + }, + "additionalProperties": false + } + ] + }, + "TransactionV1": { + "description": "A unit of work sent by a client to the network, which when executed can cause global state to be altered.", + "type": "object", + "required": [ + "approvals", + "hash", + "payload" + ], + "properties": { + "hash": { + "$ref": "#/components/schemas/TransactionV1Hash" + }, + "payload": { + "$ref": "#/components/schemas/TransactionV1Payload" + }, + "approvals": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Approval" + }, + "uniqueItems": true + } + } + }, + "TransactionV1Hash": { + "description": 
"Hex-encoded TransactionV1 hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "TransactionV1Payload": { + "description": "Internal payload of the transaction. The actual data over which the signing is done.", + "type": "object", + "required": [ + "chain_name", + "fields", + "initiator_addr", + "pricing_mode", + "timestamp", + "ttl" + ], + "properties": { + "initiator_addr": { + "$ref": "#/components/schemas/InitiatorAddr" + }, + "timestamp": { + "$ref": "#/components/schemas/Timestamp" + }, + "ttl": { + "$ref": "#/components/schemas/TimeDiff" + }, + "chain_name": { + "type": "string" + }, + "pricing_mode": { + "$ref": "#/components/schemas/PricingMode" + }, + "fields": { + "type": "object", + "additionalProperties": true + } + }, + "additionalProperties": false + }, + "InitiatorAddr": { + "description": "The address of the initiator of a TransactionV1.", + "oneOf": [ + { + "description": "The public key of the initiator.", + "type": "object", + "required": [ + "PublicKey" + ], + "properties": { + "PublicKey": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + { + "description": "The account hash derived from the public key of the initiator.", + "type": "object", + "required": [ + "AccountHash" + ], + "properties": { + "AccountHash": { + "$ref": "#/components/schemas/AccountHash" + } + }, + "additionalProperties": false + } + ] + }, + "AccountHash": { + "description": "Account hash as a formatted string.", + "type": "string" + }, + "PricingMode": { + "description": "Pricing mode of a Transaction.", + "oneOf": [ + { + "description": "The original payment model, where the creator of the transaction specifies how much they will pay, at what gas price.", + "type": "object", + "required": [ + "PaymentLimited" + ], + "properties": { + "PaymentLimited": { + "type": "object", + "required": [ + "gas_price_tolerance", + "payment_amount", + "standard_payment" + ], + "properties": { + "payment_amount": { + 
"description": "User-specified payment amount.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "gas_price_tolerance": { + "description": "User-specified gas_price tolerance (minimum 1). This is interpreted to mean \"do not include this transaction in a block if the current gas price is greater than this number\"", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "standard_payment": { + "description": "Standard payment.", + "type": "boolean" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "The cost of the transaction is determined by the cost table, per the transaction category.", + "type": "object", + "required": [ + "Fixed" + ], + "properties": { + "Fixed": { + "type": "object", + "required": [ + "additional_computation_factor", + "gas_price_tolerance" + ], + "properties": { + "additional_computation_factor": { + "description": "User-specified additional computation factor (minimum 0). If \"0\" is provided, no additional logic is applied to the computation limit. Each value above \"0\" tells the node that it needs to treat the transaction as if it uses more gas than it's serialized size indicates. Each \"1\" will increase the \"wasm lane\" size bucket for this transaction by 1. So if the size of the transaction indicates bucket \"0\" and \"additional_computation_factor = 2\", the transaction will be treated as a \"2\".", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "gas_price_tolerance": { + "description": "User-specified gas_price tolerance (minimum 1). 
This is interpreted to mean \"do not include this transaction in a block if the current gas price is greater than this number\"", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "The payment for this transaction was previously paid, as proven by the receipt hash (this is for future use, not currently implemented).", + "type": "object", + "required": [ + "Prepaid" + ], + "properties": { + "Prepaid": { + "type": "object", + "required": [ + "receipt" + ], + "properties": { + "receipt": { + "description": "Pre-paid receipt.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "TransactionHash": { + "description": "A versioned wrapper for a transaction hash or deploy hash.", + "oneOf": [ + { + "description": "A deploy hash.", + "type": "object", + "required": [ + "Deploy" + ], + "properties": { + "Deploy": { + "$ref": "#/components/schemas/DeployHash" + } + }, + "additionalProperties": false + }, + { + "description": "A version 1 transaction hash.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/TransactionV1Hash" + } + }, + "additionalProperties": false + } + ] + }, + "ExecutionInfo": { + "description": "The block hash and height in which a given deploy was executed, along with the execution result if known.", + "type": "object", + "required": [ + "block_hash", + "block_height" + ], + "properties": { + "block_hash": { + "description": "The hash of the block in which the deploy was executed.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "block_height": { + "description": "The height of the block in which the deploy was executed.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "execution_result": { + "description": "The 
execution result if known.", + "anyOf": [ + { + "$ref": "#/components/schemas/ExecutionResult" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "BlockHash": { + "description": "Hex-encoded cryptographic hash of a block.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "ExecutionResult": { + "description": "The versioned result of executing a single deploy.", + "oneOf": [ + { + "description": "Version 1 of execution result type.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/ExecutionResultV1" + } + }, + "additionalProperties": false + }, + { + "description": "Version 2 of execution result type.", + "type": "object", + "required": [ + "Version2" + ], + "properties": { + "Version2": { + "$ref": "#/components/schemas/ExecutionResultV2" + } + }, + "additionalProperties": false + } + ] + }, + "ExecutionResultV1": { + "description": "The result of executing a single deploy.", + "oneOf": [ + { + "description": "The result of a failed execution.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { + "type": "object", + "required": [ + "cost", + "effect", + "error_message", + "transfers" + ], + "properties": { + "effect": { + "description": "The effect of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/ExecutionEffect" + } + ] + }, + "transfers": { + "description": "A record of version 1 Transfers performed while executing the deploy.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransferAddr" + } + }, + "cost": { + "description": "The cost of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "error_message": { + "description": "The error message associated with executing the deploy.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "The result 
of a successful execution.", + "type": "object", + "required": [ + "Success" + ], + "properties": { + "Success": { + "type": "object", + "required": [ + "cost", + "effect", + "transfers" + ], + "properties": { + "effect": { + "description": "The effect of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/ExecutionEffect" + } + ] + }, + "transfers": { + "description": "A record of Transfers performed while executing the deploy.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransferAddr" + } + }, + "cost": { + "description": "The cost of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "ExecutionEffect": { + "description": "The sequence of execution transforms from a single deploy.", + "type": "object", + "required": [ + "operations", + "transforms" + ], + "properties": { + "operations": { + "description": "The resulting operations.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Operation" + } + }, + "transforms": { + "description": "The sequence of execution transforms.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransformV1" + } + } + }, + "additionalProperties": false + }, + "Operation": { + "description": "An operation performed while executing a deploy.", + "type": "object", + "required": [ + "key", + "kind" + ], + "properties": { + "key": { + "description": "The formatted string of the `Key`.", + "type": "string" + }, + "kind": { + "description": "The type of operation.", + "allOf": [ + { + "$ref": "#/components/schemas/OpKind" + } + ] + } + }, + "additionalProperties": false + }, + "OpKind": { + "description": "The type of operation performed while executing a deploy.", + "oneOf": [ + { + "description": "A read operation.", + "type": "string", + "enum": [ + "Read" + ] + }, + { + "description": "A write operation.", + "type": "string", + "enum": 
[ + "Write" + ] + }, + { + "description": "An addition.", + "type": "string", + "enum": [ + "Add" + ] + }, + { + "description": "An operation which has no effect.", + "type": "string", + "enum": [ + "NoOp" + ] + }, + { + "description": "A prune operation.", + "type": "string", + "enum": [ + "Prune" + ] + } + ] + }, + "TransformV1": { + "description": "A transformation performed while executing a deploy.", + "type": "object", + "required": [ + "key", + "transform" + ], + "properties": { + "key": { + "description": "The formatted string of the `Key`.", + "type": "string" + }, + "transform": { + "description": "The transformation.", + "allOf": [ + { + "$ref": "#/components/schemas/TransformKindV1" + } + ] + } + }, + "additionalProperties": false + }, + "TransformKindV1": { + "description": "The actual transformation performed while executing a deploy.", + "oneOf": [ + { + "description": "A transform having no effect.", + "type": "string", + "enum": [ + "Identity" + ] + }, + { + "description": "Writes the given CLValue to global state.", + "type": "object", + "required": [ + "WriteCLValue" + ], + "properties": { + "WriteCLValue": { + "$ref": "#/components/schemas/CLValue" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given Account to global state.", + "type": "object", + "required": [ + "WriteAccount" + ], + "properties": { + "WriteAccount": { + "$ref": "#/components/schemas/AccountHash" + } + }, + "additionalProperties": false + }, + { + "description": "Writes a smart contract as Wasm to global state.", + "type": "string", + "enum": [ + "WriteContractWasm" + ] + }, + { + "description": "Writes a smart contract to global state.", + "type": "string", + "enum": [ + "WriteContract" + ] + }, + { + "description": "Writes a smart contract package to global state.", + "type": "string", + "enum": [ + "WriteContractPackage" + ] + }, + { + "description": "Writes the given DeployInfo to global state.", + "type": "object", + "required": [ + 
"WriteDeployInfo" + ], + "properties": { + "WriteDeployInfo": { + "$ref": "#/components/schemas/DeployInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given EraInfo to global state.", + "type": "object", + "required": [ + "WriteEraInfo" + ], + "properties": { + "WriteEraInfo": { + "$ref": "#/components/schemas/EraInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given version 1 Transfer to global state.", + "type": "object", + "required": [ + "WriteTransfer" + ], + "properties": { + "WriteTransfer": { + "$ref": "#/components/schemas/TransferV1" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given Bid to global state.", + "type": "object", + "required": [ + "WriteBid" + ], + "properties": { + "WriteBid": { + "$ref": "#/components/schemas/Bid" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given Withdraw to global state.", + "type": "object", + "required": [ + "WriteWithdraw" + ], + "properties": { + "WriteWithdraw": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WithdrawPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `i32`.", + "type": "object", + "required": [ + "AddInt32" + ], + "properties": { + "AddInt32": { + "type": "integer", + "format": "int32" + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `u64`.", + "type": "object", + "required": [ + "AddUInt64" + ], + "properties": { + "AddUInt64": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `U128`.", + "type": "object", + "required": [ + "AddUInt128" + ], + "properties": { + "AddUInt128": { + "$ref": "#/components/schemas/U128" + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `U256`.", + "type": "object", + "required": [ + "AddUInt256" + ], + "properties": 
{ + "AddUInt256": { + "$ref": "#/components/schemas/U256" + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `U512`.", + "type": "object", + "required": [ + "AddUInt512" + ], + "properties": { + "AddUInt512": { + "$ref": "#/components/schemas/U512" + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given collection of named keys.", + "type": "object", + "required": [ + "AddKeys" + ], + "properties": { + "AddKeys": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedKey" + } + } + }, + "additionalProperties": false + }, + { + "description": "A failed transformation, containing an error message.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { + "type": "string" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given Unbonding to global state.", + "type": "object", + "required": [ + "WriteUnbonding" + ], + "properties": { + "WriteUnbonding": { + "type": "array", + "items": { + "$ref": "#/components/schemas/UnbondingPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "Writes the addressable entity to global state.", + "type": "string", + "enum": [ + "WriteAddressableEntity" + ] + }, + { + "description": "Removes pathing to keyed value within global state. 
This is a form of soft delete; the underlying value remains in global state and is reachable from older global state root hashes where it was included in the hash up.", + "type": "object", + "required": [ + "Prune" + ], + "properties": { + "Prune": { + "$ref": "#/components/schemas/Key" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given BidKind to global state.", + "type": "object", + "required": [ + "WriteBidKind" + ], + "properties": { + "WriteBidKind": { + "$ref": "#/components/schemas/BidKind" + } + }, + "additionalProperties": false + } + ] + }, + "DeployInfo": { + "description": "Information relating to the given Deploy.", + "type": "object", + "required": [ + "deploy_hash", + "from", + "gas", + "source", + "transfers" + ], + "properties": { + "deploy_hash": { + "description": "Hex-encoded Deploy hash.", + "allOf": [ + { + "$ref": "#/components/schemas/DeployHash" + } + ] + }, + "transfers": { + "description": "Version 1 transfers performed by the Deploy.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransferAddr" + } + }, + "from": { + "description": "Account identifier of the creator of the Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + }, + "source": { + "description": "Source purse used for payment of the Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "gas": { + "description": "Gas cost of executing the Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + }, + "TransferAddr": { + "description": "Hex-encoded version 1 transfer address.", + "type": "string" + }, + "URef": { + "description": "Hex-encoded, formatted URef.", + "type": "string" + }, + "U512": { + "description": "Decimal representation of a 512-bit integer.", + "type": "string" + }, + "EraInfo": { + "description": "Auction metadata. 
Intended to be recorded at each era.", + "type": "object", + "required": [ + "seigniorage_allocations" + ], + "properties": { + "seigniorage_allocations": { + "type": "array", + "items": { + "$ref": "#/components/schemas/SeigniorageAllocation" + } + } + }, + "additionalProperties": false + }, + "SeigniorageAllocation": { + "description": "Information about a seigniorage allocation", + "oneOf": [ + { + "description": "Info about a seigniorage allocation for a validator", + "type": "object", + "required": [ + "Validator" + ], + "properties": { + "Validator": { + "type": "object", + "required": [ + "amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Info about a seigniorage allocation for a delegator", + "type": "object", + "required": [ + "Delegator" + ], + "properties": { + "Delegator": { + "type": "object", + "required": [ + "amount", + "delegator_public_key", + "validator_public_key" + ], + "properties": { + "delegator_public_key": { + "description": "Delegator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Info about a seigniorage allocation for a delegator", + "type": "object", + "required": [ + "DelegatorKind" + ], + "properties": { + "DelegatorKind": { + "type": "object", + "required": [ + 
"amount", + "delegator_kind", + "validator_public_key" + ], + "properties": { + "delegator_kind": { + "description": "Delegator kind", + "allOf": [ + { + "$ref": "#/components/schemas/DelegatorKind" + } + ] + }, + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "DelegatorKind": { + "description": "Auction bid variants. Kinds of delegation bids.", + "oneOf": [ + { + "description": "Delegation from public key.", + "type": "object", + "required": [ + "PublicKey" + ], + "properties": { + "PublicKey": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + { + "description": "Delegation from purse.", + "type": "object", + "required": [ + "Purse" + ], + "properties": { + "Purse": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "TransferV1": { + "description": "Represents a version 1 transfer from one purse to another.", + "type": "object", + "required": [ + "amount", + "deploy_hash", + "from", + "gas", + "source", + "target" + ], + "properties": { + "deploy_hash": { + "description": "Hex-encoded Deploy hash of Deploy that created the transfer.", + "allOf": [ + { + "$ref": "#/components/schemas/DeployHash" + } + ] + }, + "from": { + "description": "Account from which transfer was executed", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + }, + "to": { + "description": "Account to which funds are transferred", + "anyOf": [ + { + "$ref": "#/components/schemas/AccountHash" + }, + { + "type": "null" + } + ] + }, + "source": { + "description": "Source purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "target": { + "description": "Target purse", + "allOf": [ + { + 
"$ref": "#/components/schemas/URef" + } + ] + }, + "amount": { + "description": "Transfer amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "gas": { + "description": "Gas", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "id": { + "description": "User-defined id", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "Bid": { + "description": "An entry in the validator map.", + "type": "object", + "required": [ + "bonding_purse", + "delegation_rate", + "delegators", + "inactive", + "staked_amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "bonding_purse": { + "description": "The purse that was used for bonding.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "staked_amount": { + "description": "The amount of tokens staked by a validator (not including delegators).", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "delegation_rate": { + "description": "Delegation rate.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "vesting_schedule": { + "description": "Vesting schedule for a genesis validator. 
`None` if non-genesis validator.", + "anyOf": [ + { + "$ref": "#/components/schemas/VestingSchedule" + }, + { + "type": "null" + } + ] + }, + "delegators": { + "description": "This validator's delegators, indexed by their public keys.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_PublicKeyAndDelegator" + } + ] + }, + "inactive": { + "description": "`true` if validator has been \"evicted\".", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "VestingSchedule": { + "type": "object", + "required": [ + "initial_release_timestamp_millis" + ], + "properties": { + "initial_release_timestamp_millis": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "locked_amounts": { + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/components/schemas/U512" + }, + "maxItems": 14, + "minItems": 14 + } + }, + "additionalProperties": false + }, + "Array_of_PublicKeyAndDelegator": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKeyAndDelegator" + } + }, + "PublicKeyAndDelegator": { + "description": "A delegator associated with the given validator.", + "type": "object", + "required": [ + "delegator", + "delegator_public_key" + ], + "properties": { + "delegator_public_key": { + "description": "The public key of the delegator.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "delegator": { + "description": "The delegator details.", + "allOf": [ + { + "$ref": "#/components/schemas/Delegator" + } + ] + } + } + }, + "Delegator": { + "description": "Represents a party delegating their stake to a validator (or \"delegatee\")", + "type": "object", + "required": [ + "bonding_purse", + "delegator_public_key", + "staked_amount", + "validator_public_key" + ], + "properties": { + "delegator_public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "staked_amount": { + "$ref": "#/components/schemas/U512" + }, + "bonding_purse": { + "$ref": "#/components/schemas/URef" + }, + 
"validator_public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "vesting_schedule": { + "anyOf": [ + { + "$ref": "#/components/schemas/VestingSchedule" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "WithdrawPurse": { + "description": "A withdraw purse, a legacy structure.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation", + "unbonder_public_key", + "validator_public_key" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "unbonder_public_key": { + "description": "Unbonders public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "era_of_creation": { + "description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + }, + "EraId": { + "description": "Era ID newtype.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "U128": { + "description": "Decimal representation of a 128-bit integer.", + "type": "string" + }, + "U256": { + "description": "Decimal representation of a 256-bit integer.", + "type": "string" + }, + "NamedKey": { + "description": "A key with a name.", + "type": "object", + "required": [ + "key", + "name" + ], + "properties": { + "name": { + "description": "The name of the entry.", + "type": "string" + }, + "key": { + "description": "The value of the entry: a casper `Key` type.", + "allOf": [ + { + "$ref": "#/components/schemas/Key" + } + ] + } + }, + "additionalProperties": false + }, + "Key": { + "description": "The key as a formatted string, under which 
data (e.g. `CLValue`s, smart contracts, user accounts) are stored in global state.", + "type": "string" + }, + "UnbondingPurse": { + "description": "Unbonding purse.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation", + "unbonder_public_key", + "validator_public_key" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "unbonder_public_key": { + "description": "Unbonders public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "era_of_creation": { + "description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "new_validator": { + "description": "The validator public key to re-delegate to.", + "anyOf": [ + { + "$ref": "#/components/schemas/PublicKey" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "BidKind": { + "description": "Auction bid variants.", + "oneOf": [ + { + "description": "A unified record indexed on validator data, with an embedded collection of all delegator bids assigned to that validator. 
The Unified variant is for legacy retrograde support, new instances will not be created going forward.", + "type": "object", + "required": [ + "Unified" + ], + "properties": { + "Unified": { + "$ref": "#/components/schemas/Bid" + } + }, + "additionalProperties": false + }, + { + "description": "A bid record containing only validator data.", + "type": "object", + "required": [ + "Validator" + ], + "properties": { + "Validator": { + "$ref": "#/components/schemas/ValidatorBid" + } + }, + "additionalProperties": false + }, + { + "description": "A bid record containing only delegator data.", + "type": "object", + "required": [ + "Delegator" + ], + "properties": { + "Delegator": { + "$ref": "#/components/schemas/DelegatorBid" + } + }, + "additionalProperties": false + }, + { + "description": "A bridge record pointing to a new `ValidatorBid` after the public key was changed.", + "type": "object", + "required": [ + "Bridge" + ], + "properties": { + "Bridge": { + "$ref": "#/components/schemas/Bridge" + } + }, + "additionalProperties": false + }, + { + "description": "Credited amount.", + "type": "object", + "required": [ + "Credit" + ], + "properties": { + "Credit": { + "$ref": "#/components/schemas/ValidatorCredit" + } + }, + "additionalProperties": false + }, + { + "description": "Reservation", + "type": "object", + "required": [ + "Reservation" + ], + "properties": { + "Reservation": { + "$ref": "#/components/schemas/Reservation" + } + }, + "additionalProperties": false + }, + { + "description": "Unbond", + "type": "object", + "required": [ + "Unbond" + ], + "properties": { + "Unbond": { + "$ref": "#/components/schemas/Unbond" + } + }, + "additionalProperties": false + } + ] + }, + "ValidatorBid": { + "description": "An entry in the validator map.", + "type": "object", + "required": [ + "bonding_purse", + "delegation_rate", + "inactive", + "maximum_delegation_amount", + "minimum_delegation_amount", + "reserved_slots", + "staked_amount", + "validator_public_key" + ], + 
"properties": { + "validator_public_key": { + "description": "Validator public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "bonding_purse": { + "description": "The purse that was used for bonding.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "staked_amount": { + "description": "The amount of tokens staked by a validator (not including delegators).", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "delegation_rate": { + "description": "Delegation rate", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "vesting_schedule": { + "description": "Vesting schedule for a genesis validator. `None` if non-genesis validator.", + "anyOf": [ + { + "$ref": "#/components/schemas/VestingSchedule" + }, + { + "type": "null" + } + ] + }, + "inactive": { + "description": "`true` if validator has been \"evicted\"", + "type": "boolean" + }, + "minimum_delegation_amount": { + "description": "Minimum allowed delegation amount in motes", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "maximum_delegation_amount": { + "description": "Maximum allowed delegation amount in motes", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "reserved_slots": { + "description": "Slots reserved for specific delegators", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "DelegatorBid": { + "description": "Represents a party delegating their stake to a validator (or \"delegatee\")", + "type": "object", + "required": [ + "bonding_purse", + "delegator_kind", + "staked_amount", + "validator_public_key" + ], + "properties": { + "delegator_kind": { + "$ref": "#/components/schemas/DelegatorKind" + }, + "staked_amount": { + "$ref": "#/components/schemas/U512" + }, + "bonding_purse": { + "$ref": "#/components/schemas/URef" + }, + "validator_public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "vesting_schedule": 
{ + "anyOf": [ + { + "$ref": "#/components/schemas/VestingSchedule" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "Bridge": { + "description": "A bridge record pointing to a new `ValidatorBid` after the public key was changed.", + "type": "object", + "required": [ + "era_id", + "new_validator_public_key", + "old_validator_public_key" + ], + "properties": { + "old_validator_public_key": { + "description": "Previous validator public key associated with the bid.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "new_validator_public_key": { + "description": "New validator public key associated with the bid.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "era_id": { + "description": "Era when bridge record was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + } + }, + "additionalProperties": false + }, + "ValidatorCredit": { + "description": "Validator credit record.", + "type": "object", + "required": [ + "amount", + "era_id", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "era_id": { + "description": "The era id the credit was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "amount": { + "description": "The credit amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + }, + "Reservation": { + "description": "Represents a validator reserving a slot for specific delegator", + "type": "object", + "required": [ + "delegation_rate", + "delegator_kind", + "validator_public_key" + ], + "properties": { + "delegator_kind": { + "description": "Delegator kind.", + "allOf": [ + { + "$ref": "#/components/schemas/DelegatorKind" + } + ] + }, + "validator_public_key": { + "description": "Validator public key.", + "allOf": [ + { + 
"$ref": "#/components/schemas/PublicKey" + } + ] + }, + "delegation_rate": { + "description": "Individual delegation rate.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "Unbond": { + "type": "object", + "required": [ + "eras", + "unbond_kind", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "unbond_kind": { + "description": "Unbond kind.", + "allOf": [ + { + "$ref": "#/components/schemas/UnbondKind" + } + ] + }, + "eras": { + "description": "Unbond amounts per era.", + "type": "array", + "items": { + "$ref": "#/components/schemas/UnbondEra" + } + } + }, + "additionalProperties": false + }, + "UnbondKind": { + "description": "Unbond variants.", + "oneOf": [ + { + "type": "object", + "required": [ + "Validator" + ], + "properties": { + "Validator": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "DelegatedPublicKey" + ], + "properties": { + "DelegatedPublicKey": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "DelegatedPurse" + ], + "properties": { + "DelegatedPurse": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "UnbondEra": { + "description": "Unbond amounts per era.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "era_of_creation": { + "description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + 
} + ] + }, + "new_validator": { + "description": "The validator public key to re-delegate to.", + "anyOf": [ + { + "$ref": "#/components/schemas/PublicKey" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "ExecutionResultV2": { + "description": "The result of executing a single transaction.", + "type": "object", + "required": [ + "consumed", + "cost", + "effects", + "initiator", + "limit", + "size_estimate", + "transfers" + ], + "properties": { + "initiator": { + "description": "Who initiated this transaction.", + "allOf": [ + { + "$ref": "#/components/schemas/InitiatorAddr" + } + ] + }, + "error_message": { + "description": "If there is no error message, this execution was processed successfully. If there is an error message, this execution failed to fully process for the stated reason.", + "type": [ + "string", + "null" + ] + }, + "limit": { + "description": "What was the maximum allowed gas limit for this transaction?.", + "allOf": [ + { + "$ref": "#/components/schemas/Gas" + } + ] + }, + "consumed": { + "description": "How much gas was consumed executing this transaction.", + "allOf": [ + { + "$ref": "#/components/schemas/Gas" + } + ] + }, + "cost": { + "description": "How much was paid for this transaction.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "transfers": { + "description": "A record of transfers performed while executing this transaction.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Transfer" + } + }, + "size_estimate": { + "description": "The size estimate of the transaction", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "effects": { + "description": "The effects of executing this transaction.", + "allOf": [ + { + "$ref": "#/components/schemas/Effects" + } + ] + } + }, + "additionalProperties": false + }, + "Gas": { + "description": "The `Gas` struct represents a `U512` amount of gas.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] 
+ }, + "Transfer": { + "description": "A versioned wrapper for a transfer.", + "oneOf": [ + { + "description": "A version 1 transfer.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/TransferV1" + } + }, + "additionalProperties": false + }, + { + "description": "A version 2 transfer.", + "type": "object", + "required": [ + "Version2" + ], + "properties": { + "Version2": { + "$ref": "#/components/schemas/TransferV2" + } + }, + "additionalProperties": false + } + ] + }, + "TransferV2": { + "description": "Represents a version 2 transfer from one purse to another.", + "type": "object", + "required": [ + "amount", + "from", + "gas", + "source", + "target", + "transaction_hash" + ], + "properties": { + "transaction_hash": { + "description": "Transaction that created the transfer.", + "allOf": [ + { + "$ref": "#/components/schemas/TransactionHash" + } + ] + }, + "from": { + "description": "Entity from which transfer was executed.", + "allOf": [ + { + "$ref": "#/components/schemas/InitiatorAddr" + } + ] + }, + "to": { + "description": "Account to which funds are transferred.", + "anyOf": [ + { + "$ref": "#/components/schemas/AccountHash" + }, + { + "type": "null" + } + ] + }, + "source": { + "description": "Source purse.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "target": { + "description": "Target purse.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "amount": { + "description": "Transfer amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "gas": { + "description": "Gas.", + "allOf": [ + { + "$ref": "#/components/schemas/Gas" + } + ] + }, + "id": { + "description": "User-defined ID.", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "Effects": { + "description": "A log of all transforms produced during execution.", + "type": "array", + 
"items": { + "$ref": "#/components/schemas/TransformV2" + } + }, + "TransformV2": { + "description": "A transformation performed while executing a deploy.", + "type": "object", + "required": [ + "key", + "kind" + ], + "properties": { + "key": { + "$ref": "#/components/schemas/Key" + }, + "kind": { + "$ref": "#/components/schemas/TransformKindV2" + } + }, + "additionalProperties": false + }, + "TransformKindV2": { + "description": "Representation of a single transformation occurring during execution.\n\nNote that all arithmetic variants of `TransformKindV2` are commutative which means that a given collection of them can be executed in any order to produce the same end result.", + "oneOf": [ + { + "description": "An identity transformation that does not modify a value in the global state.\n\nCreated as a result of reading from the global state.", + "type": "string", + "enum": [ + "Identity" + ] + }, + { + "description": "Writes a new value in the global state.", + "type": "object", + "required": [ + "Write" + ], + "properties": { + "Write": { + "$ref": "#/components/schemas/StoredValue" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of an `i32` to an existing numeric value (not necessarily an `i32`) in the global state.", + "type": "object", + "required": [ + "AddInt32" + ], + "properties": { + "AddInt32": { + "type": "integer", + "format": "int32" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `u64` to an existing numeric value (not necessarily an `u64`) in the global state.", + "type": "object", + "required": [ + "AddUInt64" + ], + "properties": { + "AddUInt64": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `U128` to an existing numeric value (not necessarily an `U128`) in the global state.", + "type": "object", + "required": [ + "AddUInt128" + ], + "properties": { + 
"AddUInt128": { + "$ref": "#/components/schemas/U128" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `U256` to an existing numeric value (not necessarily an `U256`) in the global state.", + "type": "object", + "required": [ + "AddUInt256" + ], + "properties": { + "AddUInt256": { + "$ref": "#/components/schemas/U256" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `U512` to an existing numeric value (not necessarily an `U512`) in the global state.", + "type": "object", + "required": [ + "AddUInt512" + ], + "properties": { + "AddUInt512": { + "$ref": "#/components/schemas/U512" + } + }, + "additionalProperties": false + }, + { + "description": "Adds new named keys to an existing entry in the global state.\n\nThis transform assumes that the existing stored value is either an Account or a Contract.", + "type": "object", + "required": [ + "AddKeys" + ], + "properties": { + "AddKeys": { + "$ref": "#/components/schemas/NamedKeys" + } + }, + "additionalProperties": false + }, + { + "description": "Removes the pathing to the global state entry of the specified key. 
The pruned element remains reachable from previously generated global state root hashes, but will not be included in the next generated global state root hash and subsequent state accumulated from it.", + "type": "object", + "required": [ + "Prune" + ], + "properties": { + "Prune": { + "$ref": "#/components/schemas/Key" + } + }, + "additionalProperties": false + }, + { + "description": "Represents the case where applying a transform would cause an error.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { + "$ref": "#/components/schemas/TransformError" + } + }, + "additionalProperties": false + } + ] + }, + "StoredValue": { + "description": "A value stored in Global State.", + "oneOf": [ + { + "description": "A CLValue.", + "type": "object", + "required": [ + "CLValue" + ], + "properties": { + "CLValue": { + "$ref": "#/components/schemas/CLValue" + } + }, + "additionalProperties": false + }, + { + "description": "An account.", + "type": "object", + "required": [ + "Account" + ], + "properties": { + "Account": { + "$ref": "#/components/schemas/Account" + } + }, + "additionalProperties": false + }, + { + "description": "Contract wasm.", + "type": "object", + "required": [ + "ContractWasm" + ], + "properties": { + "ContractWasm": { + "$ref": "#/components/schemas/ContractWasm" + } + }, + "additionalProperties": false + }, + { + "description": "A contract.", + "type": "object", + "required": [ + "Contract" + ], + "properties": { + "Contract": { + "$ref": "#/components/schemas/Contract" + } + }, + "additionalProperties": false + }, + { + "description": "A contract package.", + "type": "object", + "required": [ + "ContractPackage" + ], + "properties": { + "ContractPackage": { + "$ref": "#/components/schemas/ContractPackage" + } + }, + "additionalProperties": false + }, + { + "description": "A version 1 transfer.", + "type": "object", + "required": [ + "Transfer" + ], + "properties": { + "Transfer": { + "$ref": 
"#/components/schemas/TransferV1" + } + }, + "additionalProperties": false + }, + { + "description": "Info about a deploy.", + "type": "object", + "required": [ + "DeployInfo" + ], + "properties": { + "DeployInfo": { + "$ref": "#/components/schemas/DeployInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Info about an era.", + "type": "object", + "required": [ + "EraInfo" + ], + "properties": { + "EraInfo": { + "$ref": "#/components/schemas/EraInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores [`Bid`].", + "type": "object", + "required": [ + "Bid" + ], + "properties": { + "Bid": { + "$ref": "#/components/schemas/Bid" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores withdraw information.", + "type": "object", + "required": [ + "Withdraw" + ], + "properties": { + "Withdraw": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WithdrawPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "Unbonding information.", + "type": "object", + "required": [ + "Unbonding" + ], + "properties": { + "Unbonding": { + "type": "array", + "items": { + "$ref": "#/components/schemas/UnbondingPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "An `AddressableEntity`.", + "type": "object", + "required": [ + "AddressableEntity" + ], + "properties": { + "AddressableEntity": { + "$ref": "#/components/schemas/AddressableEntity" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores [`BidKind`].", + "type": "object", + "required": [ + "BidKind" + ], + "properties": { + "BidKind": { + "$ref": "#/components/schemas/BidKind" + } + }, + "additionalProperties": false + }, + { + "description": "A smart contract `Package`.", + "type": "object", + "required": [ + "SmartContract" + ], + "properties": { + "SmartContract": { + "$ref": "#/components/schemas/Package" + } + }, + "additionalProperties": false 
+ }, + { + "description": "A record of byte code.", + "type": "object", + "required": [ + "ByteCode" + ], + "properties": { + "ByteCode": { + "$ref": "#/components/schemas/ByteCode" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores a message topic.", + "type": "object", + "required": [ + "MessageTopic" + ], + "properties": { + "MessageTopic": { + "$ref": "#/components/schemas/MessageTopicSummary" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores a message digest.", + "type": "object", + "required": [ + "Message" + ], + "properties": { + "Message": { + "$ref": "#/components/schemas/MessageChecksum" + } + }, + "additionalProperties": false + }, + { + "description": "A NamedKey record.", + "type": "object", + "required": [ + "NamedKey" + ], + "properties": { + "NamedKey": { + "$ref": "#/components/schemas/NamedKeyValue" + } + }, + "additionalProperties": false + }, + { + "description": "A prepayment record.", + "type": "object", + "required": [ + "Prepayment" + ], + "properties": { + "Prepayment": { + "$ref": "#/components/schemas/PrepaymentKind" + } + }, + "additionalProperties": false + }, + { + "description": "An entrypoint record.", + "type": "object", + "required": [ + "EntryPoint" + ], + "properties": { + "EntryPoint": { + "$ref": "#/components/schemas/EntryPointValue" + } + }, + "additionalProperties": false + }, + { + "description": "Raw bytes. 
Similar to a [`crate::StoredValue::CLValue`] but does not incur overhead of a [`crate::CLValue`] and [`crate::CLType`].", + "type": "object", + "required": [ + "RawBytes" + ], + "properties": { + "RawBytes": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "Account": { + "description": "Represents an Account in the global state.", + "type": "object", + "required": [ + "account_hash", + "action_thresholds", + "associated_keys", + "main_purse", + "named_keys" + ], + "properties": { + "account_hash": { + "$ref": "#/components/schemas/AccountHash" + }, + "named_keys": { + "$ref": "#/components/schemas/NamedKeys" + }, + "main_purse": { + "$ref": "#/components/schemas/URef" + }, + "associated_keys": { + "$ref": "#/components/schemas/AccountAssociatedKeys" + }, + "action_thresholds": { + "$ref": "#/components/schemas/AccountActionThresholds" + } + }, + "additionalProperties": false + }, + "NamedKeys": { + "description": "A collection of named keys.", + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedKey" + } + }, + "AccountAssociatedKeys": { + "description": "A collection of weighted public keys (represented as account hashes) associated with an account.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_AssociatedKey" + } + ] + }, + "Array_of_AssociatedKey": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AssociatedKey" + } + }, + "AssociatedKey": { + "description": "A weighted public key.", + "type": "object", + "required": [ + "account_hash", + "weight" + ], + "properties": { + "account_hash": { + "description": "The account hash of the public key.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + }, + "weight": { + "description": "The weight assigned to the public key.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountAssociatedKeyWeight" + } + ] + } + } + }, + "AccountAssociatedKeyWeight": { + "description": "The weight associated with public keys in an account's 
associated keys.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "AccountActionThresholds": { + "description": "Thresholds that have to be met when executing an action of a certain type.", + "type": "object", + "required": [ + "deployment", + "key_management" + ], + "properties": { + "deployment": { + "description": "Threshold for deploy execution.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountAssociatedKeyWeight" + } + ] + }, + "key_management": { + "description": "Threshold for managing action threshold.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountAssociatedKeyWeight" + } + ] + } + } + }, + "ContractWasm": { + "description": "A container for contract's WASM bytes.", + "type": "object", + "required": [ + "bytes" + ], + "properties": { + "bytes": { + "$ref": "#/components/schemas/Bytes" + } + } + }, + "Contract": { + "description": "Methods and type signatures supported by a contract.", + "type": "object", + "required": [ + "contract_package_hash", + "contract_wasm_hash", + "entry_points", + "named_keys", + "protocol_version" + ], + "properties": { + "contract_package_hash": { + "$ref": "#/components/schemas/ContractPackageHash" + }, + "contract_wasm_hash": { + "$ref": "#/components/schemas/ContractWasmHash" + }, + "named_keys": { + "$ref": "#/components/schemas/NamedKeys" + }, + "entry_points": { + "$ref": "#/components/schemas/Array_of_NamedEntryPoint" + }, + "protocol_version": { + "$ref": "#/components/schemas/ProtocolVersion" + } + } + }, + "ContractWasmHash": { + "description": "The hash address of the contract wasm", + "type": "string" + }, + "Array_of_NamedEntryPoint": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedEntryPoint" + } + }, + "NamedEntryPoint": { + "type": "object", + "required": [ + "entry_point", + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "entry_point": { + "allOf": [ + { + "$ref": "#/components/schemas/EntryPoint" + } + ] + } + } + }, + 
"EntryPoint": { + "description": "Type signature of a method. Order of arguments matter since can be referenced by index as well as name.", + "type": "object", + "required": [ + "access", + "args", + "entry_point_type", + "name", + "ret" + ], + "properties": { + "name": { + "type": "string" + }, + "args": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Parameter" + } + }, + "ret": { + "$ref": "#/components/schemas/CLType" + }, + "access": { + "$ref": "#/components/schemas/EntryPointAccess" + }, + "entry_point_type": { + "$ref": "#/components/schemas/EntryPointType" + } + } + }, + "Parameter": { + "description": "Parameter to a method", + "type": "object", + "required": [ + "cl_type", + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "cl_type": { + "$ref": "#/components/schemas/CLType" + } + } + }, + "EntryPointAccess": { + "description": "Enum describing the possible access control options for a contract entry point (method).", + "oneOf": [ + { + "description": "Anyone can call this method (no access controls).", + "type": "string", + "enum": [ + "Public" + ] + }, + { + "description": "Only users from the listed groups may call this method. Note: if the list is empty then this method is not callable from outside the contract.", + "type": "object", + "required": [ + "Groups" + ], + "properties": { + "Groups": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Group" + } + } + }, + "additionalProperties": false + }, + { + "description": "Can't be accessed directly but are kept in the derived wasm bytes.", + "type": "string", + "enum": [ + "Template" + ] + } + ] + }, + "Group": { + "description": "A (labelled) \"user group\". Each method of a versioned contract may be associated with one or more user groups which are allowed to call it.", + "type": "string" + }, + "EntryPointType": { + "description": "Context of method execution\n\nMost significant bit represents version i.e. 
- 0b0 -> 0.x/1.x (session & contracts) - 0b1 -> 2.x and later (introduced installer, utility entry points)", + "oneOf": [ + { + "description": "Runs using the calling entity's context. In v1.x this was used for both \"session\" code run using the originating Account's context, and also for \"StoredSession\" code that ran in the caller's context. While this made systemic sense due to the way the runtime context nesting works, this dual usage was very confusing to most human beings.\n\nIn v2.x the renamed Caller variant is exclusively used for wasm run using the initiating account entity's context. Previously installed 1.x stored session code should continue to work as the binary value matches but we no longer allow such logic to be upgraded, nor do we allow new stored session to be installed.", + "type": "string", + "enum": [ + "Caller" + ] + }, + { + "description": "Runs using the called entity's context.", + "type": "string", + "enum": [ + "Called" + ] + }, + { + "description": "Extract a subset of bytecode and installs it as a new smart contract. 
Runs using the called entity's context.", + "type": "string", + "enum": [ + "Factory" + ] + } + ] + }, + "ProtocolVersion": { + "description": "Casper Platform protocol version", + "type": "string" + }, + "ContractPackage": { + "description": "Contract definition, metadata, and security container.", + "type": "object", + "required": [ + "access_key", + "disabled_versions", + "groups", + "lock_status", + "versions" + ], + "properties": { + "access_key": { + "description": "Key used to add or disable versions", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "versions": { + "description": "All versions (enabled & disabled)", + "type": "array", + "items": { + "$ref": "#/components/schemas/ContractVersion" + } + }, + "disabled_versions": { + "description": "Disabled versions", + "type": "array", + "items": { + "$ref": "#/components/schemas/ContractVersionKey" + }, + "uniqueItems": true + }, + "groups": { + "description": "Mapping maintaining the set of URefs associated with each \"user group\". This can be used to control access to methods in a particular version of the contract. 
A method is callable by any context which \"knows\" any of the URefs associated with the method's user group.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_NamedUserGroup" + } + ] + }, + "lock_status": { + "description": "A flag that determines whether a contract is locked", + "allOf": [ + { + "$ref": "#/components/schemas/ContractPackageStatus" + } + ] + } + } + }, + "ContractVersion": { + "type": "object", + "required": [ + "contract_hash", + "contract_version", + "protocol_version_major" + ], + "properties": { + "protocol_version_major": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "contract_version": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "contract_hash": { + "$ref": "#/components/schemas/ContractHash" + } + } + }, + "ContractVersionKey": { + "description": "Major element of `ProtocolVersion` combined with `ContractVersion`.", + "type": "array", + "items": [ + { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + ], + "maxItems": 2, + "minItems": 2 + }, + "Array_of_NamedUserGroup": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedUserGroup" + } + }, + "NamedUserGroup": { + "type": "object", + "required": [ + "group_name", + "group_users" + ], + "properties": { + "group_name": { + "allOf": [ + { + "$ref": "#/components/schemas/Group" + } + ] + }, + "group_users": { + "type": "array", + "items": { + "$ref": "#/components/schemas/URef" + }, + "uniqueItems": true + } + } + }, + "ContractPackageStatus": { + "description": "A enum to determine the lock status of the contract package.", + "oneOf": [ + { + "description": "The package is locked and cannot be versioned.", + "type": "string", + "enum": [ + "Locked" + ] + }, + { + "description": "The package is unlocked and can be versioned.", + "type": "string", + "enum": [ + "Unlocked" + ] + } + ] + }, + "AddressableEntity": { + "description": 
"Methods and type signatures supported by a contract.", + "type": "object", + "required": [ + "action_thresholds", + "associated_keys", + "byte_code_hash", + "entity_kind", + "main_purse", + "package_hash", + "protocol_version" + ], + "properties": { + "protocol_version": { + "$ref": "#/components/schemas/ProtocolVersion" + }, + "entity_kind": { + "$ref": "#/components/schemas/EntityKind" + }, + "package_hash": { + "$ref": "#/components/schemas/PackageHash" + }, + "byte_code_hash": { + "$ref": "#/components/schemas/ByteCodeHash" + }, + "main_purse": { + "$ref": "#/components/schemas/URef" + }, + "associated_keys": { + "$ref": "#/components/schemas/EntityAssociatedKeys" + }, + "action_thresholds": { + "$ref": "#/components/schemas/EntityActionThresholds" + } + } + }, + "EntityKind": { + "description": "The type of Package.", + "oneOf": [ + { + "description": "Package associated with a native contract implementation.", + "type": "object", + "required": [ + "System" + ], + "properties": { + "System": { + "$ref": "#/components/schemas/SystemEntityType" + } + }, + "additionalProperties": false + }, + { + "description": "Package associated with an Account hash.", + "type": "object", + "required": [ + "Account" + ], + "properties": { + "Account": { + "$ref": "#/components/schemas/AccountHash" + } + }, + "additionalProperties": false + }, + { + "description": "Packages associated with Wasm stored on chain.", + "type": "object", + "required": [ + "SmartContract" + ], + "properties": { + "SmartContract": { + "$ref": "#/components/schemas/ContractRuntimeTag" + } + }, + "additionalProperties": false + } + ] + }, + "SystemEntityType": { + "description": "System contract types.\n\nUsed by converting to a `u32` and passing as the `system_contract_index` argument of `ext_ffi::casper_get_system_contract()`.", + "oneOf": [ + { + "description": "Mint contract.", + "type": "string", + "enum": [ + "Mint" + ] + }, + { + "description": "Handle Payment contract.", + "type": "string", + 
"enum": [ + "HandlePayment" + ] + }, + { + "description": "Standard Payment contract.", + "type": "string", + "enum": [ + "StandardPayment" + ] + }, + { + "description": "Auction contract.", + "type": "string", + "enum": [ + "Auction" + ] + } + ] + }, + "ContractRuntimeTag": { + "description": "Runtime used to execute a Transaction.", + "type": "string", + "enum": [ + "VmCasperV1", + "VmCasperV2" + ] + }, + "PackageHash": { + "description": "The hex-encoded address of the Package.", + "type": "string" + }, + "ByteCodeHash": { + "description": "The hash address of the contract wasm", + "type": "string" + }, + "EntityAssociatedKeys": { + "description": "A collection of weighted public keys (represented as account hashes) associated with an account.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_AssociatedKey" + } + ] + }, + "EntityActionThresholds": { + "description": "Thresholds that have to be met when executing an action of a certain type.", + "type": "object", + "required": [ + "deployment", + "key_management", + "upgrade_management" + ], + "properties": { + "deployment": { + "description": "Threshold for deploy execution.", + "allOf": [ + { + "$ref": "#/components/schemas/EntityAssociatedKeyWeight" + } + ] + }, + "upgrade_management": { + "description": "Threshold for upgrading contracts.", + "allOf": [ + { + "$ref": "#/components/schemas/EntityAssociatedKeyWeight" + } + ] + }, + "key_management": { + "description": "Threshold for managing action threshold.", + "allOf": [ + { + "$ref": "#/components/schemas/EntityAssociatedKeyWeight" + } + ] + } + } + }, + "EntityAssociatedKeyWeight": { + "description": "The weight associated with public keys in an account's associated keys.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "Package": { + "description": "Entity definition, metadata, and security container.", + "type": "object", + "required": [ + "disabled_versions", + "groups", + "lock_status", + "versions" + ], + "properties": { + 
"versions": { + "description": "All versions (enabled & disabled).", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_EntityVersionAndHash" + } + ] + }, + "disabled_versions": { + "description": "Collection of disabled entity versions. The runtime will not permit disabled entity versions to be executed.", + "type": "array", + "items": { + "$ref": "#/components/schemas/EntityVersionKey" + }, + "uniqueItems": true + }, + "groups": { + "description": "Mapping maintaining the set of URefs associated with each \"user group\". This can be used to control access to methods in a particular version of the entity. A method is callable by any context which \"knows\" any of the URefs associated with the method's user group.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_NamedUserGroup" + } + ] + }, + "lock_status": { + "description": "A flag that determines whether a entity is locked", + "allOf": [ + { + "$ref": "#/components/schemas/PackageStatus" + } + ] + } + } + }, + "Array_of_EntityVersionAndHash": { + "type": "array", + "items": { + "$ref": "#/components/schemas/EntityVersionAndHash" + } + }, + "EntityVersionAndHash": { + "type": "object", + "required": [ + "addressable_entity_hash", + "entity_version_key" + ], + "properties": { + "entity_version_key": { + "allOf": [ + { + "$ref": "#/components/schemas/EntityVersionKey" + } + ] + }, + "addressable_entity_hash": { + "allOf": [ + { + "$ref": "#/components/schemas/AddressableEntityHash" + } + ] + } + } + }, + "EntityVersionKey": { + "description": "Major element of `ProtocolVersion` combined with `EntityVersion`.", + "type": "object", + "required": [ + "entity_version", + "protocol_version_major" + ], + "properties": { + "protocol_version_major": { + "description": "Major element of `ProtocolVersion` a `ContractVersion` is compatible with.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "entity_version": { + "description": "Automatically incremented value for a contract version 
within a major `ProtocolVersion`.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + } + }, + "AddressableEntityHash": { + "description": "The hex-encoded address of the addressable entity.", + "type": "string" + }, + "PackageStatus": { + "description": "A enum to determine the lock status of the package.", + "oneOf": [ + { + "description": "The package is locked and cannot be versioned.", + "type": "string", + "enum": [ + "Locked" + ] + }, + { + "description": "The package is unlocked and can be versioned.", + "type": "string", + "enum": [ + "Unlocked" + ] + } + ] + }, + "ByteCode": { + "description": "A container for contract's Wasm bytes.", + "type": "object", + "required": [ + "bytes", + "kind" + ], + "properties": { + "kind": { + "$ref": "#/components/schemas/ByteCodeKind" + }, + "bytes": { + "$ref": "#/components/schemas/Bytes" + } + } + }, + "ByteCodeKind": { + "description": "The type of Byte code.", + "oneOf": [ + { + "description": "Empty byte code.", + "type": "string", + "enum": [ + "Empty" + ] + }, + { + "description": "Byte code to be executed with the version 1 Casper execution engine.", + "type": "string", + "enum": [ + "V1CasperWasm" + ] + }, + { + "description": "Byte code to be executed with the version 2 Casper execution engine.", + "type": "string", + "enum": [ + "V2CasperWasm" + ] + } + ] + }, + "MessageTopicSummary": { + "description": "Summary of a message topic that will be stored in global state.", + "type": "object", + "required": [ + "blocktime", + "message_count", + "topic_name" + ], + "properties": { + "message_count": { + "description": "Number of messages in this topic.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "blocktime": { + "description": "Block timestamp in which these messages were emitted.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockTime" + } + ] + }, + "topic_name": { + "description": "Name of the topic.", + "type": "string" + } + } + }, + "BlockTime": { + "description": 
"A newtype wrapping a [`u64`] which represents the block time.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "MessageChecksum": { + "description": "Message checksum as a formatted string.", + "type": "string" + }, + "NamedKeyValue": { + "description": "A NamedKey value.", + "type": "object", + "required": [ + "name", + "named_key" + ], + "properties": { + "named_key": { + "description": "The actual `Key` encoded as a CLValue.", + "allOf": [ + { + "$ref": "#/components/schemas/CLValue" + } + ] + }, + "name": { + "description": "The name of the `Key` encoded as a CLValue.", + "allOf": [ + { + "$ref": "#/components/schemas/CLValue" + } + ] + } + } + }, + "PrepaymentKind": { + "description": "Container for bytes recording location, type and data for a gas pre payment", + "type": "object", + "required": [ + "prepayment_data", + "prepayment_kind", + "receipt" + ], + "properties": { + "receipt": { + "$ref": "#/components/schemas/Digest" + }, + "prepayment_kind": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "prepayment_data": { + "$ref": "#/components/schemas/Bytes" + } + } + }, + "EntryPointValue": { + "description": "The encaspulated representation of entrypoints.", + "oneOf": [ + { + "description": "Entrypoints to be executed against the V1 Casper VM.", + "type": "object", + "required": [ + "V1CasperVm" + ], + "properties": { + "V1CasperVm": { + "$ref": "#/components/schemas/EntryPoint2" + } + }, + "additionalProperties": false + } + ] + }, + "EntryPoint2": { + "description": "Type signature of a method. 
Order of arguments matter since can be referenced by index as well as name.", + "type": "object", + "required": [ + "access", + "args", + "entry_point_payment", + "entry_point_type", + "name", + "ret" + ], + "properties": { + "name": { + "type": "string" + }, + "args": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Parameter" + } + }, + "ret": { + "$ref": "#/components/schemas/CLType" + }, + "access": { + "$ref": "#/components/schemas/EntryPointAccess" + }, + "entry_point_type": { + "$ref": "#/components/schemas/EntryPointType" + }, + "entry_point_payment": { + "$ref": "#/components/schemas/EntryPointPayment" + } + } + }, + "EntryPointPayment": { + "description": "An enum specifying who pays for the invocation and execution of the entrypoint.", + "oneOf": [ + { + "description": "The caller must cover costs", + "type": "string", + "enum": [ + "Caller" + ] + }, + { + "description": "Will cover costs if directly invoked.", + "type": "string", + "enum": [ + "DirectInvocationOnly" + ] + }, + { + "description": "will cover costs to execute self including any subsequent invoked contracts", + "type": "string", + "enum": [ + "SelfOnward" + ] + } + ] + }, + "TransformError": { + "description": "Error type for applying and combining transforms.\n\nA `TypeMismatch` occurs when a transform cannot be applied because the types are not compatible (e.g. 
trying to add a number to a string).", + "oneOf": [ + { + "description": "Error while (de)serializing data.", + "type": "object", + "required": [ + "Serialization" + ], + "properties": { + "Serialization": { + "$ref": "#/components/schemas/BytesreprError" + } + }, + "additionalProperties": false + }, + { + "description": "Type mismatch error.", + "type": "object", + "required": [ + "TypeMismatch" + ], + "properties": { + "TypeMismatch": { + "$ref": "#/components/schemas/TypeMismatch" + } + }, + "additionalProperties": false + }, + { + "description": "Type no longer supported.", + "type": "string", + "enum": [ + "Deprecated" + ] + } + ] + }, + "BytesreprError": { + "description": "Serialization and deserialization errors.", + "oneOf": [ + { + "description": "Early end of stream while deserializing.", + "type": "string", + "enum": [ + "EarlyEndOfStream" + ] + }, + { + "description": "Formatting error while deserializing.", + "type": "string", + "enum": [ + "Formatting" + ] + }, + { + "description": "Not all input bytes were consumed in [`deserialize`].", + "type": "string", + "enum": [ + "LeftOverBytes" + ] + }, + { + "description": "Out of memory error.", + "type": "string", + "enum": [ + "OutOfMemory" + ] + }, + { + "description": "No serialized representation is available for a value.", + "type": "string", + "enum": [ + "NotRepresentable" + ] + }, + { + "description": "Exceeded a recursion depth limit.", + "type": "string", + "enum": [ + "ExceededRecursionDepth" + ] + } + ] + }, + "TypeMismatch": { + "description": "An error struct representing a type mismatch in [`StoredValue`](crate::StoredValue) operations.", + "type": "object", + "required": [ + "expected", + "found" + ], + "properties": { + "expected": { + "description": "The name of the expected type.", + "type": "string" + }, + "found": { + "description": "The actual type found.", + "type": "string" + } + } + }, + "AccountIdentifier": { + "description": "Identifier of an account.", + "anyOf": [ + { + 
"description": "The public key of an account", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + { + "description": "The account hash of an account", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + } + ] + }, + "BlockIdentifier": { + "description": "Identifier for possible ways to retrieve a block.", + "oneOf": [ + { + "description": "Identify and retrieve the block with its hash.", + "type": "object", + "required": [ + "Hash" + ], + "properties": { + "Hash": { + "$ref": "#/components/schemas/BlockHash" + } + }, + "additionalProperties": false + }, + { + "description": "Identify and retrieve the block with its height.", + "type": "object", + "required": [ + "Height" + ], + "properties": { + "Height": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + } + ] + }, + "EntityIdentifier": { + "description": "Identifier of an addressable entity.", + "oneOf": [ + { + "description": "The hash of a contract.", + "type": "object", + "required": [ + "ContractHash" + ], + "properties": { + "ContractHash": { + "$ref": "#/components/schemas/ContractHash" + } + }, + "additionalProperties": false + }, + { + "description": "The public key of an account.", + "type": "object", + "required": [ + "PublicKey" + ], + "properties": { + "PublicKey": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + { + "description": "The account hash of an account.", + "type": "object", + "required": [ + "AccountHash" + ], + "properties": { + "AccountHash": { + "$ref": "#/components/schemas/AccountHash" + } + }, + "additionalProperties": false + }, + { + "description": "The address of an addressable entity.", + "type": "object", + "required": [ + "EntityAddr" + ], + "properties": { + "EntityAddr": { + "$ref": "#/components/schemas/EntityAddr" + } + }, + "additionalProperties": false + } + ] + }, + "EntityAddr": { + "description": "The address for an 
AddressableEntity which contains the 32 bytes and tagging information.", + "anyOf": [ + { + "description": "The address for a system entity account or contract.", + "type": "string" + }, + { + "description": "The address of an entity that corresponds to an Account.", + "type": "string" + }, + { + "description": "The address of an entity that corresponds to a Userland smart contract.", + "type": "string" + } + ] + }, + "EntityWithBackwardCompat": { + "description": "An addressable entity or a legacy account or contract.", + "oneOf": [ + { + "description": "An addressable entity.", + "type": "object", + "required": [ + "AddressableEntity" + ], + "properties": { + "AddressableEntity": { + "type": "object", + "required": [ + "entity", + "entry_points", + "named_keys" + ], + "properties": { + "entity": { + "description": "The addressable entity.", + "allOf": [ + { + "$ref": "#/components/schemas/AddressableEntity" + } + ] + }, + "named_keys": { + "description": "The named keys of the addressable entity.", + "allOf": [ + { + "$ref": "#/components/schemas/NamedKeys" + } + ] + }, + "entry_points": { + "description": "The entry points of the addressable entity.", + "type": "array", + "items": { + "$ref": "#/components/schemas/EntryPointValue" + } + }, + "bytecode": { + "description": "The bytecode of the addressable entity. 
Returned when `include_bytecode` is `true`.", + "anyOf": [ + { + "$ref": "#/components/schemas/ByteCodeWithProof" + }, + { + "type": "null" + } + ] + } + } + } + }, + "additionalProperties": false + }, + { + "description": "An account.", + "type": "object", + "required": [ + "Account" + ], + "properties": { + "Account": { + "$ref": "#/components/schemas/Account" + } + }, + "additionalProperties": false + }, + { + "description": "A contract.", + "type": "object", + "required": [ + "Contract" + ], + "properties": { + "Contract": { + "type": "object", + "required": [ + "contract" + ], + "properties": { + "contract": { + "description": "The contract.", + "allOf": [ + { + "$ref": "#/components/schemas/Contract" + } + ] + }, + "wasm": { + "description": "The Wasm code of the contract. Returned when `include_bytecode` is `true`.", + "anyOf": [ + { + "$ref": "#/components/schemas/ContractWasmWithProof" + }, + { + "type": "null" + } + ] + } + } + } + }, + "additionalProperties": false + } + ] + }, + "ByteCodeWithProof": { + "description": "Byte code of an entity with a proof.", + "type": "object", + "required": [ + "code", + "merkle_proof" + ], + "properties": { + "code": { + "$ref": "#/components/schemas/ByteCode" + }, + "merkle_proof": { + "type": "string" + } + } + }, + "ContractWasmWithProof": { + "description": "Wasm code of a contract with a proof.", + "type": "object", + "required": [ + "merkle_proof", + "wasm" + ], + "properties": { + "wasm": { + "$ref": "#/components/schemas/ContractWasm" + }, + "merkle_proof": { + "type": "string" + } + } + }, + "PackageIdentifier": { + "description": "Identifier of a package.", + "oneOf": [ + { + "description": "The address of a package.", + "type": "object", + "required": [ + "PackageAddr" + ], + "properties": { + "PackageAddr": { + "$ref": "#/components/schemas/PackageHash" + } + }, + "additionalProperties": false + }, + { + "description": "The hash of a contract package.", + "type": "object", + "required": [ + 
"ContractPackageHash" + ], + "properties": { + "ContractPackageHash": { + "$ref": "#/components/schemas/ContractPackageHash" + } + }, + "additionalProperties": false + } + ] + }, + "PackageWithBackwardCompat": { + "description": "A package or a legacy contract package.", + "oneOf": [ + { + "description": "A package.", + "type": "object", + "required": [ + "Package" + ], + "properties": { + "Package": { + "$ref": "#/components/schemas/Package" + } + }, + "additionalProperties": false + }, + { + "description": "A contract package.", + "type": "object", + "required": [ + "ContractPackage" + ], + "properties": { + "ContractPackage": { + "$ref": "#/components/schemas/ContractPackage" + } + }, + "additionalProperties": false + } + ] + }, + "DictionaryIdentifier": { + "description": "Options for dictionary item lookups.", + "oneOf": [ + { + "description": "Lookup a dictionary item via an Account's named keys.", + "type": "object", + "required": [ + "AccountNamedKey" + ], + "properties": { + "AccountNamedKey": { + "type": "object", + "required": [ + "dictionary_item_key", + "dictionary_name", + "key" + ], + "properties": { + "key": { + "description": "The account key as a formatted string whose named keys contains dictionary_name.", + "type": "string" + }, + "dictionary_name": { + "description": "The named key under which the dictionary seed URef is stored.", + "type": "string" + }, + "dictionary_item_key": { + "description": "The dictionary item key formatted as a string.", + "type": "string" + } + } + } + }, + "additionalProperties": false + }, + { + "description": "Lookup a dictionary item via a Contract's named keys.", + "type": "object", + "required": [ + "ContractNamedKey" + ], + "properties": { + "ContractNamedKey": { + "type": "object", + "required": [ + "dictionary_item_key", + "dictionary_name", + "key" + ], + "properties": { + "key": { + "description": "The contract key as a formatted string whose named keys contains dictionary_name.", + "type": "string" + }, + 
"dictionary_name": { + "description": "The named key under which the dictionary seed URef is stored.", + "type": "string" + }, + "dictionary_item_key": { + "description": "The dictionary item key formatted as a string.", + "type": "string" + } + } + } + }, + "additionalProperties": false + }, + { + "description": "Lookup a dictionary item via an entities named keys.", + "type": "object", + "required": [ + "EntityNamedKey" + ], + "properties": { + "EntityNamedKey": { + "type": "object", + "required": [ + "dictionary_item_key", + "dictionary_name", + "key" + ], + "properties": { + "key": { + "description": "The entity address formatted as a string.", + "type": "string" + }, + "dictionary_name": { + "description": "The named key under which the dictionary seed URef is stored.", + "type": "string" + }, + "dictionary_item_key": { + "description": "The dictionary item key formatted as a string.", + "type": "string" + } + } + } + }, + "additionalProperties": false + }, + { + "description": "Lookup a dictionary item via its seed URef.", + "type": "object", + "required": [ + "URef" + ], + "properties": { + "URef": { + "type": "object", + "required": [ + "dictionary_item_key", + "seed_uref" + ], + "properties": { + "seed_uref": { + "description": "The dictionary's seed URef.", + "type": "string" + }, + "dictionary_item_key": { + "description": "The dictionary item key formatted as a string.", + "type": "string" + } + } + } + }, + "additionalProperties": false + }, + { + "description": "Lookup a dictionary item via its unique key.", + "type": "object", + "required": [ + "Dictionary" + ], + "properties": { + "Dictionary": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "GlobalStateIdentifier": { + "description": "Identifier for possible ways to query Global State", + "oneOf": [ + { + "description": "Query using a block hash.", + "type": "object", + "required": [ + "BlockHash" + ], + "properties": { + "BlockHash": { + "$ref": 
"#/components/schemas/BlockHash" + } + }, + "additionalProperties": false + }, + { + "description": "Query using a block height.", + "type": "object", + "required": [ + "BlockHeight" + ], + "properties": { + "BlockHeight": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "Query using the state root hash.", + "type": "object", + "required": [ + "StateRootHash" + ], + "properties": { + "StateRootHash": { + "$ref": "#/components/schemas/Digest" + } + }, + "additionalProperties": false + } + ] + }, + "BlockHeader": { + "description": "The versioned header portion of a block. It encapsulates different variants of the BlockHeader struct.", + "oneOf": [ + { + "description": "The legacy, initial version of the header portion of a block.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/BlockHeaderV1" + } + }, + "additionalProperties": false + }, + { + "description": "The version 2 of the header portion of a block.", + "type": "object", + "required": [ + "Version2" + ], + "properties": { + "Version2": { + "$ref": "#/components/schemas/BlockHeaderV2" + } + }, + "additionalProperties": false + } + ] + }, + "BlockHeaderV1": { + "description": "The header portion of a block.", + "type": "object", + "required": [ + "accumulated_seed", + "body_hash", + "era_id", + "height", + "parent_hash", + "protocol_version", + "random_bit", + "state_root_hash", + "timestamp" + ], + "properties": { + "parent_hash": { + "description": "The parent block's hash.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "state_root_hash": { + "description": "The root hash of global state after the deploys in this block have been executed.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "body_hash": { + "description": "The hash of the block's body.", + "allOf": [ + { + "$ref": 
"#/components/schemas/Digest" + } + ] + }, + "random_bit": { + "description": "A random bit needed for initializing a future era.", + "type": "boolean" + }, + "accumulated_seed": { + "description": "A seed needed for initializing a future era.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "era_end": { + "description": "The `EraEnd` of a block if it is a switch block.", + "anyOf": [ + { + "$ref": "#/components/schemas/EraEndV1" + }, + { + "type": "null" + } + ] + }, + "timestamp": { + "description": "The timestamp from when the block was proposed.", + "allOf": [ + { + "$ref": "#/components/schemas/Timestamp" + } + ] + }, + "era_id": { + "description": "The era ID in which this block was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "height": { + "description": "The height of this block, i.e. the number of ancestors.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "protocol_version": { + "description": "The protocol version of the network from when this block was created.", + "allOf": [ + { + "$ref": "#/components/schemas/ProtocolVersion" + } + ] + } + } + }, + "EraEndV1": { + "description": "Information related to the end of an era, and validator weights for the following era.", + "type": "object", + "required": [ + "era_report", + "next_era_validator_weights" + ], + "properties": { + "era_report": { + "description": "Equivocation, reward and validator inactivity information.", + "allOf": [ + { + "$ref": "#/components/schemas/EraReport_for_PublicKey" + } + ] + }, + "next_era_validator_weights": { + "description": "The validators for the upcoming era and their respective weights.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_ValidatorWeight" + } + ] + } + } + }, + "EraReport_for_PublicKey": { + "description": "Equivocation, reward and validator inactivity information.", + "type": "object", + "required": [ + "equivocators", + "inactive_validators", + "rewards" + ], + 
"properties": { + "equivocators": { + "description": "The set of equivocators.", + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "rewards": { + "description": "Rewards for finalization of earlier blocks.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_EraReward" + } + ] + }, + "inactive_validators": { + "description": "Validators that haven't produced any unit during the era.", + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKey" + } + } + } + }, + "Array_of_EraReward": { + "type": "array", + "items": { + "$ref": "#/components/schemas/EraReward" + } + }, + "EraReward": { + "description": "A validator's public key paired with a measure of the value of its contribution to consensus, as a fraction of the configured maximum block reward.", + "type": "object", + "required": [ + "amount", + "validator" + ], + "properties": { + "validator": { + "description": "The validator's public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "amount": { + "description": "The reward amount.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + } + }, + "Array_of_ValidatorWeight": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ValidatorWeight" + } + }, + "ValidatorWeight": { + "description": "A validator's public key paired with its weight, i.e. 
the total number of motes staked by it and its delegators.", + "type": "object", + "required": [ + "validator", + "weight" + ], + "properties": { + "validator": { + "description": "The validator's public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "weight": { + "description": "The validator's weight.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + } + }, + "BlockHeaderV2": { + "description": "The header portion of a block.", + "type": "object", + "required": [ + "accumulated_seed", + "body_hash", + "current_gas_price", + "era_id", + "height", + "parent_hash", + "proposer", + "protocol_version", + "random_bit", + "state_root_hash", + "timestamp" + ], + "properties": { + "parent_hash": { + "description": "The parent block's hash.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "state_root_hash": { + "description": "The root hash of global state after the deploys in this block have been executed.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "body_hash": { + "description": "The hash of the block's body.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "random_bit": { + "description": "A random bit needed for initializing a future era.", + "type": "boolean" + }, + "accumulated_seed": { + "description": "A seed needed for initializing a future era.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "era_end": { + "description": "The `EraEnd` of a block if it is a switch block.", + "anyOf": [ + { + "$ref": "#/components/schemas/EraEndV2" + }, + { + "type": "null" + } + ] + }, + "timestamp": { + "description": "The timestamp from when the block was proposed.", + "allOf": [ + { + "$ref": "#/components/schemas/Timestamp" + } + ] + }, + "era_id": { + "description": "The era ID in which this block was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "height": { + "description": 
"The height of this block, i.e. the number of ancestors.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "protocol_version": { + "description": "The protocol version of the network from when this block was created.", + "allOf": [ + { + "$ref": "#/components/schemas/ProtocolVersion" + } + ] + }, + "proposer": { + "description": "The public key of the validator which proposed the block.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "current_gas_price": { + "description": "The gas price of the era", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "last_switch_block_hash": { + "description": "The most recent switch block hash.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockHash" + }, + { + "type": "null" + } + ] + } + } + }, + "EraEndV2": { + "description": "Information related to the end of an era, and validator weights for the following era.", + "type": "object", + "required": [ + "equivocators", + "inactive_validators", + "next_era_gas_price", + "next_era_validator_weights", + "rewards" + ], + "properties": { + "equivocators": { + "description": "The set of equivocators.", + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "inactive_validators": { + "description": "Validators that haven't produced any unit during the era.", + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "next_era_validator_weights": { + "description": "The validators for the upcoming era and their respective weights.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_ValidatorWeight" + } + ] + }, + "rewards": { + "description": "The rewards distributed to the validators.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/U512" + } + } + }, + "next_era_gas_price": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + } + }, + "PurseIdentifier": { + "description": 
"Identifier of a purse.", + "oneOf": [ + { + "description": "The main purse of the account identified by this public key.", + "type": "object", + "required": [ + "main_purse_under_public_key" + ], + "properties": { + "main_purse_under_public_key": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + { + "description": "The main purse of the account identified by this account hash.", + "type": "object", + "required": [ + "main_purse_under_account_hash" + ], + "properties": { + "main_purse_under_account_hash": { + "$ref": "#/components/schemas/AccountHash" + } + }, + "additionalProperties": false + }, + { + "description": "The main purse of the account identified by this entity address.", + "type": "object", + "required": [ + "main_purse_under_entity_addr" + ], + "properties": { + "main_purse_under_entity_addr": { + "$ref": "#/components/schemas/EntityAddr" + } + }, + "additionalProperties": false + }, + { + "description": "The purse identified by this URef.", + "type": "object", + "required": [ + "purse_uref" + ], + "properties": { + "purse_uref": { + "$ref": "#/components/schemas/URef" + } + }, + "additionalProperties": false + } + ] + }, + "BalanceHoldWithProof": { + "type": "object", + "required": [ + "amount", + "proof", + "time" + ], + "properties": { + "time": { + "description": "The block time at which the hold was created.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockTime" + } + ] + }, + "amount": { + "description": "The amount in the hold.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "proof": { + "description": "A proof that the given value is present in the Merkle trie.", + "type": "string" + } + } + }, + "Peers": { + "description": "Map of peer IDs to network addresses.", + "type": "array", + "items": { + "$ref": "#/components/schemas/PeerEntry" + } + }, + "PeerEntry": { + "description": "Node peer entry.", + "type": "object", + "required": [ + "address", + "node_id" + ], + 
"properties": { + "node_id": { + "description": "Node id.", + "type": "string" + }, + "address": { + "description": "Node address.", + "type": "string" + } + }, + "additionalProperties": false + }, + "MinimalBlockInfo": { + "description": "Minimal info about a `Block` needed to satisfy the node status request.", + "type": "object", + "required": [ + "creator", + "era_id", + "hash", + "height", + "state_root_hash", + "timestamp" + ], + "properties": { + "hash": { + "$ref": "#/components/schemas/BlockHash" + }, + "timestamp": { + "$ref": "#/components/schemas/Timestamp" + }, + "era_id": { + "$ref": "#/components/schemas/EraId" + }, + "height": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "state_root_hash": { + "$ref": "#/components/schemas/Digest" + }, + "creator": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + "NextUpgrade": { + "description": "Information about the next protocol upgrade.", + "type": "object", + "required": [ + "activation_point", + "protocol_version" + ], + "properties": { + "activation_point": { + "$ref": "#/components/schemas/ActivationPoint" + }, + "protocol_version": { + "$ref": "#/components/schemas/ProtocolVersion" + } + } + }, + "ActivationPoint": { + "description": "The first era to which the associated protocol version applies.", + "anyOf": [ + { + "description": "Era id.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + { + "description": "Genesis timestamp.", + "allOf": [ + { + "$ref": "#/components/schemas/Timestamp" + } + ] + } + ] + }, + "AvailableBlockRange": { + "description": "An unbroken, inclusive range of blocks.", + "type": "object", + "required": [ + "high", + "low" + ], + "properties": { + "low": { + "description": "The inclusive lower bound of the range.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "high": { + "description": "The inclusive upper bound of the range.", + "type": "integer", + "format": "uint64", 
+ "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "BlockSynchronizerStatus": { + "description": "The status of the block synchronizer.", + "type": "object", + "properties": { + "historical": { + "description": "The status of syncing a historical block, if any.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSyncStatus" + }, + { + "type": "null" + } + ] + }, + "forward": { + "description": "The status of syncing a forward block, if any.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSyncStatus" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "BlockSyncStatus": { + "description": "The status of syncing an individual block.", + "type": "object", + "required": [ + "acquisition_state", + "block_hash" + ], + "properties": { + "block_hash": { + "description": "The block hash.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "block_height": { + "description": "The height of the block, if known.", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "acquisition_state": { + "description": "The state of acquisition of the data associated with the block.", + "type": "string" + } + }, + "additionalProperties": false + }, + "EraIdentifier": { + "description": "Identifier for an era.", + "oneOf": [ + { + "type": "object", + "required": [ + "Era" + ], + "properties": { + "Era": { + "$ref": "#/components/schemas/EraId" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "Block" + ], + "properties": { + "Block": { + "$ref": "#/components/schemas/BlockIdentifier" + } + }, + "additionalProperties": false + } + ] + }, + "JsonValidatorChanges": { + "description": "The changes in a validator's status.", + "type": "object", + "required": [ + "public_key", + "status_changes" + ], + "properties": { + "public_key": { + "description": "The public key of the validator.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } 
+ ] + }, + "status_changes": { + "description": "The set of changes to the validator's status.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonValidatorStatusChange" + } + } + }, + "additionalProperties": false + }, + "JsonValidatorStatusChange": { + "description": "A single change to a validator's status in the given era.", + "type": "object", + "required": [ + "era_id", + "validator_change" + ], + "properties": { + "era_id": { + "description": "The era in which the change occurred.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "validator_change": { + "description": "The change in validator status.", + "allOf": [ + { + "$ref": "#/components/schemas/ValidatorChange" + } + ] + } + }, + "additionalProperties": false + }, + "ValidatorChange": { + "description": "A change to a validator's status between two eras.", + "oneOf": [ + { + "description": "The validator got newly added to the validator set.", + "type": "string", + "enum": [ + "Added" + ] + }, + { + "description": "The validator was removed from the validator set.", + "type": "string", + "enum": [ + "Removed" + ] + }, + { + "description": "The validator was banned from this era.", + "type": "string", + "enum": [ + "Banned" + ] + }, + { + "description": "The validator was excluded from proposing new blocks in this era.", + "type": "string", + "enum": [ + "CannotPropose" + ] + }, + { + "description": "We saw the validator misbehave in this era.", + "type": "string", + "enum": [ + "SeenAsFaulty" + ] + } + ] + }, + "ChainspecRawBytes": { + "description": "The raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files.", + "type": "object", + "required": [ + "chainspec_bytes" + ], + "properties": { + "chainspec_bytes": { + "description": "Raw bytes of the current chainspec.toml file.", + "allOf": [ + { + "$ref": "#/components/schemas/Bytes" + } + ] + }, + "maybe_genesis_accounts_bytes": { + "description": "Raw bytes of the current genesis 
accounts.toml file.", + "anyOf": [ + { + "$ref": "#/components/schemas/Bytes" + }, + { + "type": "null" + } + ] + }, + "maybe_global_state_bytes": { + "description": "Raw bytes of the current global_state.toml file.", + "anyOf": [ + { + "$ref": "#/components/schemas/Bytes" + }, + { + "type": "null" + } + ] + } + } + }, + "JsonBlockWithSignatures": { + "description": "A JSON-friendly representation of a block and the signatures for that block.", + "type": "object", + "required": [ + "block", + "proofs" + ], + "properties": { + "block": { + "description": "The block.", + "allOf": [ + { + "$ref": "#/components/schemas/Block" + } + ] + }, + "proofs": { + "description": "The proofs of the block, i.e. a collection of validators' signatures of the block hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_BlockProof" + } + ] + } + }, + "additionalProperties": false + }, + "Block": { + "description": "A block after execution.", + "oneOf": [ + { + "description": "The legacy, initial version of the block.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/BlockV1" + } + }, + "additionalProperties": false + }, + { + "description": "The version 2 of the block.", + "type": "object", + "required": [ + "Version2" + ], + "properties": { + "Version2": { + "$ref": "#/components/schemas/BlockV2" + } + }, + "additionalProperties": false + } + ] + }, + "BlockV1": { + "description": "A block after execution, with the resulting global state root hash. This is the core component of the Casper linear blockchain. 
Version 1.", + "type": "object", + "required": [ + "body", + "hash", + "header" + ], + "properties": { + "hash": { + "description": "The block hash identifying this block.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "header": { + "description": "The header portion of the block.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHeaderV1" + } + ] + }, + "body": { + "description": "The body portion of the block.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockBodyV1" + } + ] + } + } + }, + "BlockBodyV1": { + "description": "The body portion of a block. Version 1.", + "type": "object", + "required": [ + "deploy_hashes", + "proposer", + "transfer_hashes" + ], + "properties": { + "proposer": { + "description": "The public key of the validator which proposed the block.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "deploy_hashes": { + "description": "The deploy hashes of the non-transfer deploys within the block.", + "type": "array", + "items": { + "$ref": "#/components/schemas/DeployHash" + } + }, + "transfer_hashes": { + "description": "The deploy hashes of the transfers within the block.", + "type": "array", + "items": { + "$ref": "#/components/schemas/DeployHash" + } + } + } + }, + "BlockV2": { + "description": "A block after execution, with the resulting global state root hash. This is the core component of the Casper linear blockchain. 
Version 2.", + "type": "object", + "required": [ + "body", + "hash", + "header" + ], + "properties": { + "hash": { + "description": "The block hash identifying this block.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "header": { + "description": "The header portion of the block.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHeaderV2" + } + ] + }, + "body": { + "description": "The body portion of the block.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockBodyV2" + } + ] + } + } + }, + "BlockBodyV2": { + "description": "The body portion of a block. Version 2.", + "type": "object", + "required": [ + "rewarded_signatures", + "transactions" + ], + "properties": { + "transactions": { + "description": "Map of transactions mapping categories to a list of transaction hashes.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TransactionHash" + } + } + }, + "rewarded_signatures": { + "description": "List of identifiers for finality signatures for a particular past block.", + "allOf": [ + { + "$ref": "#/components/schemas/RewardedSignatures" + } + ] + } + } + }, + "RewardedSignatures": { + "description": "Describes finality signatures that will be rewarded in a block. Consists of a vector of `SingleBlockRewardedSignatures`, each of which describes signatures for a single ancestor block. 
The first entry represents the signatures for the parent block, the second for the parent of the parent, and so on.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SingleBlockRewardedSignatures" + } + }, + "SingleBlockRewardedSignatures": { + "description": "List of identifiers for finality signatures for a particular past block.\n\nThat past block height is current_height - signature_rewards_max_delay, the latter being defined in the chainspec.\n\nWe need to wait for a few blocks to pass (`signature_rewards_max_delay`) to store the finality signers because we need a bit of time to get the block finality.", + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + }, + "Array_of_BlockProof": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BlockProof" + } + }, + "BlockProof": { + "description": "A validator's public key paired with a corresponding signature of a given block hash.", + "type": "object", + "required": [ + "public_key", + "signature" + ], + "properties": { + "public_key": { + "description": "The validator's public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "signature": { + "description": "The validator's signature.", + "allOf": [ + { + "$ref": "#/components/schemas/Signature" + } + ] + } + } + }, + "EraSummary": { + "description": "The summary of an era", + "type": "object", + "required": [ + "block_hash", + "era_id", + "merkle_proof", + "state_root_hash", + "stored_value" + ], + "properties": { + "block_hash": { + "description": "The block hash", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "era_id": { + "description": "The era id", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "stored_value": { + "description": "The StoredValue containing era information", + "allOf": [ + { + "$ref": "#/components/schemas/StoredValue" + } + ] + }, + "state_root_hash": { + "description": "Hex-encoded 
hash of the state root", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "merkle_proof": { + "description": "The Merkle proof", + "type": "string" + } + }, + "additionalProperties": false + }, + "AuctionState": { + "description": "Data structure summarizing auction contract data.", + "type": "object", + "required": [ + "bids", + "block_height", + "era_validators", + "state_root_hash" + ], + "properties": { + "state_root_hash": { + "description": "Global state hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "block_height": { + "description": "Block height.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "era_validators": { + "description": "Era validators.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonEraValidators" + } + }, + "bids": { + "description": "All bids.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_PublicKeyAndBid" + } + ] + } + }, + "additionalProperties": false + }, + "JsonEraValidators": { + "description": "The validators for the given era.", + "type": "object", + "required": [ + "era_id", + "validator_weights" + ], + "properties": { + "era_id": { + "$ref": "#/components/schemas/EraId" + }, + "validator_weights": { + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonValidatorWeight" + } + } + }, + "additionalProperties": false + }, + "JsonValidatorWeight": { + "description": "A validator's weight.", + "type": "object", + "required": [ + "public_key", + "weight" + ], + "properties": { + "public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "weight": { + "$ref": "#/components/schemas/U512" + } + }, + "additionalProperties": false + }, + "Array_of_PublicKeyAndBid": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKeyAndBid" + } + }, + "PublicKeyAndBid": { + "description": "A bid associated with the given public key.", + "type": "object", + "required": [ + "bid", + "public_key" + ], + 
"properties": { + "public_key": { + "description": "The public key of the bidder.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "bid": { + "description": "The bid details.", + "allOf": [ + { + "$ref": "#/components/schemas/Bid" + } + ] + } + } + }, + "AuctionStateV2": { + "description": "Data structure summarizing auction contract data.", + "type": "object", + "required": [ + "bids", + "block_height", + "era_validators", + "state_root_hash" + ], + "properties": { + "state_root_hash": { + "description": "Global state hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "block_height": { + "description": "Block height.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "era_validators": { + "description": "Era validators.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonEraValidators" + } + }, + "bids": { + "description": "All bids.", + "type": "array", + "items": { + "$ref": "#/components/schemas/BidKindWrapper" + } + } + }, + "additionalProperties": false + }, + "BidKindWrapper": { + "type": "object", + "required": [ + "bid", + "public_key" + ], + "properties": { + "public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "bid": { + "$ref": "#/components/schemas/BidKind" + } + }, + "additionalProperties": false + } + } + } +} \ No newline at end of file diff --git a/resources/test/schema_chainspec_bytes.json b/resources/test/schema_chainspec_bytes.json new file mode 100644 index 00000000..4ce0a7ac --- /dev/null +++ b/resources/test/schema_chainspec_bytes.json @@ -0,0 +1,69 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "GetChainspecResult", + "description": "Result for the \"info_get_chainspec\" RPC.", + "type": "object", + "required": [ + "api_version", + "chainspec_bytes" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "chainspec_bytes": { + "description": "The chainspec 
file bytes.", + "allOf": [ + { + "$ref": "#/definitions/ChainspecRawBytes" + } + ] + } + }, + "definitions": { + "ChainspecRawBytes": { + "description": "The raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files.", + "type": "object", + "required": [ + "chainspec_bytes" + ], + "properties": { + "chainspec_bytes": { + "description": "Raw bytes of the current chainspec.toml file.", + "allOf": [ + { + "$ref": "#/definitions/Bytes" + } + ] + }, + "maybe_genesis_accounts_bytes": { + "description": "Raw bytes of the current genesis accounts.toml file.", + "anyOf": [ + { + "$ref": "#/definitions/Bytes" + }, + { + "type": "null" + } + ] + }, + "maybe_global_state_bytes": { + "description": "Raw bytes of the current global_state.toml file.", + "anyOf": [ + { + "$ref": "#/definitions/Bytes" + }, + { + "type": "null" + } + ] + } + } + }, + "Bytes": { + "description": "Hex-encoded bytes.", + "type": "string" + } + } +} \ No newline at end of file diff --git a/resources/test/schema_rpc_schema.json b/resources/test/schema_rpc_schema.json new file mode 100644 index 00000000..7e0bf161 --- /dev/null +++ b/resources/test/schema_rpc_schema.json @@ -0,0 +1,642 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "OpenRpcSchema", + "description": "The main schema for the casper node's RPC server, compliant with [the OpenRPC Specification](https://spec.open-rpc.org).", + "type": "object", + "required": [ + "components", + "info", + "methods", + "openrpc", + "servers" + ], + "properties": { + "openrpc": { + "type": "string" + }, + "info": { + "$ref": "#/definitions/OpenRpcInfoField" + }, + "servers": { + "type": "array", + "items": { + "$ref": "#/definitions/OpenRpcServerEntry" + } + }, + "methods": { + "type": "array", + "items": { + "$ref": "#/definitions/Method" + } + }, + "components": { + "$ref": "#/definitions/Components" + } + }, + "definitions": { + "OpenRpcInfoField": { + "type": "object", + "required": [ + "contact", + 
"description", + "license", + "title", + "version" + ], + "properties": { + "version": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "contact": { + "$ref": "#/definitions/OpenRpcContactField" + }, + "license": { + "$ref": "#/definitions/OpenRpcLicenseField" + } + } + }, + "OpenRpcContactField": { + "type": "object", + "required": [ + "name", + "url" + ], + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "OpenRpcLicenseField": { + "type": "object", + "required": [ + "name", + "url" + ], + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "OpenRpcServerEntry": { + "type": "object", + "required": [ + "name", + "url" + ], + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "Method": { + "description": "The struct containing the documentation for the RPCs.", + "type": "object", + "required": [ + "examples", + "name", + "params", + "result", + "summary" + ], + "properties": { + "name": { + "type": "string" + }, + "summary": { + "type": "string" + }, + "params": { + "type": "array", + "items": { + "$ref": "#/definitions/SchemaParam" + } + }, + "result": { + "$ref": "#/definitions/ResponseResult" + }, + "examples": { + "type": "array", + "items": { + "$ref": "#/definitions/Example" + } + } + } + }, + "SchemaParam": { + "type": "object", + "required": [ + "name", + "required", + "schema" + ], + "properties": { + "name": { + "type": "string" + }, + "schema": { + "$ref": "#/definitions/Schema" + }, + "required": { + "type": "boolean" + } + } + }, + "Schema": { + "description": "A JSON Schema.", + "anyOf": [ + { + "description": "A trivial boolean JSON Schema.\n\nThe schema `true` matches everything (always passes validation), whereas the schema `false` matches nothing (always fails validation).", + "type": "boolean" + }, + { + "description": "A JSON Schema object.", + 
"allOf": [ + { + "$ref": "#/definitions/SchemaObject" + } + ] + } + ] + }, + "SchemaObject": { + "description": "A JSON Schema object.", + "type": "object", + "properties": { + "type": { + "description": "The `type` keyword.\n\nSee [JSON Schema Validation 6.1.1. \"type\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.1.1) and [JSON Schema 4.2.1. Instance Data Model](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-4.2.1).", + "anyOf": [ + { + "$ref": "#/definitions/SingleOrVec_for_InstanceType" + }, + { + "type": "null" + } + ] + }, + "format": { + "description": "The `format` keyword.\n\nSee [JSON Schema Validation 7. A Vocabulary for Semantic Content With \"format\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-7).", + "type": [ + "string", + "null" + ] + }, + "enum": { + "description": "The `enum` keyword.\n\nSee [JSON Schema Validation 6.1.2. \"enum\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.1.2)", + "type": [ + "array", + "null" + ], + "items": true + }, + "const": { + "description": "The `const` keyword.\n\nSee [JSON Schema Validation 6.1.3. \"const\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.1.3)" + }, + "$ref": { + "description": "The `$ref` keyword.\n\nSee [JSON Schema 8.2.4.1. Direct References with \"$ref\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-8.2.4.1).", + "type": [ + "string", + "null" + ] + }, + "$id": { + "description": "The `$id` keyword.\n\nSee [JSON Schema 8.2.2. The \"$id\" Keyword](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-8.2.2).", + "type": [ + "string", + "null" + ] + }, + "title": { + "description": "The `title` keyword.\n\nSee [JSON Schema Validation 9.1. 
\"title\" and \"description\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.1).", + "type": [ + "string", + "null" + ] + }, + "description": { + "description": "The `description` keyword.\n\nSee [JSON Schema Validation 9.1. \"title\" and \"description\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.1).", + "type": [ + "string", + "null" + ] + }, + "default": { + "description": "The `default` keyword.\n\nSee [JSON Schema Validation 9.2. \"default\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.2)." + }, + "deprecated": { + "description": "The `deprecated` keyword.\n\nSee [JSON Schema Validation 9.3. \"deprecated\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.3).", + "type": "boolean" + }, + "readOnly": { + "description": "The `readOnly` keyword.\n\nSee [JSON Schema Validation 9.4. \"readOnly\" and \"writeOnly\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.4).", + "type": "boolean" + }, + "writeOnly": { + "description": "The `writeOnly` keyword.\n\nSee [JSON Schema Validation 9.4. \"readOnly\" and \"writeOnly\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.4).", + "type": "boolean" + }, + "examples": { + "description": "The `examples` keyword.\n\nSee [JSON Schema Validation 9.5. \"examples\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-9.5).", + "type": "array", + "items": true + }, + "allOf": { + "description": "The `allOf` keyword.\n\nSee [JSON Schema 9.2.1.1. \"allOf\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.1.1).", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Schema" + } + }, + "anyOf": { + "description": "The `anyOf` keyword.\n\nSee [JSON Schema 9.2.1.2. 
\"anyOf\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.1.2).", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Schema" + } + }, + "oneOf": { + "description": "The `oneOf` keyword.\n\nSee [JSON Schema 9.2.1.3. \"oneOf\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.1.3).", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Schema" + } + }, + "not": { + "description": "The `not` keyword.\n\nSee [JSON Schema 9.2.1.4. \"not\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.1.4).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "if": { + "description": "The `if` keyword.\n\nSee [JSON Schema 9.2.2.1. \"if\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.2.1).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "then": { + "description": "The `then` keyword.\n\nSee [JSON Schema 9.2.2.2. \"then\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.2.2).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "else": { + "description": "The `else` keyword.\n\nSee [JSON Schema 9.2.2.3. \"else\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.2.2.3).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "multipleOf": { + "description": "The `multipleOf` keyword.\n\nSee [JSON Schema Validation 6.2.1. \"multipleOf\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.2.1).", + "type": [ + "number", + "null" + ], + "format": "double" + }, + "maximum": { + "description": "The `maximum` keyword.\n\nSee [JSON Schema Validation 6.2.2. 
\"maximum\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.2.2).", + "type": [ + "number", + "null" + ], + "format": "double" + }, + "exclusiveMaximum": { + "description": "The `exclusiveMaximum` keyword.\n\nSee [JSON Schema Validation 6.2.3. \"exclusiveMaximum\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.2.3).", + "type": [ + "number", + "null" + ], + "format": "double" + }, + "minimum": { + "description": "The `minimum` keyword.\n\nSee [JSON Schema Validation 6.2.4. \"minimum\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.2.4).", + "type": [ + "number", + "null" + ], + "format": "double" + }, + "exclusiveMinimum": { + "description": "The `exclusiveMinimum` keyword.\n\nSee [JSON Schema Validation 6.2.5. \"exclusiveMinimum\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.2.5).", + "type": [ + "number", + "null" + ], + "format": "double" + }, + "maxLength": { + "description": "The `maxLength` keyword.\n\nSee [JSON Schema Validation 6.3.1. \"maxLength\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.3.1).", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "minLength": { + "description": "The `minLength` keyword.\n\nSee [JSON Schema Validation 6.3.2. \"minLength\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.3.2).", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "pattern": { + "description": "The `pattern` keyword.\n\nSee [JSON Schema Validation 6.3.3. \"pattern\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.3.3).", + "type": [ + "string", + "null" + ] + }, + "items": { + "description": "The `items` keyword.\n\nSee [JSON Schema 9.3.1.1. 
\"items\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.1.1).", + "anyOf": [ + { + "$ref": "#/definitions/SingleOrVec_for_Schema" + }, + { + "type": "null" + } + ] + }, + "additionalItems": { + "description": "The `additionalItems` keyword.\n\nSee [JSON Schema 9.3.1.2. \"additionalItems\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.1.2).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "maxItems": { + "description": "The `maxItems` keyword.\n\nSee [JSON Schema Validation 6.4.1. \"maxItems\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.4.1).", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "minItems": { + "description": "The `minItems` keyword.\n\nSee [JSON Schema Validation 6.4.2. \"minItems\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.4.2).", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "uniqueItems": { + "description": "The `uniqueItems` keyword.\n\nSee [JSON Schema Validation 6.4.3. \"uniqueItems\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.4.3).", + "type": [ + "boolean", + "null" + ] + }, + "contains": { + "description": "The `contains` keyword.\n\nSee [JSON Schema 9.3.1.4. \"contains\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.1.4).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "maxProperties": { + "description": "The `maxProperties` keyword.\n\nSee [JSON Schema Validation 6.5.1. \"maxProperties\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.5.1).", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "minProperties": { + "description": "The `minProperties` keyword.\n\nSee [JSON Schema Validation 6.5.2. 
\"minProperties\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.5.2).", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "required": { + "description": "The `required` keyword.\n\nSee [JSON Schema Validation 6.5.3. \"required\"](https://tools.ietf.org/html/draft-handrews-json-schema-validation-02#section-6.5.3).", + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "properties": { + "description": "The `properties` keyword.\n\nSee [JSON Schema 9.3.2.1. \"properties\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.2.1).", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Schema" + } + }, + "patternProperties": { + "description": "The `patternProperties` keyword.\n\nSee [JSON Schema 9.3.2.2. \"patternProperties\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.2.2).", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Schema" + } + }, + "additionalProperties": { + "description": "The `additionalProperties` keyword.\n\nSee [JSON Schema 9.3.2.3. \"additionalProperties\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.2.3).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + }, + "propertyNames": { + "description": "The `propertyNames` keyword.\n\nSee [JSON Schema 9.3.2.5. 
\"propertyNames\"](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-9.3.2.5).", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": true + }, + "SingleOrVec_for_InstanceType": { + "description": "A type which can be serialized as a single item, or multiple items.\n\nIn some contexts, a `Single` may be semantically distinct from a `Vec` containing only item.", + "anyOf": [ + { + "$ref": "#/definitions/InstanceType" + }, + { + "type": "array", + "items": { + "$ref": "#/definitions/InstanceType" + } + } + ] + }, + "InstanceType": { + "description": "The possible types of values in JSON Schema documents.\n\nSee [JSON Schema 4.2.1. Instance Data Model](https://tools.ietf.org/html/draft-handrews-json-schema-02#section-4.2.1).", + "type": "string", + "enum": [ + "null", + "boolean", + "object", + "array", + "number", + "string", + "integer" + ] + }, + "SingleOrVec_for_Schema": { + "description": "A type which can be serialized as a single item, or multiple items.\n\nIn some contexts, a `Single` may be semantically distinct from a `Vec` containing only item.", + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "array", + "items": { + "$ref": "#/definitions/Schema" + } + } + ] + }, + "ResponseResult": { + "type": "object", + "required": [ + "name", + "schema" + ], + "properties": { + "name": { + "type": "string" + }, + "schema": { + "$ref": "#/definitions/Schema" + } + } + }, + "Example": { + "description": "An example pair of request params and response result.", + "type": "object", + "required": [ + "name", + "params", + "result" + ], + "properties": { + "name": { + "type": "string" + }, + "params": { + "type": "array", + "items": { + "$ref": "#/definitions/ExampleParam" + } + }, + "result": { + "$ref": "#/definitions/ExampleResult" + } + } + }, + "ExampleParam": { + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": 
"string" + }, + "value": true + } + }, + "ExampleResult": { + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string" + }, + "value": true + } + }, + "Components": { + "type": "object", + "required": [ + "schemas" + ], + "properties": { + "schemas": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Schema" + } + } + } + } + } +} \ No newline at end of file diff --git a/resources/test/schema_status.json b/resources/test/schema_status.json new file mode 100644 index 00000000..31a9a1a7 --- /dev/null +++ b/resources/test/schema_status.json @@ -0,0 +1,384 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "GetStatusResult", + "description": "Result for \"info_get_status\" RPC response.", + "type": "object", + "required": [ + "api_version", + "available_block_range", + "block_sync", + "build_version", + "chainspec_name", + "last_progress", + "peers", + "protocol_version", + "reactor_state", + "starting_state_root_hash", + "uptime" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "protocol_version": { + "description": "The current Casper protocol version.", + "allOf": [ + { + "$ref": "#/definitions/ProtocolVersion" + } + ] + }, + "peers": { + "description": "The node ID and network address of each connected peer.", + "allOf": [ + { + "$ref": "#/definitions/Peers" + } + ] + }, + "build_version": { + "description": "The compiled node version.", + "type": "string" + }, + "chainspec_name": { + "description": "The chainspec name.", + "type": "string" + }, + "starting_state_root_hash": { + "description": "The state root hash of the lowest block in the available block range.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "last_added_block_info": { + "description": "The minimal info of the last block from the linear chain.", + "anyOf": [ + { + "$ref": "#/definitions/MinimalBlockInfo" + }, + { + "type": 
"null" + } + ] + }, + "our_public_signing_key": { + "description": "Our public signing key.", + "anyOf": [ + { + "$ref": "#/definitions/PublicKey" + }, + { + "type": "null" + } + ] + }, + "round_length": { + "description": "The next round length if this node is a validator.", + "anyOf": [ + { + "$ref": "#/definitions/TimeDiff" + }, + { + "type": "null" + } + ] + }, + "next_upgrade": { + "description": "Information about the next scheduled upgrade.", + "anyOf": [ + { + "$ref": "#/definitions/NextUpgrade" + }, + { + "type": "null" + } + ] + }, + "uptime": { + "description": "Time that passed since the node has started.", + "allOf": [ + { + "$ref": "#/definitions/TimeDiff" + } + ] + }, + "reactor_state": { + "description": "The name of the current state of node reactor.", + "type": "string" + }, + "last_progress": { + "description": "Timestamp of the last recorded progress in the reactor.", + "allOf": [ + { + "$ref": "#/definitions/Timestamp" + } + ] + }, + "available_block_range": { + "description": "The available block range in storage.", + "allOf": [ + { + "$ref": "#/definitions/AvailableBlockRange" + } + ] + }, + "block_sync": { + "description": "The status of the block synchronizer builders.", + "allOf": [ + { + "$ref": "#/definitions/BlockSynchronizerStatus" + } + ] + }, + "latest_switch_block_hash": { + "description": "The hash of the latest switch block.", + "anyOf": [ + { + "$ref": "#/definitions/BlockHash" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "definitions": { + "ProtocolVersion": { + "description": "Casper Platform protocol version", + "type": "string" + }, + "Peers": { + "description": "Map of peer IDs to network addresses.", + "type": "array", + "items": { + "$ref": "#/definitions/PeerEntry" + } + }, + "PeerEntry": { + "description": "Node peer entry.", + "type": "object", + "required": [ + "address", + "node_id" + ], + "properties": { + "node_id": { + "description": "Node id.", + "type": "string" + }, + "address": 
{ + "description": "Node address.", + "type": "string" + } + }, + "additionalProperties": false + }, + "Digest": { + "description": "Hex-encoded hash digest.", + "type": "string" + }, + "MinimalBlockInfo": { + "description": "Minimal info about a `Block` needed to satisfy the node status request.", + "type": "object", + "required": [ + "creator", + "era_id", + "hash", + "height", + "state_root_hash", + "timestamp" + ], + "properties": { + "hash": { + "$ref": "#/definitions/BlockHash" + }, + "timestamp": { + "$ref": "#/definitions/Timestamp" + }, + "era_id": { + "$ref": "#/definitions/EraId" + }, + "height": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "state_root_hash": { + "$ref": "#/definitions/Digest" + }, + "creator": { + "$ref": "#/definitions/PublicKey" + } + }, + "additionalProperties": false + }, + "BlockHash": { + "description": "Hex-encoded cryptographic hash of a block.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "Timestamp": { + "description": "Timestamp formatted as per RFC 3339", + "type": "string" + }, + "EraId": { + "description": "Era ID newtype.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "PublicKey": { + "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. 
Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], + "type": "string" + }, + "TimeDiff": { + "description": "Human-readable duration.", + "type": "string" + }, + "NextUpgrade": { + "description": "Information about the next protocol upgrade.", + "type": "object", + "required": [ + "activation_point", + "protocol_version" + ], + "properties": { + "activation_point": { + "$ref": "#/definitions/ActivationPoint" + }, + "protocol_version": { + "$ref": "#/definitions/ProtocolVersion" + } + } + }, + "ActivationPoint": { + "description": "The first era to which the associated protocol version applies.", + "anyOf": [ + { + "description": "Era id.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + { + "description": "Genesis timestamp.", + "allOf": [ + { + "$ref": "#/definitions/Timestamp" + } + ] + } + ] + }, + "AvailableBlockRange": { + "description": "An unbroken, inclusive range of blocks.", + "type": "object", + "required": [ + "high", + "low" + ], + "properties": { + "low": { + "description": "The inclusive lower bound of the range.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "high": { + "description": "The inclusive upper bound of the range.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "BlockSynchronizerStatus": { + "description": "The status of the block synchronizer.", + "type": "object", + "properties": { + "historical": { + "description": "The status of syncing a historical block, if any.", + "anyOf": [ + { + "$ref": "#/definitions/BlockSyncStatus" + }, + { + "type": "null" + } + ] + }, + "forward": { + "description": "The status of syncing a forward block, if any.", + "anyOf": [ + { + "$ref": "#/definitions/BlockSyncStatus" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "BlockSyncStatus": { + "description": "The status 
of syncing an individual block.", + "type": "object", + "required": [ + "acquisition_state", + "block_hash" + ], + "properties": { + "block_hash": { + "description": "The block hash.", + "allOf": [ + { + "$ref": "#/definitions/BlockHash" + } + ] + }, + "block_height": { + "description": "The height of the block, if known.", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "acquisition_state": { + "description": "The state of acquisition of the data associated with the block.", + "type": "string" + } + }, + "additionalProperties": false + } + } +} \ No newline at end of file diff --git a/resources/test/schema_validator_changes.json b/resources/test/schema_validator_changes.json new file mode 100644 index 00000000..c7a7340d --- /dev/null +++ b/resources/test/schema_validator_changes.json @@ -0,0 +1,146 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "GetValidatorChangesResult", + "description": "Result for the \"info_get_validator_changes\" RPC.", + "type": "object", + "required": [ + "api_version", + "changes" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "changes": { + "description": "The validators' status changes.", + "type": "array", + "items": { + "$ref": "#/definitions/JsonValidatorChanges" + } + } + }, + "additionalProperties": false, + "definitions": { + "JsonValidatorChanges": { + "description": "The changes in a validator's status.", + "type": "object", + "required": [ + "public_key", + "status_changes" + ], + "properties": { + "public_key": { + "description": "The public key of the validator.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "status_changes": { + "description": "The set of changes to the validator's status.", + "type": "array", + "items": { + "$ref": "#/definitions/JsonValidatorStatusChange" + } + } + }, + "additionalProperties": false + }, + "PublicKey": { + "description": "Hex-encoded 
cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], + "type": "string" + }, + "JsonValidatorStatusChange": { + "description": "A single change to a validator's status in the given era.", + "type": "object", + "required": [ + "era_id", + "validator_change" + ], + "properties": { + "era_id": { + "description": "The era in which the change occurred.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + "validator_change": { + "description": "The change in validator status.", + "allOf": [ + { + "$ref": "#/definitions/ValidatorChange" + } + ] + } + }, + "additionalProperties": false + }, + "EraId": { + "description": "Era ID newtype.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "ValidatorChange": { + "description": "A change to a validator's status between two eras.", + "oneOf": [ + { + "description": "The validator got newly added to the validator set.", + "type": "string", + "enum": [ + "Added" + ] + }, + { + "description": "The validator was removed from the validator set.", + "type": "string", + "enum": [ + "Removed" + ] + }, + { + "description": "The validator was banned from this era.", + "type": "string", + "enum": [ + "Banned" + ] + }, + { + 
"description": "The validator was excluded from proposing new blocks in this era.", + "type": "string", + "enum": [ + "CannotPropose" + ] + }, + { + "description": "We saw the validator misbehave in this era.", + "type": "string", + "enum": [ + "SeenAsFaulty" + ] + } + ] + } + } +} \ No newline at end of file diff --git a/resources/test/speculative_rpc_schema.json b/resources/test/speculative_rpc_schema.json new file mode 100644 index 00000000..03d0d77e --- /dev/null +++ b/resources/test/speculative_rpc_schema.json @@ -0,0 +1,4066 @@ +{ + "openrpc": "1.0.0-rc1", + "info": { + "version": "2.0.0", + "title": "Speculative execution client API of Casper Node", + "description": "This describes the JSON-RPC 2.0 API of the speculative execution functinality of a node on the Casper network.", + "contact": { + "name": "Casper Labs", + "url": "https://casperlabs.io" + }, + "license": { + "name": "APACHE LICENSE, VERSION 2.0", + "url": "https://www.apache.org/licenses/LICENSE-2.0" + } + }, + "servers": [ + { + "name": "any Sidecar with speculative JSON RPC API enabled", + "url": "http://IP:PORT/rpc/" + } + ], + "methods": [ + { + "name": "speculative_exec", + "summary": "receives a Deploy to be executed by the network (DEPRECATED: use `account_put_transaction` instead)", + "params": [ + { + "name": "deploy", + "schema": { + "description": "Deploy to execute.", + "$ref": "#/components/schemas/Deploy" + }, + "required": true + } + ], + "result": { + "name": "speculative_exec_result", + "schema": { + "description": "Result for \"speculative_exec_txn\" and \"speculative_exec\" RPC responses.", + "type": "object", + "required": [ + "api_version", + "execution_result" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "execution_result": { + "description": "Result of the speculative execution.", + "$ref": "#/components/schemas/SpeculativeExecutionResult" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + 
"name": "speculative_exec_example", + "params": [ + { + "name": "deploy", + "value": { + "hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa", + "header": { + "account": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "gas_price": 1, + "body_hash": "d53cf72d17278fd47d399013ca389c50d589352f1a12593c0b8e01872a641b50", + "dependencies": [ + "0101010101010101010101010101010101010101010101010101010101010101" + ], + "chain_name": "casper-example" + }, + "payment": { + "StoredContractByName": { + "name": "casper-example", + "entry_point": "example-entry-point", + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] + } + }, + "session": { + "Transfer": { + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] + } + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "014c1a89f92e29dd74fc648f741137d9caf4edba97c5f9799ce0c9aa6b0c9b58db368c64098603dbecef645774c05dff057cb1f91f2cf390bbacce78aa6f084007" + } + ] + } + } + ], + "result": { + "name": "speculative_exec_example_result", + "value": { + "api_version": "2.0.0", + "execution_result": { + "block_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "transfers": [], + "limit": "0", + "consumed": "0", + "effects": [], + "messages": [], + "error": null + } + } + } + } + ] + }, + { + "name": "speculative_exec_txn", + "summary": "receives a Deploy to be executed by the network (DEPRECATED: use `account_put_transaction` instead)", + "params": [ + { + "name": "transaction", + "schema": { + "description": "Transaction to execute.", + "$ref": "#/components/schemas/Transaction" + }, + "required": true + } + ], + "result": { + "name": "speculative_exec_txn_result", + "schema": { + "description": "Result for \"speculative_exec_txn\" and 
\"speculative_exec\" RPC responses.", + "type": "object", + "required": [ + "api_version", + "execution_result" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "execution_result": { + "description": "Result of the speculative execution.", + "$ref": "#/components/schemas/SpeculativeExecutionResult" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "speculative_exec_txn_example", + "params": [ + { + "name": "transaction", + "value": { + "Version1": { + "hash": "e6bc93a30584fefced99be08c1861164b357c74e4904b5d2e7146896aaaaf843", + "payload": { + "initiator_addr": { + "PublicKey": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + }, + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "chain_name": "casper-example", + "pricing_mode": { + "Fixed": { + "additional_computation_factor": 0, + "gas_price_tolerance": 5 + } + }, + "fields": { + "args": { + "Named": [ + [ + "source", + { + "cl_type": "URef", + "bytes": "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a07", + "parsed": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007" + } + ], + [ + "target", + { + "cl_type": "URef", + "bytes": "1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b00", + "parsed": "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000" + } + ], + [ + "amount", + { + "cl_type": "U512", + "bytes": "0500ac23fc06", + "parsed": "30000000000" + } + ], + [ + "id", + { + "cl_type": { + "Option": "U64" + }, + "bytes": "01e703000000000000", + "parsed": 999 + } + ] + ] + }, + "entry_point": "Transfer", + "scheduling": "Standard", + "target": "Native" + } + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "0124fd45fa4ad49f048d116ba86fa84cecc8e4a1c49cbae824db97472a9f5de6c63e6e77723e55d00b78605ee9a69a27ea23a73e47d29d0778534e053f18ca4101" + } + ] + } + 
} + } + ], + "result": { + "name": "speculative_exec_txn_example_result", + "value": { + "api_version": "2.0.0", + "execution_result": { + "block_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "transfers": [], + "limit": "0", + "consumed": "0", + "effects": [], + "messages": [], + "error": null + } + } + } + } + ] + } + ], + "components": { + "schemas": { + "Deploy": { + "description": "A signed smart contract.", + "type": "object", + "required": [ + "approvals", + "hash", + "header", + "payment", + "session" + ], + "properties": { + "hash": { + "$ref": "#/components/schemas/DeployHash" + }, + "header": { + "$ref": "#/components/schemas/DeployHeader" + }, + "payment": { + "$ref": "#/components/schemas/ExecutableDeployItem" + }, + "session": { + "$ref": "#/components/schemas/ExecutableDeployItem" + }, + "approvals": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Approval" + }, + "uniqueItems": true + } + }, + "additionalProperties": false + }, + "DeployHash": { + "description": "Hex-encoded deploy hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "Digest": { + "description": "Hex-encoded hash digest.", + "type": "string" + }, + "DeployHeader": { + "description": "The header portion of a [`Deploy`].", + "type": "object", + "required": [ + "account", + "body_hash", + "chain_name", + "dependencies", + "gas_price", + "timestamp", + "ttl" + ], + "properties": { + "account": { + "$ref": "#/components/schemas/PublicKey" + }, + "timestamp": { + "$ref": "#/components/schemas/Timestamp" + }, + "ttl": { + "$ref": "#/components/schemas/TimeDiff" + }, + "gas_price": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "body_hash": { + "$ref": "#/components/schemas/Digest" + }, + "dependencies": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DeployHash" + } + }, + "chain_name": { + "type": "string" + } + }, + "additionalProperties": false + }, + "PublicKey": { + 
"description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], + "type": "string" + }, + "Timestamp": { + "description": "Timestamp formatted as per RFC 3339", + "type": "string" + }, + "TimeDiff": { + "description": "Human-readable duration.", + "type": "string" + }, + "ExecutableDeployItem": { + "description": "The executable component of a [`Deploy`].", + "oneOf": [ + { + "description": "Executable specified as raw bytes that represent Wasm code and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "ModuleBytes" + ], + "properties": { + "ModuleBytes": { + "type": "object", + "required": [ + "args", + "module_bytes" + ], + "properties": { + "module_bytes": { + "description": "Hex-encoded raw Wasm bytes.", + "allOf": [ + { + "$ref": "#/components/schemas/Bytes" + } + ] + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored contract referenced by its [`AddressableEntityHash`], entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ 
+ "StoredContractByHash" + ], + "properties": { + "StoredContractByHash": { + "type": "object", + "required": [ + "args", + "entry_point", + "hash" + ], + "properties": { + "hash": { + "description": "Hex-encoded contract hash.", + "allOf": [ + { + "$ref": "#/components/schemas/ContractHash" + } + ] + }, + "entry_point": { + "description": "Name of an entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredContractByName" + ], + "properties": { + "StoredContractByName": { + "type": "object", + "required": [ + "args", + "entry_point", + "name" + ], + "properties": { + "name": { + "description": "Named key.", + "type": "string" + }, + "entry_point": { + "description": "Name of an entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored versioned contract referenced by its [`PackageHash`], entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredVersionedContractByHash" + ], + "properties": { + "StoredVersionedContractByHash": { + "type": "object", + "required": [ + "args", + "entry_point", + "hash" + ], + "properties": { + "hash": { + "description": "Hex-encoded contract package hash.", + "allOf": [ + { + "$ref": "#/components/schemas/ContractPackageHash" + } + ] + }, + "version": { + "description": "An optional version of the contract to call. 
It will default to the highest enabled version if no value is specified.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "entry_point": { + "description": "Entry point name.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored versioned contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredVersionedContractByName" + ], + "properties": { + "StoredVersionedContractByName": { + "type": "object", + "required": [ + "args", + "entry_point", + "name" + ], + "properties": { + "name": { + "description": "Named key.", + "type": "string" + }, + "version": { + "description": "An optional version of the contract to call. It will default to the highest enabled version if no value is specified.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "entry_point": { + "description": "Entry point name.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "A native transfer which does not contain or reference a Wasm code.", + "type": "object", + "required": [ + "Transfer" + ], + "properties": { + "Transfer": { + "type": "object", + "required": [ + "args" + ], + "properties": { + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "Bytes": { + "description": "Hex-encoded bytes.", + "type": "string" + }, + "RuntimeArgs": { + 
"description": "Represents a collection of arguments passed to a smart contract.", + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedArg" + } + }, + "NamedArg": { + "description": "Named arguments to a contract.", + "type": "array", + "items": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/CLValue" + } + ], + "maxItems": 2, + "minItems": 2 + }, + "CLValue": { + "description": "A Casper value, i.e. a value which can be stored and manipulated by smart contracts.\n\nIt holds the underlying data as a type-erased, serialized `Vec` and also holds the CLType of the underlying data as a separate member.\n\nThe `parsed` field, representing the original value, is a convenience only available when a CLValue is encoded to JSON, and can always be set to null if preferred.", + "type": "object", + "required": [ + "bytes", + "cl_type" + ], + "properties": { + "cl_type": { + "$ref": "#/components/schemas/CLType" + }, + "bytes": { + "type": "string" + }, + "parsed": true + }, + "additionalProperties": false + }, + "CLType": { + "description": "Casper types, i.e. 
types which can be stored and manipulated by smart contracts.\n\nProvides a description of the underlying data type of a [`CLValue`](crate::CLValue).", + "oneOf": [ + { + "description": "`bool` primitive.", + "type": "string", + "enum": [ + "Bool" + ] + }, + { + "description": "`i32` primitive.", + "type": "string", + "enum": [ + "I32" + ] + }, + { + "description": "`i64` primitive.", + "type": "string", + "enum": [ + "I64" + ] + }, + { + "description": "`u8` primitive.", + "type": "string", + "enum": [ + "U8" + ] + }, + { + "description": "`u32` primitive.", + "type": "string", + "enum": [ + "U32" + ] + }, + { + "description": "`u64` primitive.", + "type": "string", + "enum": [ + "U64" + ] + }, + { + "description": "[`U128`] large unsigned integer type.", + "type": "string", + "enum": [ + "U128" + ] + }, + { + "description": "[`U256`] large unsigned integer type.", + "type": "string", + "enum": [ + "U256" + ] + }, + { + "description": "[`U512`] large unsigned integer type.", + "type": "string", + "enum": [ + "U512" + ] + }, + { + "description": "`()` primitive.", + "type": "string", + "enum": [ + "Unit" + ] + }, + { + "description": "`String` primitive.", + "type": "string", + "enum": [ + "String" + ] + }, + { + "description": "[`Key`] system type.", + "type": "string", + "enum": [ + "Key" + ] + }, + { + "description": "[`URef`] system type.", + "type": "string", + "enum": [ + "URef" + ] + }, + { + "description": "[`PublicKey`](crate::PublicKey) system type.", + "type": "string", + "enum": [ + "PublicKey" + ] + }, + { + "description": "`Option` of a `CLType`.", + "type": "object", + "required": [ + "Option" + ], + "properties": { + "Option": { + "$ref": "#/components/schemas/CLType" + } + }, + "additionalProperties": false + }, + { + "description": "Variable-length list of a single `CLType` (comparable to a `Vec`).", + "type": "object", + "required": [ + "List" + ], + "properties": { + "List": { + "$ref": "#/components/schemas/CLType" + } + }, + 
"additionalProperties": false + }, + { + "description": "Fixed-length list of a single `CLType` (comparable to a Rust array).", + "type": "object", + "required": [ + "ByteArray" + ], + "properties": { + "ByteArray": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "`Result` with `Ok` and `Err` variants of `CLType`s.", + "type": "object", + "required": [ + "Result" + ], + "properties": { + "Result": { + "type": "object", + "required": [ + "err", + "ok" + ], + "properties": { + "ok": { + "$ref": "#/components/schemas/CLType" + }, + "err": { + "$ref": "#/components/schemas/CLType" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Map with keys of a single `CLType` and values of a single `CLType`.", + "type": "object", + "required": [ + "Map" + ], + "properties": { + "Map": { + "type": "object", + "required": [ + "key", + "value" + ], + "properties": { + "key": { + "$ref": "#/components/schemas/CLType" + }, + "value": { + "$ref": "#/components/schemas/CLType" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "1-ary tuple of a `CLType`.", + "type": "object", + "required": [ + "Tuple1" + ], + "properties": { + "Tuple1": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" + }, + "maxItems": 1, + "minItems": 1 + } + }, + "additionalProperties": false + }, + { + "description": "2-ary tuple of `CLType`s.", + "type": "object", + "required": [ + "Tuple2" + ], + "properties": { + "Tuple2": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" + }, + "maxItems": 2, + "minItems": 2 + } + }, + "additionalProperties": false + }, + { + "description": "3-ary tuple of `CLType`s.", + "type": "object", + "required": [ + "Tuple3" + ], + "properties": { + "Tuple3": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" + }, + 
"maxItems": 3, + "minItems": 3 + } + }, + "additionalProperties": false + }, + { + "description": "Unspecified type.", + "type": "string", + "enum": [ + "Any" + ] + } + ] + }, + "ContractHash": { + "description": "The hash address of the contract", + "type": "string" + }, + "ContractPackageHash": { + "description": "The hash address of the contract package", + "type": "string" + }, + "Approval": { + "description": "A struct containing a signature of a transaction hash and the public key of the signer.", + "type": "object", + "required": [ + "signature", + "signer" + ], + "properties": { + "signer": { + "$ref": "#/components/schemas/PublicKey" + }, + "signature": { + "$ref": "#/components/schemas/Signature" + } + }, + "additionalProperties": false + }, + "Signature": { + "description": "Hex-encoded cryptographic signature, including the algorithm tag prefix.", + "type": "string" + }, + "SpeculativeExecutionResult": { + "type": "object", + "required": [ + "block_hash", + "consumed", + "effects", + "limit", + "messages", + "transfers" + ], + "properties": { + "block_hash": { + "description": "Block hash against which the execution was performed.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "transfers": { + "description": "List of transfers that happened during execution.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Transfer" + } + }, + "limit": { + "description": "Gas limit.", + "allOf": [ + { + "$ref": "#/components/schemas/Gas" + } + ] + }, + "consumed": { + "description": "Gas consumed.", + "allOf": [ + { + "$ref": "#/components/schemas/Gas" + } + ] + }, + "effects": { + "description": "Execution effects.", + "allOf": [ + { + "$ref": "#/components/schemas/Effects" + } + ] + }, + "messages": { + "description": "Messages emitted during execution.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Message" + } + }, + "error": { + "description": "Did the wasm execute successfully?", + "type": [ + 
"string", + "null" + ] + } + } + }, + "BlockHash": { + "description": "Hex-encoded cryptographic hash of a block.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "Transfer": { + "description": "A versioned wrapper for a transfer.", + "oneOf": [ + { + "description": "A version 1 transfer.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/TransferV1" + } + }, + "additionalProperties": false + }, + { + "description": "A version 2 transfer.", + "type": "object", + "required": [ + "Version2" + ], + "properties": { + "Version2": { + "$ref": "#/components/schemas/TransferV2" + } + }, + "additionalProperties": false + } + ] + }, + "TransferV1": { + "description": "Represents a version 1 transfer from one purse to another.", + "type": "object", + "required": [ + "amount", + "deploy_hash", + "from", + "gas", + "source", + "target" + ], + "properties": { + "deploy_hash": { + "description": "Hex-encoded Deploy hash of Deploy that created the transfer.", + "allOf": [ + { + "$ref": "#/components/schemas/DeployHash" + } + ] + }, + "from": { + "description": "Account from which transfer was executed", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + }, + "to": { + "description": "Account to which funds are transferred", + "anyOf": [ + { + "$ref": "#/components/schemas/AccountHash" + }, + { + "type": "null" + } + ] + }, + "source": { + "description": "Source purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "target": { + "description": "Target purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "amount": { + "description": "Transfer amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "gas": { + "description": "Gas", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "id": { + "description": "User-defined id", + "type": [ + "integer", + "null" + ], + "format": 
"uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "AccountHash": { + "description": "Account hash as a formatted string.", + "type": "string" + }, + "URef": { + "description": "Hex-encoded, formatted URef.", + "type": "string" + }, + "U512": { + "description": "Decimal representation of a 512-bit integer.", + "type": "string" + }, + "TransferV2": { + "description": "Represents a version 2 transfer from one purse to another.", + "type": "object", + "required": [ + "amount", + "from", + "gas", + "source", + "target", + "transaction_hash" + ], + "properties": { + "transaction_hash": { + "description": "Transaction that created the transfer.", + "allOf": [ + { + "$ref": "#/components/schemas/TransactionHash" + } + ] + }, + "from": { + "description": "Entity from which transfer was executed.", + "allOf": [ + { + "$ref": "#/components/schemas/InitiatorAddr" + } + ] + }, + "to": { + "description": "Account to which funds are transferred.", + "anyOf": [ + { + "$ref": "#/components/schemas/AccountHash" + }, + { + "type": "null" + } + ] + }, + "source": { + "description": "Source purse.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "target": { + "description": "Target purse.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "amount": { + "description": "Transfer amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "gas": { + "description": "Gas.", + "allOf": [ + { + "$ref": "#/components/schemas/Gas" + } + ] + }, + "id": { + "description": "User-defined ID.", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "TransactionHash": { + "description": "A versioned wrapper for a transaction hash or deploy hash.", + "oneOf": [ + { + "description": "A deploy hash.", + "type": "object", + "required": [ + "Deploy" + ], + "properties": { + "Deploy": { + "$ref": "#/components/schemas/DeployHash" + } + }, + 
"additionalProperties": false + }, + { + "description": "A version 1 transaction hash.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/TransactionV1Hash" + } + }, + "additionalProperties": false + } + ] + }, + "TransactionV1Hash": { + "description": "Hex-encoded TransactionV1 hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "InitiatorAddr": { + "description": "The address of the initiator of a TransactionV1.", + "oneOf": [ + { + "description": "The public key of the initiator.", + "type": "object", + "required": [ + "PublicKey" + ], + "properties": { + "PublicKey": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + { + "description": "The account hash derived from the public key of the initiator.", + "type": "object", + "required": [ + "AccountHash" + ], + "properties": { + "AccountHash": { + "$ref": "#/components/schemas/AccountHash" + } + }, + "additionalProperties": false + } + ] + }, + "Gas": { + "description": "The `Gas` struct represents a `U512` amount of gas.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "Effects": { + "description": "A log of all transforms produced during execution.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransformV2" + } + }, + "TransformV2": { + "description": "A transformation performed while executing a deploy.", + "type": "object", + "required": [ + "key", + "kind" + ], + "properties": { + "key": { + "$ref": "#/components/schemas/Key" + }, + "kind": { + "$ref": "#/components/schemas/TransformKindV2" + } + }, + "additionalProperties": false + }, + "Key": { + "description": "The key as a formatted string, under which data (e.g. 
`CLValue`s, smart contracts, user accounts) are stored in global state.", + "type": "string" + }, + "TransformKindV2": { + "description": "Representation of a single transformation occurring during execution.\n\nNote that all arithmetic variants of `TransformKindV2` are commutative which means that a given collection of them can be executed in any order to produce the same end result.", + "oneOf": [ + { + "description": "An identity transformation that does not modify a value in the global state.\n\nCreated as a result of reading from the global state.", + "type": "string", + "enum": [ + "Identity" + ] + }, + { + "description": "Writes a new value in the global state.", + "type": "object", + "required": [ + "Write" + ], + "properties": { + "Write": { + "$ref": "#/components/schemas/StoredValue" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of an `i32` to an existing numeric value (not necessarily an `i32`) in the global state.", + "type": "object", + "required": [ + "AddInt32" + ], + "properties": { + "AddInt32": { + "type": "integer", + "format": "int32" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `u64` to an existing numeric value (not necessarily an `u64`) in the global state.", + "type": "object", + "required": [ + "AddUInt64" + ], + "properties": { + "AddUInt64": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `U128` to an existing numeric value (not necessarily an `U128`) in the global state.", + "type": "object", + "required": [ + "AddUInt128" + ], + "properties": { + "AddUInt128": { + "$ref": "#/components/schemas/U128" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `U256` to an existing numeric value (not necessarily an `U256`) in the global state.", + "type": "object", + "required": [ + "AddUInt256" + ], + 
"properties": { + "AddUInt256": { + "$ref": "#/components/schemas/U256" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `U512` to an existing numeric value (not necessarily an `U512`) in the global state.", + "type": "object", + "required": [ + "AddUInt512" + ], + "properties": { + "AddUInt512": { + "$ref": "#/components/schemas/U512" + } + }, + "additionalProperties": false + }, + { + "description": "Adds new named keys to an existing entry in the global state.\n\nThis transform assumes that the existing stored value is either an Account or a Contract.", + "type": "object", + "required": [ + "AddKeys" + ], + "properties": { + "AddKeys": { + "$ref": "#/components/schemas/NamedKeys" + } + }, + "additionalProperties": false + }, + { + "description": "Removes the pathing to the global state entry of the specified key. The pruned element remains reachable from previously generated global state root hashes, but will not be included in the next generated global state root hash and subsequent state accumulated from it.", + "type": "object", + "required": [ + "Prune" + ], + "properties": { + "Prune": { + "$ref": "#/components/schemas/Key" + } + }, + "additionalProperties": false + }, + { + "description": "Represents the case where applying a transform would cause an error.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { + "$ref": "#/components/schemas/TransformError" + } + }, + "additionalProperties": false + } + ] + }, + "StoredValue": { + "description": "A value stored in Global State.", + "oneOf": [ + { + "description": "A CLValue.", + "type": "object", + "required": [ + "CLValue" + ], + "properties": { + "CLValue": { + "$ref": "#/components/schemas/CLValue" + } + }, + "additionalProperties": false + }, + { + "description": "An account.", + "type": "object", + "required": [ + "Account" + ], + "properties": { + "Account": { + "$ref": "#/components/schemas/Account" + } + }, + 
"additionalProperties": false + }, + { + "description": "Contract wasm.", + "type": "object", + "required": [ + "ContractWasm" + ], + "properties": { + "ContractWasm": { + "$ref": "#/components/schemas/ContractWasm" + } + }, + "additionalProperties": false + }, + { + "description": "A contract.", + "type": "object", + "required": [ + "Contract" + ], + "properties": { + "Contract": { + "$ref": "#/components/schemas/Contract" + } + }, + "additionalProperties": false + }, + { + "description": "A contract package.", + "type": "object", + "required": [ + "ContractPackage" + ], + "properties": { + "ContractPackage": { + "$ref": "#/components/schemas/ContractPackage" + } + }, + "additionalProperties": false + }, + { + "description": "A version 1 transfer.", + "type": "object", + "required": [ + "Transfer" + ], + "properties": { + "Transfer": { + "$ref": "#/components/schemas/TransferV1" + } + }, + "additionalProperties": false + }, + { + "description": "Info about a deploy.", + "type": "object", + "required": [ + "DeployInfo" + ], + "properties": { + "DeployInfo": { + "$ref": "#/components/schemas/DeployInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Info about an era.", + "type": "object", + "required": [ + "EraInfo" + ], + "properties": { + "EraInfo": { + "$ref": "#/components/schemas/EraInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores [`Bid`].", + "type": "object", + "required": [ + "Bid" + ], + "properties": { + "Bid": { + "$ref": "#/components/schemas/Bid" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores withdraw information.", + "type": "object", + "required": [ + "Withdraw" + ], + "properties": { + "Withdraw": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WithdrawPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "Unbonding information.", + "type": "object", + "required": [ + "Unbonding" + ], + "properties": { 
+ "Unbonding": { + "type": "array", + "items": { + "$ref": "#/components/schemas/UnbondingPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "An `AddressableEntity`.", + "type": "object", + "required": [ + "AddressableEntity" + ], + "properties": { + "AddressableEntity": { + "$ref": "#/components/schemas/AddressableEntity" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores [`BidKind`].", + "type": "object", + "required": [ + "BidKind" + ], + "properties": { + "BidKind": { + "$ref": "#/components/schemas/BidKind" + } + }, + "additionalProperties": false + }, + { + "description": "A smart contract `Package`.", + "type": "object", + "required": [ + "SmartContract" + ], + "properties": { + "SmartContract": { + "$ref": "#/components/schemas/Package" + } + }, + "additionalProperties": false + }, + { + "description": "A record of byte code.", + "type": "object", + "required": [ + "ByteCode" + ], + "properties": { + "ByteCode": { + "$ref": "#/components/schemas/ByteCode" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores a message topic.", + "type": "object", + "required": [ + "MessageTopic" + ], + "properties": { + "MessageTopic": { + "$ref": "#/components/schemas/MessageTopicSummary" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores a message digest.", + "type": "object", + "required": [ + "Message" + ], + "properties": { + "Message": { + "$ref": "#/components/schemas/MessageChecksum" + } + }, + "additionalProperties": false + }, + { + "description": "A NamedKey record.", + "type": "object", + "required": [ + "NamedKey" + ], + "properties": { + "NamedKey": { + "$ref": "#/components/schemas/NamedKeyValue" + } + }, + "additionalProperties": false + }, + { + "description": "A prepayment record.", + "type": "object", + "required": [ + "Prepayment" + ], + "properties": { + "Prepayment": { + "$ref": "#/components/schemas/PrepaymentKind" + } 
+ }, + "additionalProperties": false + }, + { + "description": "An entrypoint record.", + "type": "object", + "required": [ + "EntryPoint" + ], + "properties": { + "EntryPoint": { + "$ref": "#/components/schemas/EntryPointValue" + } + }, + "additionalProperties": false + }, + { + "description": "Raw bytes. Similar to a [`crate::StoredValue::CLValue`] but does not incur overhead of a [`crate::CLValue`] and [`crate::CLType`].", + "type": "object", + "required": [ + "RawBytes" + ], + "properties": { + "RawBytes": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "Account": { + "description": "Represents an Account in the global state.", + "type": "object", + "required": [ + "account_hash", + "action_thresholds", + "associated_keys", + "main_purse", + "named_keys" + ], + "properties": { + "account_hash": { + "$ref": "#/components/schemas/AccountHash" + }, + "named_keys": { + "$ref": "#/components/schemas/NamedKeys" + }, + "main_purse": { + "$ref": "#/components/schemas/URef" + }, + "associated_keys": { + "$ref": "#/components/schemas/AccountAssociatedKeys" + }, + "action_thresholds": { + "$ref": "#/components/schemas/AccountActionThresholds" + } + }, + "additionalProperties": false + }, + "NamedKeys": { + "description": "A collection of named keys.", + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedKey" + } + }, + "NamedKey": { + "description": "A key with a name.", + "type": "object", + "required": [ + "key", + "name" + ], + "properties": { + "name": { + "description": "The name of the entry.", + "type": "string" + }, + "key": { + "description": "The value of the entry: a casper `Key` type.", + "allOf": [ + { + "$ref": "#/components/schemas/Key" + } + ] + } + }, + "additionalProperties": false + }, + "AccountAssociatedKeys": { + "description": "A collection of weighted public keys (represented as account hashes) associated with an account.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_AssociatedKey" + } + ] 
+ }, + "Array_of_AssociatedKey": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AssociatedKey" + } + }, + "AssociatedKey": { + "description": "A weighted public key.", + "type": "object", + "required": [ + "account_hash", + "weight" + ], + "properties": { + "account_hash": { + "description": "The account hash of the public key.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + }, + "weight": { + "description": "The weight assigned to the public key.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountAssociatedKeyWeight" + } + ] + } + } + }, + "AccountAssociatedKeyWeight": { + "description": "The weight associated with public keys in an account's associated keys.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "AccountActionThresholds": { + "description": "Thresholds that have to be met when executing an action of a certain type.", + "type": "object", + "required": [ + "deployment", + "key_management" + ], + "properties": { + "deployment": { + "description": "Threshold for deploy execution.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountAssociatedKeyWeight" + } + ] + }, + "key_management": { + "description": "Threshold for managing action threshold.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountAssociatedKeyWeight" + } + ] + } + } + }, + "ContractWasm": { + "description": "A container for contract's WASM bytes.", + "type": "object", + "required": [ + "bytes" + ], + "properties": { + "bytes": { + "$ref": "#/components/schemas/Bytes" + } + } + }, + "Contract": { + "description": "Methods and type signatures supported by a contract.", + "type": "object", + "required": [ + "contract_package_hash", + "contract_wasm_hash", + "entry_points", + "named_keys", + "protocol_version" + ], + "properties": { + "contract_package_hash": { + "$ref": "#/components/schemas/ContractPackageHash" + }, + "contract_wasm_hash": { + "$ref": "#/components/schemas/ContractWasmHash" + }, + "named_keys": 
{ + "$ref": "#/components/schemas/NamedKeys" + }, + "entry_points": { + "$ref": "#/components/schemas/Array_of_NamedEntryPoint" + }, + "protocol_version": { + "$ref": "#/components/schemas/ProtocolVersion" + } + } + }, + "ContractWasmHash": { + "description": "The hash address of the contract wasm", + "type": "string" + }, + "Array_of_NamedEntryPoint": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedEntryPoint" + } + }, + "NamedEntryPoint": { + "type": "object", + "required": [ + "entry_point", + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "entry_point": { + "allOf": [ + { + "$ref": "#/components/schemas/EntryPoint" + } + ] + } + } + }, + "EntryPoint": { + "description": "Type signature of a method. Order of arguments matter since can be referenced by index as well as name.", + "type": "object", + "required": [ + "access", + "args", + "entry_point_type", + "name", + "ret" + ], + "properties": { + "name": { + "type": "string" + }, + "args": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Parameter" + } + }, + "ret": { + "$ref": "#/components/schemas/CLType" + }, + "access": { + "$ref": "#/components/schemas/EntryPointAccess" + }, + "entry_point_type": { + "$ref": "#/components/schemas/EntryPointType" + } + } + }, + "Parameter": { + "description": "Parameter to a method", + "type": "object", + "required": [ + "cl_type", + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "cl_type": { + "$ref": "#/components/schemas/CLType" + } + } + }, + "EntryPointAccess": { + "description": "Enum describing the possible access control options for a contract entry point (method).", + "oneOf": [ + { + "description": "Anyone can call this method (no access controls).", + "type": "string", + "enum": [ + "Public" + ] + }, + { + "description": "Only users from the listed groups may call this method. 
Note: if the list is empty then this method is not callable from outside the contract.", + "type": "object", + "required": [ + "Groups" + ], + "properties": { + "Groups": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Group" + } + } + }, + "additionalProperties": false + }, + { + "description": "Can't be accessed directly but are kept in the derived wasm bytes.", + "type": "string", + "enum": [ + "Template" + ] + } + ] + }, + "Group": { + "description": "A (labelled) \"user group\". Each method of a versioned contract may be associated with one or more user groups which are allowed to call it.", + "type": "string" + }, + "EntryPointType": { + "description": "Context of method execution\n\nMost significant bit represents version i.e. - 0b0 -> 0.x/1.x (session & contracts) - 0b1 -> 2.x and later (introduced installer, utility entry points)", + "oneOf": [ + { + "description": "Runs using the calling entity's context. In v1.x this was used for both \"session\" code run using the originating Account's context, and also for \"StoredSession\" code that ran in the caller's context. While this made systemic sense due to the way the runtime context nesting works, this dual usage was very confusing to most human beings.\n\nIn v2.x the renamed Caller variant is exclusively used for wasm run using the initiating account entity's context. Previously installed 1.x stored session code should continue to work as the binary value matches but we no longer allow such logic to be upgraded, nor do we allow new stored session to be installed.", + "type": "string", + "enum": [ + "Caller" + ] + }, + { + "description": "Runs using the called entity's context.", + "type": "string", + "enum": [ + "Called" + ] + }, + { + "description": "Extract a subset of bytecode and installs it as a new smart contract. 
Runs using the called entity's context.", + "type": "string", + "enum": [ + "Factory" + ] + } + ] + }, + "ProtocolVersion": { + "description": "Casper Platform protocol version", + "type": "string" + }, + "ContractPackage": { + "description": "Contract definition, metadata, and security container.", + "type": "object", + "required": [ + "access_key", + "disabled_versions", + "groups", + "lock_status", + "versions" + ], + "properties": { + "access_key": { + "description": "Key used to add or disable versions", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "versions": { + "description": "All versions (enabled & disabled)", + "type": "array", + "items": { + "$ref": "#/components/schemas/ContractVersion" + } + }, + "disabled_versions": { + "description": "Disabled versions", + "type": "array", + "items": { + "$ref": "#/components/schemas/ContractVersionKey" + }, + "uniqueItems": true + }, + "groups": { + "description": "Mapping maintaining the set of URefs associated with each \"user group\". This can be used to control access to methods in a particular version of the contract. 
A method is callable by any context which \"knows\" any of the URefs associated with the method's user group.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_NamedUserGroup" + } + ] + }, + "lock_status": { + "description": "A flag that determines whether a contract is locked", + "allOf": [ + { + "$ref": "#/components/schemas/ContractPackageStatus" + } + ] + } + } + }, + "ContractVersion": { + "type": "object", + "required": [ + "contract_hash", + "contract_version", + "protocol_version_major" + ], + "properties": { + "protocol_version_major": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "contract_version": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "contract_hash": { + "$ref": "#/components/schemas/ContractHash" + } + } + }, + "ContractVersionKey": { + "description": "Major element of `ProtocolVersion` combined with `ContractVersion`.", + "type": "array", + "items": [ + { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + ], + "maxItems": 2, + "minItems": 2 + }, + "Array_of_NamedUserGroup": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedUserGroup" + } + }, + "NamedUserGroup": { + "type": "object", + "required": [ + "group_name", + "group_users" + ], + "properties": { + "group_name": { + "allOf": [ + { + "$ref": "#/components/schemas/Group" + } + ] + }, + "group_users": { + "type": "array", + "items": { + "$ref": "#/components/schemas/URef" + }, + "uniqueItems": true + } + } + }, + "ContractPackageStatus": { + "description": "A enum to determine the lock status of the contract package.", + "oneOf": [ + { + "description": "The package is locked and cannot be versioned.", + "type": "string", + "enum": [ + "Locked" + ] + }, + { + "description": "The package is unlocked and can be versioned.", + "type": "string", + "enum": [ + "Unlocked" + ] + } + ] + }, + "DeployInfo": { + "description": "Information 
relating to the given Deploy.", + "type": "object", + "required": [ + "deploy_hash", + "from", + "gas", + "source", + "transfers" + ], + "properties": { + "deploy_hash": { + "description": "Hex-encoded Deploy hash.", + "allOf": [ + { + "$ref": "#/components/schemas/DeployHash" + } + ] + }, + "transfers": { + "description": "Version 1 transfers performed by the Deploy.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransferAddr" + } + }, + "from": { + "description": "Account identifier of the creator of the Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + }, + "source": { + "description": "Source purse used for payment of the Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "gas": { + "description": "Gas cost of executing the Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + }, + "TransferAddr": { + "description": "Hex-encoded version 1 transfer address.", + "type": "string" + }, + "EraInfo": { + "description": "Auction metadata. 
Intended to be recorded at each era.", + "type": "object", + "required": [ + "seigniorage_allocations" + ], + "properties": { + "seigniorage_allocations": { + "type": "array", + "items": { + "$ref": "#/components/schemas/SeigniorageAllocation" + } + } + }, + "additionalProperties": false + }, + "SeigniorageAllocation": { + "description": "Information about a seigniorage allocation", + "oneOf": [ + { + "description": "Info about a seigniorage allocation for a validator", + "type": "object", + "required": [ + "Validator" + ], + "properties": { + "Validator": { + "type": "object", + "required": [ + "amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Info about a seigniorage allocation for a delegator", + "type": "object", + "required": [ + "Delegator" + ], + "properties": { + "Delegator": { + "type": "object", + "required": [ + "amount", + "delegator_public_key", + "validator_public_key" + ], + "properties": { + "delegator_public_key": { + "description": "Delegator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Info about a seigniorage allocation for a delegator", + "type": "object", + "required": [ + "DelegatorKind" + ], + "properties": { + "DelegatorKind": { + "type": "object", + "required": [ + 
"amount", + "delegator_kind", + "validator_public_key" + ], + "properties": { + "delegator_kind": { + "description": "Delegator kind", + "allOf": [ + { + "$ref": "#/components/schemas/DelegatorKind" + } + ] + }, + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "DelegatorKind": { + "description": "Auction bid variants. Kinds of delegation bids.", + "oneOf": [ + { + "description": "Delegation from public key.", + "type": "object", + "required": [ + "PublicKey" + ], + "properties": { + "PublicKey": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + { + "description": "Delegation from purse.", + "type": "object", + "required": [ + "Purse" + ], + "properties": { + "Purse": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "Bid": { + "description": "An entry in the validator map.", + "type": "object", + "required": [ + "bonding_purse", + "delegation_rate", + "delegators", + "inactive", + "staked_amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "bonding_purse": { + "description": "The purse that was used for bonding.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "staked_amount": { + "description": "The amount of tokens staked by a validator (not including delegators).", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "delegation_rate": { + "description": "Delegation rate.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "vesting_schedule": { + "description": "Vesting schedule for a 
genesis validator. `None` if non-genesis validator.", + "anyOf": [ + { + "$ref": "#/components/schemas/VestingSchedule" + }, + { + "type": "null" + } + ] + }, + "delegators": { + "description": "This validator's delegators, indexed by their public keys.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_PublicKeyAndDelegator" + } + ] + }, + "inactive": { + "description": "`true` if validator has been \"evicted\".", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "VestingSchedule": { + "type": "object", + "required": [ + "initial_release_timestamp_millis" + ], + "properties": { + "initial_release_timestamp_millis": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "locked_amounts": { + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/components/schemas/U512" + }, + "maxItems": 14, + "minItems": 14 + } + }, + "additionalProperties": false + }, + "Array_of_PublicKeyAndDelegator": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKeyAndDelegator" + } + }, + "PublicKeyAndDelegator": { + "description": "A delegator associated with the given validator.", + "type": "object", + "required": [ + "delegator", + "delegator_public_key" + ], + "properties": { + "delegator_public_key": { + "description": "The public key of the delegator.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "delegator": { + "description": "The delegator details.", + "allOf": [ + { + "$ref": "#/components/schemas/Delegator" + } + ] + } + } + }, + "Delegator": { + "description": "Represents a party delegating their stake to a validator (or \"delegatee\")", + "type": "object", + "required": [ + "bonding_purse", + "delegator_public_key", + "staked_amount", + "validator_public_key" + ], + "properties": { + "delegator_public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "staked_amount": { + "$ref": "#/components/schemas/U512" + }, + "bonding_purse": { + "$ref": "#/components/schemas/URef" 
+ }, + "validator_public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "vesting_schedule": { + "anyOf": [ + { + "$ref": "#/components/schemas/VestingSchedule" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "WithdrawPurse": { + "description": "A withdraw purse, a legacy structure.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation", + "unbonder_public_key", + "validator_public_key" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "unbonder_public_key": { + "description": "Unbonders public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "era_of_creation": { + "description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + }, + "EraId": { + "description": "Era ID newtype.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "UnbondingPurse": { + "description": "Unbonding purse.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation", + "unbonder_public_key", + "validator_public_key" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "unbonder_public_key": { + "description": "Unbonders public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "era_of_creation": { + 
"description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "new_validator": { + "description": "The validator public key to re-delegate to.", + "anyOf": [ + { + "$ref": "#/components/schemas/PublicKey" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "AddressableEntity": { + "description": "Methods and type signatures supported by a contract.", + "type": "object", + "required": [ + "action_thresholds", + "associated_keys", + "byte_code_hash", + "entity_kind", + "main_purse", + "package_hash", + "protocol_version" + ], + "properties": { + "protocol_version": { + "$ref": "#/components/schemas/ProtocolVersion" + }, + "entity_kind": { + "$ref": "#/components/schemas/EntityKind" + }, + "package_hash": { + "$ref": "#/components/schemas/PackageHash" + }, + "byte_code_hash": { + "$ref": "#/components/schemas/ByteCodeHash" + }, + "main_purse": { + "$ref": "#/components/schemas/URef" + }, + "associated_keys": { + "$ref": "#/components/schemas/EntityAssociatedKeys" + }, + "action_thresholds": { + "$ref": "#/components/schemas/EntityActionThresholds" + } + } + }, + "EntityKind": { + "description": "The type of Package.", + "oneOf": [ + { + "description": "Package associated with a native contract implementation.", + "type": "object", + "required": [ + "System" + ], + "properties": { + "System": { + "$ref": "#/components/schemas/SystemEntityType" + } + }, + "additionalProperties": false + }, + { + "description": "Package associated with an Account hash.", + "type": "object", + "required": [ + "Account" + ], + "properties": { + "Account": { + "$ref": "#/components/schemas/AccountHash" + } + }, + "additionalProperties": false + }, + { + "description": "Packages associated with Wasm stored on chain.", + "type": "object", + "required": [ + 
"SmartContract" + ], + "properties": { + "SmartContract": { + "$ref": "#/components/schemas/ContractRuntimeTag" + } + }, + "additionalProperties": false + } + ] + }, + "SystemEntityType": { + "description": "System contract types.\n\nUsed by converting to a `u32` and passing as the `system_contract_index` argument of `ext_ffi::casper_get_system_contract()`.", + "oneOf": [ + { + "description": "Mint contract.", + "type": "string", + "enum": [ + "Mint" + ] + }, + { + "description": "Handle Payment contract.", + "type": "string", + "enum": [ + "HandlePayment" + ] + }, + { + "description": "Standard Payment contract.", + "type": "string", + "enum": [ + "StandardPayment" + ] + }, + { + "description": "Auction contract.", + "type": "string", + "enum": [ + "Auction" + ] + } + ] + }, + "ContractRuntimeTag": { + "description": "Runtime used to execute a Transaction.", + "type": "string", + "enum": [ + "VmCasperV1", + "VmCasperV2" + ] + }, + "PackageHash": { + "description": "The hex-encoded address of the Package.", + "type": "string" + }, + "ByteCodeHash": { + "description": "The hash address of the contract wasm", + "type": "string" + }, + "EntityAssociatedKeys": { + "description": "A collection of weighted public keys (represented as account hashes) associated with an account.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_AssociatedKey" + } + ] + }, + "EntityActionThresholds": { + "description": "Thresholds that have to be met when executing an action of a certain type.", + "type": "object", + "required": [ + "deployment", + "key_management", + "upgrade_management" + ], + "properties": { + "deployment": { + "description": "Threshold for deploy execution.", + "allOf": [ + { + "$ref": "#/components/schemas/EntityAssociatedKeyWeight" + } + ] + }, + "upgrade_management": { + "description": "Threshold for upgrading contracts.", + "allOf": [ + { + "$ref": "#/components/schemas/EntityAssociatedKeyWeight" + } + ] + }, + "key_management": { + "description": 
"Threshold for managing action threshold.", + "allOf": [ + { + "$ref": "#/components/schemas/EntityAssociatedKeyWeight" + } + ] + } + } + }, + "EntityAssociatedKeyWeight": { + "description": "The weight associated with public keys in an account's associated keys.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "BidKind": { + "description": "Auction bid variants.", + "oneOf": [ + { + "description": "A unified record indexed on validator data, with an embedded collection of all delegator bids assigned to that validator. The Unified variant is for legacy retrograde support, new instances will not be created going forward.", + "type": "object", + "required": [ + "Unified" + ], + "properties": { + "Unified": { + "$ref": "#/components/schemas/Bid" + } + }, + "additionalProperties": false + }, + { + "description": "A bid record containing only validator data.", + "type": "object", + "required": [ + "Validator" + ], + "properties": { + "Validator": { + "$ref": "#/components/schemas/ValidatorBid" + } + }, + "additionalProperties": false + }, + { + "description": "A bid record containing only delegator data.", + "type": "object", + "required": [ + "Delegator" + ], + "properties": { + "Delegator": { + "$ref": "#/components/schemas/DelegatorBid" + } + }, + "additionalProperties": false + }, + { + "description": "A bridge record pointing to a new `ValidatorBid` after the public key was changed.", + "type": "object", + "required": [ + "Bridge" + ], + "properties": { + "Bridge": { + "$ref": "#/components/schemas/Bridge" + } + }, + "additionalProperties": false + }, + { + "description": "Credited amount.", + "type": "object", + "required": [ + "Credit" + ], + "properties": { + "Credit": { + "$ref": "#/components/schemas/ValidatorCredit" + } + }, + "additionalProperties": false + }, + { + "description": "Reservation", + "type": "object", + "required": [ + "Reservation" + ], + "properties": { + "Reservation": { + "$ref": "#/components/schemas/Reservation" + } + 
}, + "additionalProperties": false + }, + { + "description": "Unbond", + "type": "object", + "required": [ + "Unbond" + ], + "properties": { + "Unbond": { + "$ref": "#/components/schemas/Unbond" + } + }, + "additionalProperties": false + } + ] + }, + "ValidatorBid": { + "description": "An entry in the validator map.", + "type": "object", + "required": [ + "bonding_purse", + "delegation_rate", + "inactive", + "maximum_delegation_amount", + "minimum_delegation_amount", + "reserved_slots", + "staked_amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "bonding_purse": { + "description": "The purse that was used for bonding.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "staked_amount": { + "description": "The amount of tokens staked by a validator (not including delegators).", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "delegation_rate": { + "description": "Delegation rate", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "vesting_schedule": { + "description": "Vesting schedule for a genesis validator. 
`None` if non-genesis validator.", + "anyOf": [ + { + "$ref": "#/components/schemas/VestingSchedule" + }, + { + "type": "null" + } + ] + }, + "inactive": { + "description": "`true` if validator has been \"evicted\"", + "type": "boolean" + }, + "minimum_delegation_amount": { + "description": "Minimum allowed delegation amount in motes", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "maximum_delegation_amount": { + "description": "Maximum allowed delegation amount in motes", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "reserved_slots": { + "description": "Slots reserved for specific delegators", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "DelegatorBid": { + "description": "Represents a party delegating their stake to a validator (or \"delegatee\")", + "type": "object", + "required": [ + "bonding_purse", + "delegator_kind", + "staked_amount", + "validator_public_key" + ], + "properties": { + "delegator_kind": { + "$ref": "#/components/schemas/DelegatorKind" + }, + "staked_amount": { + "$ref": "#/components/schemas/U512" + }, + "bonding_purse": { + "$ref": "#/components/schemas/URef" + }, + "validator_public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "vesting_schedule": { + "anyOf": [ + { + "$ref": "#/components/schemas/VestingSchedule" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "Bridge": { + "description": "A bridge record pointing to a new `ValidatorBid` after the public key was changed.", + "type": "object", + "required": [ + "era_id", + "new_validator_public_key", + "old_validator_public_key" + ], + "properties": { + "old_validator_public_key": { + "description": "Previous validator public key associated with the bid.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "new_validator_public_key": { + "description": "New validator public key associated with the bid.", + "allOf": [ 
+ { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "era_id": { + "description": "Era when bridge record was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + } + }, + "additionalProperties": false + }, + "ValidatorCredit": { + "description": "Validator credit record.", + "type": "object", + "required": [ + "amount", + "era_id", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "era_id": { + "description": "The era id the credit was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "amount": { + "description": "The credit amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + }, + "Reservation": { + "description": "Represents a validator reserving a slot for specific delegator", + "type": "object", + "required": [ + "delegation_rate", + "delegator_kind", + "validator_public_key" + ], + "properties": { + "delegator_kind": { + "description": "Delegator kind.", + "allOf": [ + { + "$ref": "#/components/schemas/DelegatorKind" + } + ] + }, + "validator_public_key": { + "description": "Validator public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "delegation_rate": { + "description": "Individual delegation rate.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "Unbond": { + "type": "object", + "required": [ + "eras", + "unbond_kind", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "unbond_kind": { + "description": "Unbond kind.", + "allOf": [ + { + "$ref": "#/components/schemas/UnbondKind" + } + ] + }, + "eras": { + "description": "Unbond amounts per era.", + "type": 
"array", + "items": { + "$ref": "#/components/schemas/UnbondEra" + } + } + }, + "additionalProperties": false + }, + "UnbondKind": { + "description": "Unbond variants.", + "oneOf": [ + { + "type": "object", + "required": [ + "Validator" + ], + "properties": { + "Validator": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "DelegatedPublicKey" + ], + "properties": { + "DelegatedPublicKey": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "DelegatedPurse" + ], + "properties": { + "DelegatedPurse": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "UnbondEra": { + "description": "Unbond amounts per era.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "era_of_creation": { + "description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "new_validator": { + "description": "The validator public key to re-delegate to.", + "anyOf": [ + { + "$ref": "#/components/schemas/PublicKey" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "Package": { + "description": "Entity definition, metadata, and security container.", + "type": "object", + "required": [ + "disabled_versions", + "groups", + "lock_status", + "versions" + ], + "properties": { + "versions": { + "description": "All versions (enabled & disabled).", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_EntityVersionAndHash" + } + ] + }, + "disabled_versions": { + "description": "Collection of disabled entity 
versions. The runtime will not permit disabled entity versions to be executed.", + "type": "array", + "items": { + "$ref": "#/components/schemas/EntityVersionKey" + }, + "uniqueItems": true + }, + "groups": { + "description": "Mapping maintaining the set of URefs associated with each \"user group\". This can be used to control access to methods in a particular version of the entity. A method is callable by any context which \"knows\" any of the URefs associated with the method's user group.", + "allOf": [ + { + "$ref": "#/components/schemas/Array_of_NamedUserGroup" + } + ] + }, + "lock_status": { + "description": "A flag that determines whether a entity is locked", + "allOf": [ + { + "$ref": "#/components/schemas/PackageStatus" + } + ] + } + } + }, + "Array_of_EntityVersionAndHash": { + "type": "array", + "items": { + "$ref": "#/components/schemas/EntityVersionAndHash" + } + }, + "EntityVersionAndHash": { + "type": "object", + "required": [ + "addressable_entity_hash", + "entity_version_key" + ], + "properties": { + "entity_version_key": { + "allOf": [ + { + "$ref": "#/components/schemas/EntityVersionKey" + } + ] + }, + "addressable_entity_hash": { + "allOf": [ + { + "$ref": "#/components/schemas/AddressableEntityHash" + } + ] + } + } + }, + "EntityVersionKey": { + "description": "Major element of `ProtocolVersion` combined with `EntityVersion`.", + "type": "object", + "required": [ + "entity_version", + "protocol_version_major" + ], + "properties": { + "protocol_version_major": { + "description": "Major element of `ProtocolVersion` a `ContractVersion` is compatible with.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "entity_version": { + "description": "Automatically incremented value for a contract version within a major `ProtocolVersion`.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + } + }, + "AddressableEntityHash": { + "description": "The hex-encoded address of the addressable entity.", + "type": "string" + }, + 
"PackageStatus": { + "description": "A enum to determine the lock status of the package.", + "oneOf": [ + { + "description": "The package is locked and cannot be versioned.", + "type": "string", + "enum": [ + "Locked" + ] + }, + { + "description": "The package is unlocked and can be versioned.", + "type": "string", + "enum": [ + "Unlocked" + ] + } + ] + }, + "ByteCode": { + "description": "A container for contract's Wasm bytes.", + "type": "object", + "required": [ + "bytes", + "kind" + ], + "properties": { + "kind": { + "$ref": "#/components/schemas/ByteCodeKind" + }, + "bytes": { + "$ref": "#/components/schemas/Bytes" + } + } + }, + "ByteCodeKind": { + "description": "The type of Byte code.", + "oneOf": [ + { + "description": "Empty byte code.", + "type": "string", + "enum": [ + "Empty" + ] + }, + { + "description": "Byte code to be executed with the version 1 Casper execution engine.", + "type": "string", + "enum": [ + "V1CasperWasm" + ] + }, + { + "description": "Byte code to be executed with the version 2 Casper execution engine.", + "type": "string", + "enum": [ + "V2CasperWasm" + ] + } + ] + }, + "MessageTopicSummary": { + "description": "Summary of a message topic that will be stored in global state.", + "type": "object", + "required": [ + "blocktime", + "message_count", + "topic_name" + ], + "properties": { + "message_count": { + "description": "Number of messages in this topic.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "blocktime": { + "description": "Block timestamp in which these messages were emitted.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockTime" + } + ] + }, + "topic_name": { + "description": "Name of the topic.", + "type": "string" + } + } + }, + "BlockTime": { + "description": "A newtype wrapping a [`u64`] which represents the block time.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "MessageChecksum": { + "description": "Message checksum as a formatted string.", + "type": "string" + 
}, + "NamedKeyValue": { + "description": "A NamedKey value.", + "type": "object", + "required": [ + "name", + "named_key" + ], + "properties": { + "named_key": { + "description": "The actual `Key` encoded as a CLValue.", + "allOf": [ + { + "$ref": "#/components/schemas/CLValue" + } + ] + }, + "name": { + "description": "The name of the `Key` encoded as a CLValue.", + "allOf": [ + { + "$ref": "#/components/schemas/CLValue" + } + ] + } + } + }, + "PrepaymentKind": { + "description": "Container for bytes recording location, type and data for a gas pre payment", + "type": "object", + "required": [ + "prepayment_data", + "prepayment_kind", + "receipt" + ], + "properties": { + "receipt": { + "$ref": "#/components/schemas/Digest" + }, + "prepayment_kind": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "prepayment_data": { + "$ref": "#/components/schemas/Bytes" + } + } + }, + "EntryPointValue": { + "description": "The encaspulated representation of entrypoints.", + "oneOf": [ + { + "description": "Entrypoints to be executed against the V1 Casper VM.", + "type": "object", + "required": [ + "V1CasperVm" + ], + "properties": { + "V1CasperVm": { + "$ref": "#/components/schemas/EntryPoint2" + } + }, + "additionalProperties": false + } + ] + }, + "EntryPoint2": { + "description": "Type signature of a method. 
Order of arguments matter since can be referenced by index as well as name.", + "type": "object", + "required": [ + "access", + "args", + "entry_point_payment", + "entry_point_type", + "name", + "ret" + ], + "properties": { + "name": { + "type": "string" + }, + "args": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Parameter" + } + }, + "ret": { + "$ref": "#/components/schemas/CLType" + }, + "access": { + "$ref": "#/components/schemas/EntryPointAccess" + }, + "entry_point_type": { + "$ref": "#/components/schemas/EntryPointType" + }, + "entry_point_payment": { + "$ref": "#/components/schemas/EntryPointPayment" + } + } + }, + "EntryPointPayment": { + "description": "An enum specifying who pays for the invocation and execution of the entrypoint.", + "oneOf": [ + { + "description": "The caller must cover costs", + "type": "string", + "enum": [ + "Caller" + ] + }, + { + "description": "Will cover costs if directly invoked.", + "type": "string", + "enum": [ + "DirectInvocationOnly" + ] + }, + { + "description": "will cover costs to execute self including any subsequent invoked contracts", + "type": "string", + "enum": [ + "SelfOnward" + ] + } + ] + }, + "U128": { + "description": "Decimal representation of a 128-bit integer.", + "type": "string" + }, + "U256": { + "description": "Decimal representation of a 256-bit integer.", + "type": "string" + }, + "TransformError": { + "description": "Error type for applying and combining transforms.\n\nA `TypeMismatch` occurs when a transform cannot be applied because the types are not compatible (e.g. 
trying to add a number to a string).", + "oneOf": [ + { + "description": "Error while (de)serializing data.", + "type": "object", + "required": [ + "Serialization" + ], + "properties": { + "Serialization": { + "$ref": "#/components/schemas/BytesreprError" + } + }, + "additionalProperties": false + }, + { + "description": "Type mismatch error.", + "type": "object", + "required": [ + "TypeMismatch" + ], + "properties": { + "TypeMismatch": { + "$ref": "#/components/schemas/TypeMismatch" + } + }, + "additionalProperties": false + }, + { + "description": "Type no longer supported.", + "type": "string", + "enum": [ + "Deprecated" + ] + } + ] + }, + "BytesreprError": { + "description": "Serialization and deserialization errors.", + "oneOf": [ + { + "description": "Early end of stream while deserializing.", + "type": "string", + "enum": [ + "EarlyEndOfStream" + ] + }, + { + "description": "Formatting error while deserializing.", + "type": "string", + "enum": [ + "Formatting" + ] + }, + { + "description": "Not all input bytes were consumed in [`deserialize`].", + "type": "string", + "enum": [ + "LeftOverBytes" + ] + }, + { + "description": "Out of memory error.", + "type": "string", + "enum": [ + "OutOfMemory" + ] + }, + { + "description": "No serialized representation is available for a value.", + "type": "string", + "enum": [ + "NotRepresentable" + ] + }, + { + "description": "Exceeded a recursion depth limit.", + "type": "string", + "enum": [ + "ExceededRecursionDepth" + ] + } + ] + }, + "TypeMismatch": { + "description": "An error struct representing a type mismatch in [`StoredValue`](crate::StoredValue) operations.", + "type": "object", + "required": [ + "expected", + "found" + ], + "properties": { + "expected": { + "description": "The name of the expected type.", + "type": "string" + }, + "found": { + "description": "The actual type found.", + "type": "string" + } + } + }, + "Message": { + "description": "Message that was emitted by an addressable entity during 
execution.", + "type": "object", + "required": [ + "block_index", + "hash_addr", + "message", + "topic_index", + "topic_name", + "topic_name_hash" + ], + "properties": { + "hash_addr": { + "description": "The identity of the entity that produced the message.", + "type": "string" + }, + "message": { + "description": "The payload of the message.", + "allOf": [ + { + "$ref": "#/components/schemas/MessagePayload" + } + ] + }, + "topic_name": { + "description": "The name of the topic on which the message was emitted on.", + "type": "string" + }, + "topic_name_hash": { + "description": "The hash of the name of the topic.", + "allOf": [ + { + "$ref": "#/components/schemas/TopicNameHash" + } + ] + }, + "topic_index": { + "description": "Message index in the topic.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "block_index": { + "description": "Message index in the block.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + } + }, + "MessagePayload": { + "description": "The payload of the message emitted by an addressable entity during execution.", + "oneOf": [ + { + "description": "Human readable string message.", + "type": "object", + "required": [ + "String" + ], + "properties": { + "String": { + "type": "string" + } + }, + "additionalProperties": false + }, + { + "description": "Message represented as raw bytes.", + "type": "object", + "required": [ + "Bytes" + ], + "properties": { + "Bytes": { + "$ref": "#/components/schemas/Bytes" + } + }, + "additionalProperties": false + } + ] + }, + "TopicNameHash": { + "description": "The hash of the name of the message topic.", + "type": "string" + }, + "Transaction": { + "description": "A versioned wrapper for a transaction or deploy.", + "oneOf": [ + { + "description": "A deploy.", + "type": "object", + "required": [ + "Deploy" + ], + "properties": { + "Deploy": { + "$ref": "#/components/schemas/Deploy" + } + }, + "additionalProperties": false + }, + { + "description": "A version 1 
transaction.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/components/schemas/TransactionV1" + } + }, + "additionalProperties": false + } + ] + }, + "TransactionV1": { + "description": "A unit of work sent by a client to the network, which when executed can cause global state to be altered.", + "type": "object", + "required": [ + "approvals", + "hash", + "payload" + ], + "properties": { + "hash": { + "$ref": "#/components/schemas/TransactionV1Hash" + }, + "payload": { + "$ref": "#/components/schemas/TransactionV1Payload" + }, + "approvals": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Approval" + }, + "uniqueItems": true + } + } + }, + "TransactionV1Payload": { + "description": "Internal payload of the transaction. The actual data over which the signing is done.", + "type": "object", + "required": [ + "chain_name", + "fields", + "initiator_addr", + "pricing_mode", + "timestamp", + "ttl" + ], + "properties": { + "initiator_addr": { + "$ref": "#/components/schemas/InitiatorAddr" + }, + "timestamp": { + "$ref": "#/components/schemas/Timestamp" + }, + "ttl": { + "$ref": "#/components/schemas/TimeDiff" + }, + "chain_name": { + "type": "string" + }, + "pricing_mode": { + "$ref": "#/components/schemas/PricingMode" + }, + "fields": { + "type": "object", + "additionalProperties": true + } + }, + "additionalProperties": false + }, + "PricingMode": { + "description": "Pricing mode of a Transaction.", + "oneOf": [ + { + "description": "The original payment model, where the creator of the transaction specifies how much they will pay, at what gas price.", + "type": "object", + "required": [ + "PaymentLimited" + ], + "properties": { + "PaymentLimited": { + "type": "object", + "required": [ + "gas_price_tolerance", + "payment_amount", + "standard_payment" + ], + "properties": { + "payment_amount": { + "description": "User-specified payment amount.", + "type": "integer", + "format": "uint64", + "minimum": 
0.0 + }, + "gas_price_tolerance": { + "description": "User-specified gas_price tolerance (minimum 1). This is interpreted to mean \"do not include this transaction in a block if the current gas price is greater than this number\"", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "standard_payment": { + "description": "Standard payment.", + "type": "boolean" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "The cost of the transaction is determined by the cost table, per the transaction category.", + "type": "object", + "required": [ + "Fixed" + ], + "properties": { + "Fixed": { + "type": "object", + "required": [ + "additional_computation_factor", + "gas_price_tolerance" + ], + "properties": { + "additional_computation_factor": { + "description": "User-specified additional computation factor (minimum 0). If \"0\" is provided, no additional logic is applied to the computation limit. Each value above \"0\" tells the node that it needs to treat the transaction as if it uses more gas than it's serialized size indicates. Each \"1\" will increase the \"wasm lane\" size bucket for this transaction by 1. So if the size of the transaction indicates bucket \"0\" and \"additional_computation_factor = 2\", the transaction will be treated as a \"2\".", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "gas_price_tolerance": { + "description": "User-specified gas_price tolerance (minimum 1). 
This is interpreted to mean \"do not include this transaction in a block if the current gas price is greater than this number\"", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "The payment for this transaction was previously paid, as proven by the receipt hash (this is for future use, not currently implemented).", + "type": "object", + "required": [ + "Prepaid" + ], + "properties": { + "Prepaid": { + "type": "object", + "required": [ + "receipt" + ], + "properties": { + "receipt": { + "description": "Pre-paid receipt.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + } + } + } +} \ No newline at end of file diff --git a/rpc_sidecar/Cargo.toml b/rpc_sidecar/Cargo.toml new file mode 100644 index 00000000..2aa511f3 --- /dev/null +++ b/rpc_sidecar/Cargo.toml @@ -0,0 +1,81 @@ +[package] +name = "casper-rpc-sidecar" +version = "1.0.0" +authors = ["Jacek Malec "] +edition = "2018" +description = "The Casper blockchain RPC sidecard" +documentation = "https://docs.rs/casper-rpc-sidecar" +readme = "README.md" +homepage = "https://casperlabs.io" +repository = "https://github.com/CasperLabs/casper-node/tree/master/rpc_sidecard" +license = "Apache-2.0" + +[dependencies] +anyhow = { workspace = true } +async-trait = "0.1.50" +backtrace = "0.3.50" +base16 = "0.2.1" +bincode = "1" +bytes = "1.5.0" +casper-json-rpc = { version = "1.0.0", path = "../json_rpc" } +casper-types = { workspace = true, features = ["datasize", "json-schema", "std", "std-fs-io"] } +casper-binary-port.workspace = true +datasize = { workspace = true, features = ["detailed", "fake_clock-types"] } +futures = { workspace = true } +http = "0.2.1" +hyper = "0.14.26" +juliet = { version ="0.3", features = ["tracing"] } +metrics = { workspace = true } +num_cpus = "1" +num-traits = { version 
= "0.2.10", default-features = false } +once_cell.workspace = true +portpicker = "0.1.1" +rand = "0.8.3" +schemars = { version = "0.8.21", features = ["preserve_order", "impl_json_schema"] } +serde = { workspace = true, default-features = true, features = ["derive"] } +serde_json = { version = "1", features = ["preserve_order"] } +serde-map-to-array = "1.1.0" +thiserror = { workspace = true } +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } +tokio-util = { version = "0.6.4", features = ["codec"] } +toml = { workspace = true } +tower = { version = "0.4.6", features = ["limit"] } +tracing = { workspace = true, default-features = true } +tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "json"] } +warp = { version = "0.3.6", features = ["compression"] } +derive-new = "0.6.0" + +[dev-dependencies] +assert-json-diff = "2" +casper-types = { workspace = true, features = ["datasize", "json-schema", "std", "std-fs-io", "testing"] } +casper-binary-port = { workspace = true, features = ["testing"] } +pretty_assertions = "1" +regex = "1" +tempfile = "3" +tokio = { workspace = true, features = ["test-util"] } +jsonschema = { workspace = true } + +[build-dependencies] +vergen = { version = "8.3.2", default-features = false, features = [ + "git", + "gitoxide", +] } + +[features] +testing = ["casper-types/testing", "casper-binary-port/testing"] + +[package.metadata.deb] +revision = "0" +assets = [ + ["../target/release/casper-rpc-sidecar", "/usr/bin/casper-rpc-sidecar", "755"] +] +maintainer-scripts = "../resources/maintainer_scripts/debian" +extended-description = """ +Package for Casper RPC sidecar. 
+For information on using this package, see https://github.com/casper-network/casper-node
+ - Queries for database items, with both the database and the key always being explicitly specified by the sidecar +- Transaction requests + - Requests to submit transactions for execution + - Requests to speculatively execute a transactions + +## Discovering the JSON RPC API + +Once setup and running as described [here](../README.md), the Sidecar can be queried for its JSON-RPC API using the `rpc.discover` method, as shown below. The result will be a list of RPC methods and their parameters. + +```sh +curl -X POST http://localhost:/rpc -H 'Content-Type: application/json' -d '{"jsonrpc": "2.0", "method": "rpc.discover", "id": 1}' +``` + +## License + +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). diff --git a/rpc_sidecar/build.rs b/rpc_sidecar/build.rs new file mode 100644 index 00000000..820ad1ce --- /dev/null +++ b/rpc_sidecar/build.rs @@ -0,0 +1,16 @@ +use std::env; + +use vergen::EmitBuilder; + +fn main() { + if let Err(error) = EmitBuilder::builder().fail_on_error().git_sha(true).emit() { + println!("cargo:warning={}", error); + println!("cargo:warning=casper-rpc-sidecar build version will not include git short hash"); + } + + // Make the build profile available to rustc at compile time. + println!( + "cargo:rustc-env=SIDECAR_BUILD_PROFILE={}", + env::var("PROFILE").unwrap() + ); +} diff --git a/rpc_sidecar/src/config.rs b/rpc_sidecar/src/config.rs new file mode 100644 index 00000000..7ac5f730 --- /dev/null +++ b/rpc_sidecar/src/config.rs @@ -0,0 +1,203 @@ +use std::net::{IpAddr, Ipv4Addr}; + +use datasize::DataSize; +use serde::Deserialize; +use thiserror::Error; + +use crate::SpeculativeExecConfig; + +/// Default binding address for the JSON-RPC HTTP server. +/// +/// Uses a fixed port per node, but binds on any interface. +const DEFAULT_IP_ADDRESS: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); +const DEFAULT_PORT: u16 = 0; +/// Default rate limit in qps. 
+const DEFAULT_QPS_LIMIT: u64 = 100; +/// Default max body bytes. This is 2.5MB which should be able to accommodate the largest valid +/// JSON-RPC request, which would be an "account_put_deploy". +const DEFAULT_MAX_BODY_BYTES: u32 = 2_621_440; +/// Default CORS origin. +const DEFAULT_CORS_ORIGIN: &str = ""; + +#[derive(Error, Debug)] +pub enum FieldParseError { + #[error("failed to parse field {} with error: {}", .field_name, .error)] + ParseError { + field_name: &'static str, + error: String, + }, +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +#[cfg_attr(any(feature = "testing", test), derive(Default))] +pub struct RpcServerConfig { + pub main_server: RpcConfig, + pub speculative_exec_server: Option, + pub node_client: NodeClientConfig, +} + +/// JSON-RPC HTTP server configuration. +#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct RpcConfig { + /// Setting to enable the HTTP server. + pub enable_server: bool, + /// IP address to bind JSON-RPC HTTP server to. + pub ip_address: IpAddr, + /// TCP port to bind JSON-RPC HTTP server to. + pub port: u16, + /// Maximum rate limit in queries per second. + pub qps_limit: u64, + /// Maximum number of bytes to accept in a single request body. + pub max_body_bytes: u32, + /// CORS origin. + pub cors_origin: String, +} + +impl RpcConfig { + /// Creates a default instance for `RpcServer`. 
+ pub fn new() -> Self { + RpcConfig { + enable_server: true, + ip_address: DEFAULT_IP_ADDRESS, + port: DEFAULT_PORT, + qps_limit: DEFAULT_QPS_LIMIT, + max_body_bytes: DEFAULT_MAX_BODY_BYTES, + cors_origin: DEFAULT_CORS_ORIGIN.to_string(), + } + } +} + +impl Default for RpcConfig { + fn default() -> Self { + RpcConfig::new() + } +} + +/// Default address to connect to the node. +// Change this to SocketAddr, once SocketAddr::new is const stable. +const DEFAULT_NODE_CONNECT_IP_ADDRESS: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); +const DEFAULT_NODE_CONNECT_PORT: u16 = 28104; +/// Default maximum payload size. +const DEFAULT_MAX_PAYLOAD_SIZE: u32 = 4 * 1024 * 1024; +/// Default message timeout in seconds. +const DEFAULT_MESSAGE_TIMEOUT_SECS: u64 = 30; +/// Default timeout for client access. +const DEFAULT_CLIENT_ACCESS_TIMEOUT_SECS: u64 = 10; +/// Default exponential backoff base delay. +const DEFAULT_EXPONENTIAL_BACKOFF_BASE_MS: u64 = 1000; +/// Default exponential backoff maximum delay. +const DEFAULT_EXPONENTIAL_BACKOFF_MAX_MS: u64 = 64_000; +/// Default exponential backoff coefficient. +const DEFAULT_EXPONENTIAL_BACKOFF_COEFFICIENT: u64 = 2; +/// Default keep alive timeout milliseconds. +const DEFAULT_KEEPALIVE_TIMEOUT_MS: u64 = 1_000; +/// Default max attempts +const DEFAULT_EXPONENTIAL_BACKOFF_MAX_ATTEMPTS: u32 = 3; + +/// Node client configuration. +#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct NodeClientConfig { + /// IP address of the node. + pub ip_address: IpAddr, + /// Port of the node. + pub port: u16, + /// Maximum size of a message in bytes. + pub max_message_size_bytes: u32, + /// Message transfer timeout in seconds. + pub message_timeout_secs: u64, + /// Timeout specifying how long to wait for binary port client to be available. + // Access to the client is synchronized. 
+ pub client_access_timeout_secs: u64, + /// The amount of ms to wait between sending keepalive requests. + pub keepalive_timeout_ms: u64, + /// Configuration for exponential backoff to be used for re-connects. + pub exponential_backoff: ExponentialBackoffConfig, +} + +impl NodeClientConfig { + /// Creates a default instance for `NodeClientConfig`. + pub fn new() -> Self { + NodeClientConfig { + ip_address: DEFAULT_NODE_CONNECT_IP_ADDRESS, + port: DEFAULT_NODE_CONNECT_PORT, + max_message_size_bytes: DEFAULT_MAX_PAYLOAD_SIZE, + message_timeout_secs: DEFAULT_MESSAGE_TIMEOUT_SECS, + client_access_timeout_secs: DEFAULT_CLIENT_ACCESS_TIMEOUT_SECS, + keepalive_timeout_ms: DEFAULT_KEEPALIVE_TIMEOUT_MS, + exponential_backoff: ExponentialBackoffConfig { + initial_delay_ms: DEFAULT_EXPONENTIAL_BACKOFF_BASE_MS, + max_delay_ms: DEFAULT_EXPONENTIAL_BACKOFF_MAX_MS, + coefficient: DEFAULT_EXPONENTIAL_BACKOFF_COEFFICIENT, + max_attempts: DEFAULT_EXPONENTIAL_BACKOFF_MAX_ATTEMPTS, + }, + } + } + + /// Creates an instance of `NodeClientConfig` with specified listening port. + #[cfg(any(feature = "testing", test))] + pub fn new_with_port(port: u16) -> Self { + let localhost = IpAddr::V4(Ipv4Addr::LOCALHOST); + NodeClientConfig { + ip_address: localhost, + port, + max_message_size_bytes: DEFAULT_MAX_PAYLOAD_SIZE, + message_timeout_secs: DEFAULT_MESSAGE_TIMEOUT_SECS, + client_access_timeout_secs: DEFAULT_CLIENT_ACCESS_TIMEOUT_SECS, + keepalive_timeout_ms: DEFAULT_KEEPALIVE_TIMEOUT_MS, + exponential_backoff: ExponentialBackoffConfig { + initial_delay_ms: DEFAULT_EXPONENTIAL_BACKOFF_BASE_MS, + max_delay_ms: DEFAULT_EXPONENTIAL_BACKOFF_MAX_MS, + coefficient: DEFAULT_EXPONENTIAL_BACKOFF_COEFFICIENT, + max_attempts: DEFAULT_EXPONENTIAL_BACKOFF_MAX_ATTEMPTS, + }, + } + } + + /// Creates an instance of `NodeClientConfig` with specified listening port and maximum number + /// of reconnection retries. 
+ #[cfg(any(feature = "testing", test))] + pub fn new_with_port_and_retries(port: u16, num_of_retries: u32) -> Self { + let localhost = IpAddr::V4(Ipv4Addr::LOCALHOST); + NodeClientConfig { + ip_address: localhost, + port, + max_message_size_bytes: DEFAULT_MAX_PAYLOAD_SIZE, + message_timeout_secs: DEFAULT_MESSAGE_TIMEOUT_SECS, + client_access_timeout_secs: DEFAULT_CLIENT_ACCESS_TIMEOUT_SECS, + keepalive_timeout_ms: DEFAULT_KEEPALIVE_TIMEOUT_MS, + exponential_backoff: ExponentialBackoffConfig { + initial_delay_ms: 500, + max_delay_ms: 3000, + coefficient: 3, + max_attempts: num_of_retries, + }, + } + } +} + +impl Default for NodeClientConfig { + fn default() -> Self { + Self::new() + } +} + +/// Exponential backoff configuration for re-connects. +#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct ExponentialBackoffConfig { + /// Initial wait time before the first re-connect attempt. + pub initial_delay_ms: u64, + /// Maximum wait time between re-connect attempts. + pub max_delay_ms: u64, + /// The multiplier to apply to the previous delay to get the next delay. + pub coefficient: u64, + /// Maximum number of connection attempts. 
+ pub max_attempts: u32, +} diff --git a/rpc_sidecar/src/http_server.rs b/rpc_sidecar/src/http_server.rs new file mode 100644 index 00000000..7b3298ea --- /dev/null +++ b/rpc_sidecar/src/http_server.rs @@ -0,0 +1,110 @@ +use std::sync::Arc; + +use hyper::server::{conn::AddrIncoming, Builder}; + +use casper_json_rpc::{CorsOrigin, RequestHandlersBuilder}; + +use crate::{ + rpcs::{ + info::{GetPeers, GetReward, GetStatus, GetTransaction}, + state::{GetAddressableEntity, GetPackage, QueryBalanceDetails}, + }, + NodeClient, +}; + +use super::rpcs::{ + account::{PutDeploy, PutTransaction}, + chain::{ + GetBlock, GetBlockTransfers, GetEraInfoBySwitchBlock, GetEraSummary, GetStateRootHash, + }, + docs::RpcDiscover, + info::{GetChainspec, GetDeploy, GetValidatorChanges}, + state::{ + GetAccountInfo, GetAuctionInfo, GetBalance, GetDictionaryItem, GetItem, GetTrie, + QueryBalance, QueryGlobalState, + }, + state_get_auction_info_v2::GetAuctionInfo as GetAuctionInfoV2, + RpcWithOptionalParams, RpcWithParams, RpcWithoutParams, +}; + +/// The URL path for all JSON-RPC requests. +pub const RPC_API_PATH: &str = "rpc"; + +pub const RPC_API_SERVER_NAME: &str = "JSON RPC"; + +/// Run the JSON-RPC server. 
+pub async fn run( + node: Arc, + builder: Builder, + qps_limit: u64, + max_body_bytes: u32, + cors_origin: String, +) { + let mut handlers = RequestHandlersBuilder::new(); + PutDeploy::register_as_handler(node.clone(), &mut handlers); + PutTransaction::register_as_handler(node.clone(), &mut handlers); + GetBlock::register_as_handler(node.clone(), &mut handlers); + GetBlockTransfers::register_as_handler(node.clone(), &mut handlers); + GetStateRootHash::register_as_handler(node.clone(), &mut handlers); + GetItem::register_as_handler(node.clone(), &mut handlers); + QueryGlobalState::register_as_handler(node.clone(), &mut handlers); + GetBalance::register_as_handler(node.clone(), &mut handlers); + GetAccountInfo::register_as_handler(node.clone(), &mut handlers); + GetAddressableEntity::register_as_handler(node.clone(), &mut handlers); + GetPackage::register_as_handler(node.clone(), &mut handlers); + GetDeploy::register_as_handler(node.clone(), &mut handlers); + GetTransaction::register_as_handler(node.clone(), &mut handlers); + GetPeers::register_as_handler(node.clone(), &mut handlers); + GetStatus::register_as_handler(node.clone(), &mut handlers); + GetReward::register_as_handler(node.clone(), &mut handlers); + GetEraInfoBySwitchBlock::register_as_handler(node.clone(), &mut handlers); + GetEraSummary::register_as_handler(node.clone(), &mut handlers); + GetAuctionInfo::register_as_handler(node.clone(), &mut handlers); + GetAuctionInfoV2::register_as_handler(node.clone(), &mut handlers); + GetTrie::register_as_handler(node.clone(), &mut handlers); + GetValidatorChanges::register_as_handler(node.clone(), &mut handlers); + RpcDiscover::register_as_handler(node.clone(), &mut handlers); + GetDictionaryItem::register_as_handler(node.clone(), &mut handlers); + GetChainspec::register_as_handler(node.clone(), &mut handlers); + QueryBalance::register_as_handler(node.clone(), &mut handlers); + QueryBalanceDetails::register_as_handler(node, &mut handlers); + let handlers = 
handlers.build(); + + match cors_origin.as_str() { + "" => { + super::rpcs::run( + builder, + handlers, + qps_limit, + max_body_bytes, + RPC_API_PATH, + RPC_API_SERVER_NAME, + ) + .await + } + "*" => { + super::rpcs::run_with_cors( + builder, + handlers, + qps_limit, + max_body_bytes, + RPC_API_PATH, + RPC_API_SERVER_NAME, + CorsOrigin::Any, + ) + .await + } + _ => { + super::rpcs::run_with_cors( + builder, + handlers, + qps_limit, + max_body_bytes, + RPC_API_PATH, + RPC_API_SERVER_NAME, + CorsOrigin::Specified(cors_origin), + ) + .await + } + } +} diff --git a/rpc_sidecar/src/lib.rs b/rpc_sidecar/src/lib.rs new file mode 100644 index 00000000..189c2eac --- /dev/null +++ b/rpc_sidecar/src/lib.rs @@ -0,0 +1,285 @@ +mod config; +mod http_server; +mod node_client; +mod rpcs; +mod speculative_exec_config; +mod speculative_exec_server; +#[cfg(any(feature = "testing", test))] +pub mod testing; + +use anyhow::Error; +use casper_binary_port::{Command, CommandHeader}; +use casper_types::{ + bytesrepr::{self, ToBytes}, + ProtocolVersion, +}; +pub use config::{FieldParseError, NodeClientConfig, RpcConfig, RpcServerConfig}; +use futures::{future::BoxFuture, FutureExt}; +pub use http_server::run as run_rpc_server; +use hyper::{ + server::{conn::AddrIncoming, Builder as ServerBuilder}, + Server, +}; +use node_client::FramedNodeClient; +pub use node_client::{Error as ClientError, NodeClient}; +pub use speculative_exec_config::Config as SpeculativeExecConfig; +pub use speculative_exec_server::run as run_speculative_exec_server; +use std::{net::SocketAddr, process::ExitCode, sync::Arc}; +use tracing::{error, warn}; +/// Minimal casper protocol version supported by this sidecar. +pub const SUPPORTED_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts(2, 0, 0); + +/// The exit code is used to indicate that the client has shut down due to version mismatch. 
+pub const CLIENT_SHUTDOWN_EXIT_CODE: u8 = 0x3; + +pub type MaybeRpcServerReturn<'a> = Result>>, Error>; +pub async fn build_rpc_server<'a>( + config: RpcServerConfig, + maybe_network_name: Option, +) -> MaybeRpcServerReturn<'a> { + let (node_client, reconnect_loop, keepalive_loop) = + FramedNodeClient::new(config.node_client.clone(), maybe_network_name).await?; + let node_client: Arc = node_client; + let mut futures = Vec::new(); + let main_server_config = config.main_server; + if main_server_config.enable_server { + let future = run_rpc(main_server_config, node_client.clone()) + .map(|q| { + if let Err(e) = q { + error!("Rpc server finished with error: {}", e); + } + Ok(ExitCode::SUCCESS) + }) + .boxed(); + futures.push(future); + } + let speculative_server_config = config.speculative_exec_server; + if let Some(config) = speculative_server_config { + if config.enable_server { + let future = run_speculative_exec(config, node_client.clone()) + .map(|q| { + if let Err(e) = q { + error!("Rpc speculative server finished with error: {}", e); + } + Ok(ExitCode::SUCCESS) + }) + .boxed(); + futures.push(future); + } + } + let reconnect_loop = reconnect_loop + .map(|q| { + if let Err(e) = q { + error!("reconect_loop finished with error: {}", e); + } + Ok(ExitCode::from(CLIENT_SHUTDOWN_EXIT_CODE)) + }) + .boxed(); + futures.push(reconnect_loop); + let keepalive_loop = keepalive_loop + .map(|q| { + if let Err(e) = q { + error!("keepalive_loop finished with error: {}", e); + } + Ok(ExitCode::from(CLIENT_SHUTDOWN_EXIT_CODE)) + }) + .boxed(); + futures.push(keepalive_loop); + Ok(Some(retype_future_vec(futures).boxed())) +} + +async fn retype_future_vec( + futures: Vec>>, +) -> Result { + futures::future::select_all(futures).await.0 +} + +async fn run_rpc(config: RpcConfig, node_client: Arc) -> Result<(), Error> { + run_rpc_server( + node_client, + start_listening(&SocketAddr::new(config.ip_address, config.port))?, + config.qps_limit, + config.max_body_bytes, + 
config.cors_origin.clone(), + ) + .await; + Ok(()) +} + +async fn run_speculative_exec( + config: SpeculativeExecConfig, + node_client: Arc, +) -> anyhow::Result<()> { + run_speculative_exec_server( + node_client, + start_listening(&SocketAddr::new(config.ip_address, config.port))?, + config.qps_limit, + config.max_body_bytes, + config.cors_origin.clone(), + ) + .await; + Ok(()) +} + +fn start_listening(address: &SocketAddr) -> anyhow::Result> { + Server::try_bind(address).map_err(|error| { + warn!(%error, %address, "failed to start HTTP server"); + error.into() + }) +} + +fn encode_request(req: &Command, id: u16) -> Result, bytesrepr::Error> { + let header = CommandHeader::new(req.tag(), id); + let mut bytes = Vec::with_capacity(header.serialized_length() + req.serialized_length()); + header.write_bytes(&mut bytes)?; + req.write_bytes(&mut bytes)?; + Ok(bytes) +} + +#[cfg(test)] +mod tests { + use std::fs; + use std::io::Write; + + use assert_json_diff::{assert_json_eq, assert_json_matches_no_panic, CompareMode, Config}; + use regex::Regex; + use serde_json::Value; + + use crate::rpcs::docs::OPEN_RPC_SCHEMA; + + use crate::rpcs::speculative_open_rpc_schema::SPECULATIVE_OPEN_RPC_SCHEMA; + use crate::rpcs::{ + docs::OpenRpcSchema, + info::{GetChainspecResult, GetStatusResult, GetValidatorChangesResult}, + }; + use schemars::schema_for; + + #[test] + fn main_server_json_schema_check() { + json_schema_check("rpc_schema.json", &OPEN_RPC_SCHEMA); + } + + #[test] + fn speculative_json_schema_check() { + json_schema_check("speculative_rpc_schema.json", &SPECULATIVE_OPEN_RPC_SCHEMA); + } + + #[test] + fn json_schema_status_check() { + let schema_path = format!( + "{}/../resources/test/schema_status.json", + env!("CARGO_MANIFEST_DIR") + ); + assert_schema( + &schema_path, + &serde_json::to_string_pretty(&schema_for!(GetStatusResult)).unwrap(), + ); + } + + #[test] + fn json_schema_validator_changes_check() { + let schema_path = format!( + 
"{}/../resources/test/schema_validator_changes.json", + env!("CARGO_MANIFEST_DIR") + ); + assert_schema( + &schema_path, + &serde_json::to_string_pretty(&schema_for!(GetValidatorChangesResult)).unwrap(), + ); + } + + #[test] + fn json_schema_rpc_schema_check() { + let schema_path = format!( + "{}/../resources/test/schema_rpc_schema.json", + env!("CARGO_MANIFEST_DIR") + ); + assert_schema( + &schema_path, + &serde_json::to_string_pretty(&schema_for!(OpenRpcSchema)).unwrap(), + ); + } + + #[test] + fn json_schema_chainspec_bytes_check() { + let schema_path = format!( + "{}/../resources/test/schema_chainspec_bytes.json", + env!("CARGO_MANIFEST_DIR") + ); + assert_schema( + &schema_path, + &serde_json::to_string_pretty(&schema_for!(GetChainspecResult)).unwrap(), + ); + } + + /// Assert that the file at `schema_path` matches the provided `actual_schema`, which can be + /// derived from `schemars::schema_for!` or `schemars::schema_for_value!`, for example. This + /// method will create a temporary file with the actual schema and print the location if it + /// fails. 
+ pub fn assert_schema(schema_path: &str, actual_schema: &str) { + let expected_schema = fs::read_to_string(schema_path).unwrap(); + let expected_schema: Value = serde_json::from_str(&expected_schema).unwrap(); + let mut temp_file = tempfile::Builder::new() + .suffix(".json") + .tempfile_in(env!("OUT_DIR")) + .unwrap(); + temp_file.write_all(actual_schema.as_bytes()).unwrap(); + + let actual_schema: Value = serde_json::from_str(actual_schema).unwrap(); + let (_file, temp_file_path) = temp_file.keep().unwrap(); + + let result = assert_json_matches_no_panic( + &actual_schema, + &expected_schema, + Config::new(CompareMode::Strict), + ); + assert_eq!( + result, + Ok(()), + "schema does not match:\nexpected:\n{}\nactual:\n{}\n", + schema_path, + temp_file_path.display() + ); + assert_json_eq!(actual_schema, expected_schema); + } + + fn json_schema_check(schema_filename: &str, rpc_schema: &OpenRpcSchema) { + let schema_path = format!( + "{}/../resources/test/{}", + env!("CARGO_MANIFEST_DIR"), + schema_filename, + ); + assert_schema( + &schema_path, + &serde_json::to_string_pretty(rpc_schema).unwrap(), + ); + + let schema = fs::read_to_string(&schema_path).unwrap(); + + // Check for the following pattern in the JSON as this points to a byte array or vec (e.g. + // a hash digest) not being represented as a hex-encoded string: + // + // ```json + // "type": "array", + // "items": { + // "type": "integer", + // "format": "uint8", + // "minimum": 0.0 + // }, + // ``` + // + // The type/variant in question (most easily identified from the git diff) might be easily + // fixed via application of a serde attribute, e.g. + // `#[serde(with = "serde_helpers::raw_32_byte_array")]`. It will likely require a + // schemars attribute too, indicating it is a hex-encoded string. See for example + // `TransactionInvocationTarget::Package::addr`. 
+ let regex = Regex::new( + r#"\s*"type":\s*"array",\s*"items":\s*\{\s*"type":\s*"integer",\s*"format":\s*"uint8",\s*"minimum":\s*0\.0\s*\},"# + ).unwrap(); + assert!( + !regex.is_match(&schema), + "seems like a byte array is not hex-encoded - see comment in `json_schema_check` for \ + further info" + ); + } +} diff --git a/rpc_sidecar/src/node_client.rs b/rpc_sidecar/src/node_client.rs new file mode 100644 index 00000000..a1abd2a0 --- /dev/null +++ b/rpc_sidecar/src/node_client.rs @@ -0,0 +1,1811 @@ +use crate::{config::ExponentialBackoffConfig, encode_request, NodeClientConfig}; +use anyhow::Error as AnyhowError; +use async_trait::async_trait; +use casper_binary_port::{ + AccountInformation, AddressableEntityInformation, BalanceResponse, BinaryMessage, + BinaryMessageCodec, BinaryResponse, BinaryResponseAndRequest, Command, CommandHeader, + ConsensusValidatorChanges, ContractInformation, DictionaryItemIdentifier, + DictionaryQueryResult, EntityIdentifier, EraIdentifier, ErrorCode, GetRequest, + GetTrieFullResult, GlobalStateEntityQualifier, GlobalStateQueryResult, GlobalStateRequest, + InformationRequest, InformationRequestTag, KeyPrefix, NodeStatus, PackageIdentifier, + PayloadEntity, PurseIdentifier, RecordId, ResponseType, RewardResponse, + SpeculativeExecutionResult, TransactionWithExecutionInfo, ValueWithProof, +}; +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + contracts::ContractPackage, + system::auction::DelegatorKind, + AvailableBlockRange, BlockHash, BlockHeader, BlockIdentifier, BlockWithSignatures, + ChainspecRawBytes, Digest, GlobalStateIdentifier, Key, KeyTag, Package, Peers, PublicKey, + StoredValue, Transaction, TransactionHash, Transfer, +}; +use futures::{Future, SinkExt, StreamExt}; +use metrics::rpc::{ + inc_disconnect, observe_reconnect_time, register_mismatched_id, register_timeout, +}; +use serde::de::DeserializeOwned; +use std::{ + convert::{TryFrom, TryInto}, + fmt::{self, Display, Formatter}, + net::{IpAddr, SocketAddr}, 
+ sync::{ + atomic::{AtomicU16, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; +use tokio::{ + net::TcpStream, + sync::{futures::Notified, RwLock, RwLockWriteGuard}, +}; +use tokio_util::codec::Framed; +use tracing::{error, field, info, warn}; + +const MAX_MISMATCHED_ID_RETRIES: u8 = 100; +#[cfg(not(test))] +const INITIAL_REQUEST_ID: u16 = 0; +#[cfg(test)] +const INITIAL_REQUEST_ID: u16 = 1; + +#[async_trait] +pub trait NodeClient: Send + Sync { + async fn send_request(&self, req: Command) -> Result; + + async fn read_record( + &self, + record_id: RecordId, + key: &[u8], + ) -> Result { + let get = GetRequest::Record { + record_type_tag: record_id.into(), + key: key.to_vec(), + }; + self.send_request(Command::Get(get)).await + } + + async fn read_info(&self, req: InformationRequest) -> Result { + let get = req.try_into().expect("should always be able to convert"); + self.send_request(Command::Get(get)).await + } + + async fn query_global_state( + &self, + state_identifier: Option, + base_key: Key, + path: Vec, + ) -> Result, Error> { + let req = GlobalStateRequest::new( + state_identifier, + GlobalStateEntityQualifier::Item { base_key, path }, + ); + let resp = self + .send_request(Command::Get(GetRequest::State(Box::new(req)))) + .await?; + parse_response::(&resp.into()) + } + + async fn query_global_state_by_tag( + &self, + state_identifier: Option, + key_tag: KeyTag, + ) -> Result, Error> { + let get = GlobalStateRequest::new( + state_identifier, + GlobalStateEntityQualifier::AllItems { key_tag }, + ); + let resp = self + .send_request(Command::Get(GetRequest::State(Box::new(get)))) + .await?; + parse_response::>(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn query_global_state_by_prefix( + &self, + state_identifier: Option, + key_prefix: KeyPrefix, + ) -> Result, Error> { + let get = GlobalStateRequest::new( + state_identifier, + GlobalStateEntityQualifier::ItemsByPrefix { key_prefix }, + ); + let resp = self + 
.send_request(Command::Get(GetRequest::State(Box::new(get)))) + .await?; + parse_response::>(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_balance( + &self, + state_identifier: Option, + purse_identifier: PurseIdentifier, + ) -> Result { + let get = GlobalStateRequest::new( + state_identifier, + GlobalStateEntityQualifier::Balance { purse_identifier }, + ); + let resp = self + .send_request(Command::Get(GetRequest::State(Box::new(get)))) + .await?; + parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_trie_bytes(&self, trie_key: Digest) -> Result>, Error> { + let resp = self + .send_request(Command::Get(GetRequest::Trie { trie_key })) + .await?; + let res = parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope)?; + Ok(res.into_inner().map(>::from)) + } + + async fn query_dictionary_item( + &self, + state_identifier: Option, + identifier: DictionaryItemIdentifier, + ) -> Result, Error> { + let get = GlobalStateRequest::new( + state_identifier, + GlobalStateEntityQualifier::DictionaryItem { identifier }, + ); + let resp = self + .send_request(Command::Get(GetRequest::State(Box::new(get)))) + .await?; + parse_response::(&resp.into()) + } + + async fn try_accept_transaction(&self, transaction: Transaction) -> Result<(), Error> { + let request = Command::TryAcceptTransaction { transaction }; + let response = self.send_request(request).await?; + + if response.is_success() { + return Ok(()); + } else { + return Err(Error::from_error_code(response.error_code())); + } + } + + async fn exec_speculatively( + &self, + transaction: Transaction, + ) -> Result { + let request = Command::TrySpeculativeExec { transaction }; + let resp = self.send_request(request).await?; + parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_block_transfers(&self, hash: BlockHash) -> Result>, Error> { + let key = hash.to_bytes().expect("should always serialize a digest"); + let resp = 
self.read_record(RecordId::Transfer, &key).await?; + parse_response_bincode::>(&resp.into()) + } + + async fn read_block_header( + &self, + block_identifier: Option, + ) -> Result, Error> { + let resp = self + .read_info(InformationRequest::BlockHeader(block_identifier)) + .await?; + parse_response::(&resp.into()) + } + + async fn read_block_with_signatures( + &self, + block_identifier: Option, + ) -> Result, Error> { + let resp = self + .read_info(InformationRequest::BlockWithSignatures(block_identifier)) + .await?; + parse_response::(&resp.into()) + } + + async fn read_transaction_with_execution_info( + &self, + hash: TransactionHash, + with_finalized_approvals: bool, + ) -> Result, Error> { + let resp = self + .read_info(InformationRequest::Transaction { + hash, + with_finalized_approvals, + }) + .await?; + parse_response::(&resp.into()) + } + + async fn read_peers(&self) -> Result { + let resp = self.read_info(InformationRequest::Peers).await?; + parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_available_block_range(&self) -> Result { + let resp = self + .read_info(InformationRequest::AvailableBlockRange) + .await?; + parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_chainspec_bytes(&self) -> Result { + let resp = self + .read_info(InformationRequest::ChainspecRawBytes) + .await?; + parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_validator_changes(&self) -> Result { + let resp = self + .read_info(InformationRequest::ConsensusValidatorChanges) + .await?; + parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_latest_switch_block_header(&self) -> Result, Error> { + let resp = self + .read_info(InformationRequest::LatestSwitchBlockHeader) + .await?; + parse_response::(&resp.into()) + } + + async fn read_node_status(&self) -> Result { + let resp = self.read_info(InformationRequest::NodeStatus).await?; + 
parse_response::(&resp.into())?.ok_or(Error::EmptyEnvelope) + } + + async fn read_reward( + &self, + era_identifier: Option, + validator: PublicKey, + delegator: Option, + ) -> Result, Error> { + let validator = validator.into(); + let delegator = delegator.map(|delegator| Box::new(DelegatorKind::PublicKey(delegator))); + let resp = self + .read_info(InformationRequest::Reward { + era_identifier, + validator, + delegator, + }) + .await?; + parse_response::(&resp.into()) + } + + async fn read_package( + &self, + state_identifier: Option, + identifier: PackageIdentifier, + ) -> Result, Error> { + let get = InformationRequest::Package { + state_identifier, + identifier, + }; + let resp = self.read_info(get).await?; + match resp.response().returned_data_type_tag() { + Some(type_tag) if type_tag == ResponseType::ContractPackageWithProof as u8 => Ok( + parse_response::>(&resp.into())? + .map(PackageResponse::ContractPackage), + ), + _ => Ok(parse_response::>(&resp.into())? + .map(PackageResponse::Package)), + } + } + + async fn read_entity( + &self, + state_identifier: Option, + identifier: EntityIdentifier, + include_bytecode: bool, + ) -> Result, Error> { + let get = InformationRequest::Entity { + state_identifier, + identifier, + include_bytecode, + }; + let resp = self.read_info(get).await?; + match resp.response().returned_data_type_tag() { + Some(type_tag) if type_tag == ResponseType::ContractInformation as u8 => { + Ok(parse_response::(&resp.into())? + .map(EntityResponse::Contract)) + } + Some(type_tag) if type_tag == ResponseType::AccountInformation as u8 => Ok( + parse_response::(&resp.into())?.map(EntityResponse::Account), + ), + _ => Ok( + parse_response::(&resp.into())? 
+ .map(EntityResponse::Entity), + ), + } + } +} + +#[derive(Debug, thiserror::Error, PartialEq, Eq)] + +pub enum InvalidTransactionOrDeploy { + ///The deploy had an invalid chain name + #[error("The deploy had an invalid chain name")] + DeployChainName, + ///Deploy dependencies are no longer supported + #[error("The dependencies for this transaction are no longer supported")] + DeployDependenciesNoLongerSupported, + ///The deploy sent to the network had an excessive size + #[error("The deploy had an excessive size")] + DeployExcessiveSize, + ///The deploy sent to the network had an excessive time to live + #[error("The deploy had an excessive time to live")] + DeployExcessiveTimeToLive, + ///The deploy sent to the network had a timestamp referencing a time that has yet to occur. + #[error("The deploys timestamp is in the future")] + DeployTimestampInFuture, + ///The deploy sent to the network had an invalid body hash + #[error("The deploy had an invalid body hash")] + DeployBodyHash, + ///The deploy sent to the network had an invalid deploy hash i.e. 
the provided deploy hash + /// didn't match the derived deploy hash + #[error("The deploy had an invalid deploy hash")] + DeployHash, + ///The deploy sent to the network had an empty approval set + #[error("The deploy had no approvals")] + DeployEmptyApprovals, + ///The deploy sent to the network had an invalid approval + #[error("The deploy had an invalid approval")] + DeployApproval, + ///The deploy sent to the network had an excessive session args length + #[error("The deploy had an excessive session args length")] + DeployExcessiveSessionArgsLength, + ///The deploy sent to the network had an excessive payment args length + #[error("The deploy had an excessive payment args length")] + DeployExcessivePaymentArgsLength, + ///The deploy sent to the network had a missing payment amount + #[error("The deploy had a missing payment amount")] + DeployMissingPaymentAmount, + ///The deploy sent to the network had a payment amount that was not parseable + #[error("The deploy sent to the network had a payment amount that was unable to be parsed")] + DeployFailedToParsePaymentAmount, + ///The deploy sent to the network exceeded the block gas limit + #[error("The deploy sent to the network exceeded the block gas limit")] + DeployExceededBlockGasLimit, + ///The deploy sent to the network was missing a transfer amount + #[error("The deploy sent to the network was missing a transfer amount")] + DeployMissingTransferAmount, + ///The deploy sent to the network had a transfer amount that was unable to be parseable + #[error("The deploy sent to the network had a transfer amount that was unable to be parsed")] + DeployFailedToParseTransferAmount, + ///The deploy sent to the network had a transfer amount that was insufficient + #[error("The deploy sent to the network had an insufficient transfer amount")] + DeployInsufficientTransferAmount, + ///The deploy sent to the network had excessive approvals + #[error("The deploy sent to the network had excessive approvals")] + 
DeployExcessiveApprovals, + ///The network was unable to calculate the gas limit for the deploy + #[error("The network was unable to calculate the gas limit associated with the deploy")] + DeployUnableToCalculateGasLimit, + ///The network was unable to calculate the gas cost for the deploy + #[error("The network was unable to calculate the gas cost for the deploy")] + DeployUnableToCalculateGasCost, + ///The deploy sent to the network was invalid for an unspecified reason + #[error("The deploy sent to the network was invalid for an unspecified reason")] + DeployUnspecified, + /// The transaction sent to the network had an invalid chain name + #[error("The transaction sent to the network had an invalid chain name")] + TransactionChainName, + /// The transaction sent to the network had an excessive size + #[error("The transaction sent to the network had an excessive size")] + TransactionExcessiveSize, + /// The transaction sent to the network had an excessive time to live + #[error("The transaction sent to the network had an excessive time to live")] + TransactionExcessiveTimeToLive, + /// The transaction sent to the network had a timestamp located in the future. 
+ #[error("The transaction sent to the network had a timestamp that has not yet occurred")] + TransactionTimestampInFuture, + /// The transaction sent to the network had a provided body hash that conflicted with hash + /// derived by the network + #[error("The transaction sent to the network had an invalid body hash")] + TransactionBodyHash, + /// The transaction sent to the network had a provided hash that conflicted with the hash + /// derived by the network + #[error("The transaction sent to the network had an invalid hash")] + TransactionHash, + /// The transaction sent to the network had an empty approvals set + #[error("The transaction sent to the network had no approvals")] + TransactionEmptyApprovals, + /// The transaction sent to the network had an invalid approval + #[error("The transaction sent to the network had an invalid approval")] + TransactionInvalidApproval, + /// The transaction sent to the network had excessive args length + #[error("The transaction sent to the network had excessive args length")] + TransactionExcessiveArgsLength, + /// The transaction sent to the network had excessive approvals + #[error("The transaction sent to the network had excessive approvals")] + TransactionExcessiveApprovals, + /// The transaction sent to the network exceeds the block gas limit + #[error("The transaction sent to the network exceeds the networks block gas limit")] + TransactionExceedsBlockGasLimit, + /// The transaction sent to the network had a missing arg + #[error("The transaction sent to the network was missing an argument")] + TransactionMissingArg, + /// The transaction sent to the network had an argument with an unexpected type + #[error("The transaction sent to the network had an unexpected argument type")] + TransactionUnexpectedArgType, + /// The transaction sent to the network had an invalid argument + #[error("The transaction sent to the network had an invalid argument")] + TransactionInvalidArg, + /// The transaction sent to the network had 
an insufficient transfer amount + #[error("The transaction sent to the network had an insufficient transfer amount")] + TransactionInsufficientTransferAmount, + /// The transaction sent to the network had a custom entry point when it should have a non + /// custom entry point. + #[error("The native transaction sent to the network should not have a custom entry point")] + TransactionEntryPointCannotBeCustom, + /// The transaction sent to the network had a standard entry point when it must be custom. + #[error("The non-native transaction sent to the network must have a custom entry point")] + TransactionEntryPointMustBeCustom, + /// The transaction sent to the network had empty module bytes + #[error("The transaction sent to the network had empty module bytes")] + TransactionEmptyModuleBytes, + /// The transaction sent to the network had an invalid gas price conversion + #[error("The transaction sent to the network had an invalid gas price conversion")] + TransactionGasPriceConversion, + /// The network was unable to calculate the gas limit for the transaction sent. + #[error("The network was unable to calculate the gas limit for the transaction sent")] + TransactionUnableToCalculateGasLimit, + /// The network was unable to calculate the gas cost for the transaction sent. 
+ #[error("The network was unable to calculate the gas cost for the transaction sent.")] + TransactionUnableToCalculateGasCost, + /// The transaction sent to the network had an invalid pricing mode + #[error("The transaction sent to the network had an invalid pricing mode")] + TransactionPricingMode, + /// The transaction sent to the network was invalid for an unspecified reason + #[error("The transaction sent to the network was invalid for an unspecified reason")] + TransactionUnspecified, + /// The catchall error from a casper node + #[error("The transaction or deploy sent to the network was invalid for an unspecified reason")] + TransactionOrDeployUnspecified, + /// Blockchain is empty + #[error("blockchain is empty")] + EmptyBlockchain, + /// Expected deploy, but got transaction + #[error("expected deploy, got transaction")] + ExpectedDeploy, + /// Expected transaction, but got deploy + #[error("expected transaction V1, got deploy")] + ExpectedTransaction, + /// Transaction has expired + #[error("transaction has expired")] + TransactionExpired, + /// Transactions parameters are missing or incorrect + #[error("missing or incorrect transaction parameters")] + MissingOrIncorrectParameters, + /// No such addressable entity + #[error("no such addressable entity")] + NoSuchAddressableEntity, + // No such contract at hash + #[error("no such contract at hash")] + NoSuchContractAtHash, + /// No such entry point + #[error("no such entry point")] + NoSuchEntryPoint, + /// No such package at hash + #[error("no such package at hash")] + NoSuchPackageAtHash, + /// Invalid entity at version + #[error("invalid entity at version")] + InvalidEntityAtVersion, + /// Disabled entity at version + #[error("disabled entity at version")] + DisabledEntityAtVersion, + /// Missing entity at version + #[error("missing entity at version")] + MissingEntityAtVersion, + /// Invalid associated keys + #[error("invalid associated keys")] + InvalidAssociatedKeys, + /// Insufficient signature 
weight + #[error("insufficient signature weight")] + InsufficientSignatureWeight, + /// Insufficient balance + #[error("insufficient balance")] + InsufficientBalance, + /// Unknown balance + #[error("unknown balance")] + UnknownBalance, + /// Invalid payment variant for deploy + #[error("invalid payment variant for deploy")] + DeployInvalidPaymentVariant, + /// Missing transfer target for deploy + #[error("missing transfer target for deploy")] + DeployMissingTransferTarget, + /// Missing module bytes for deploy + #[error("missing module bytes for deploy")] + DeployMissingModuleBytes, + /// Entry point cannot be 'call' + #[error("entry point cannot be 'call'")] + InvalidTransactionEntryPointCannotBeCall, + /// Invalid transaction lane + #[error("invalid transaction lane")] + InvalidTransactionInvalidTransactionLane, + #[error("expected named arguments")] + ExpectedNamedArguments, + #[error("invalid transaction runtime")] + InvalidTransactionRuntime, + #[error("couldn't associate a transaction lane with the transaction")] + InvalidTransactionNoWasmLaneMatches, + #[error("entry point must be 'call'")] + InvalidTransactionEntryPointMustBeCall, + #[error("One of the payloads field cannot be deserialized")] + InvalidTransactionCannotDeserializeField, + #[error("Can't calculate hash of the payload fields")] + InvalidTransactionCannotCalculateFieldsHash, + #[error("Unexpected fields in payload")] + InvalidTransactionUnexpectedFields, + #[error("expected bytes arguments")] + InvalidTransactionExpectedBytesArguments, + #[error("Missing seed field in transaction")] + InvalidTransactionMissingSeed, + #[error("Pricing mode not supported")] + PricingModeNotSupported, + #[error("Gas limit not supported")] + InvalidDeployGasLimitNotSupported, + #[error("Invalid runtime for Transaction::Deploy")] + InvalidDeployInvalidRuntime, +} + +impl From for InvalidTransactionOrDeploy { + fn from(value: ErrorCode) -> Self { + match value { + ErrorCode::InvalidDeployChainName => 
Self::DeployChainName, + ErrorCode::InvalidDeployDependenciesNoLongerSupported => { + Self::DeployDependenciesNoLongerSupported + } + ErrorCode::InvalidDeployExcessiveSize => Self::DeployExcessiveSize, + ErrorCode::InvalidDeployExcessiveTimeToLive => Self::DeployExcessiveTimeToLive, + ErrorCode::InvalidDeployTimestampInFuture => Self::DeployTimestampInFuture, + ErrorCode::InvalidDeployBodyHash => Self::DeployBodyHash, + ErrorCode::InvalidDeployHash => Self::DeployHash, + ErrorCode::InvalidDeployEmptyApprovals => Self::DeployEmptyApprovals, + ErrorCode::InvalidDeployApproval => Self::DeployApproval, + ErrorCode::InvalidDeployExcessiveSessionArgsLength => { + Self::DeployExcessiveSessionArgsLength + } + ErrorCode::InvalidDeployExcessivePaymentArgsLength => { + Self::DeployExcessivePaymentArgsLength + } + ErrorCode::InvalidDeployMissingPaymentAmount => Self::DeployMissingPaymentAmount, + ErrorCode::InvalidDeployFailedToParsePaymentAmount => { + Self::DeployFailedToParsePaymentAmount + } + ErrorCode::InvalidDeployExceededBlockGasLimit => Self::DeployExceededBlockGasLimit, + ErrorCode::InvalidDeployMissingTransferAmount => Self::DeployMissingTransferAmount, + ErrorCode::InvalidDeployFailedToParseTransferAmount => { + Self::DeployFailedToParseTransferAmount + } + ErrorCode::InvalidDeployInsufficientTransferAmount => { + Self::DeployInsufficientTransferAmount + } + ErrorCode::InvalidDeployExcessiveApprovals => Self::DeployExcessiveApprovals, + ErrorCode::InvalidDeployUnableToCalculateGasLimit => { + Self::DeployUnableToCalculateGasLimit + } + ErrorCode::InvalidDeployUnableToCalculateGasCost => { + Self::DeployUnableToCalculateGasCost + } + ErrorCode::InvalidDeployUnspecified => Self::DeployUnspecified, + ErrorCode::InvalidTransactionChainName => Self::TransactionChainName, + ErrorCode::InvalidTransactionExcessiveSize => Self::TransactionExcessiveSize, + ErrorCode::InvalidTransactionExcessiveTimeToLive => { + Self::TransactionExcessiveTimeToLive + } + 
ErrorCode::InvalidTransactionTimestampInFuture => Self::TransactionTimestampInFuture, + ErrorCode::InvalidTransactionBodyHash => Self::TransactionBodyHash, + ErrorCode::InvalidTransactionHash => Self::TransactionHash, + ErrorCode::InvalidTransactionEmptyApprovals => Self::TransactionEmptyApprovals, + ErrorCode::InvalidTransactionInvalidApproval => Self::TransactionInvalidApproval, + ErrorCode::InvalidTransactionExcessiveArgsLength => { + Self::TransactionExcessiveArgsLength + } + ErrorCode::InvalidTransactionExcessiveApprovals => Self::TransactionExcessiveApprovals, + ErrorCode::InvalidTransactionExceedsBlockGasLimit => { + Self::TransactionExceedsBlockGasLimit + } + ErrorCode::InvalidTransactionMissingArg => Self::TransactionMissingArg, + ErrorCode::InvalidTransactionUnexpectedArgType => Self::TransactionUnexpectedArgType, + ErrorCode::InvalidTransactionInvalidArg => Self::TransactionInvalidArg, + ErrorCode::InvalidTransactionInsufficientTransferAmount => { + Self::TransactionInsufficientTransferAmount + } + ErrorCode::InvalidTransactionEntryPointCannotBeCustom => { + Self::TransactionEntryPointCannotBeCustom + } + ErrorCode::InvalidTransactionEntryPointMustBeCustom => { + Self::TransactionEntryPointMustBeCustom + } + ErrorCode::InvalidTransactionEmptyModuleBytes => Self::TransactionEmptyModuleBytes, + ErrorCode::InvalidTransactionGasPriceConversion => Self::TransactionGasPriceConversion, + ErrorCode::InvalidTransactionUnableToCalculateGasLimit => { + Self::TransactionUnableToCalculateGasLimit + } + ErrorCode::InvalidTransactionUnableToCalculateGasCost => { + Self::TransactionUnableToCalculateGasCost + } + ErrorCode::InvalidTransactionPricingMode => Self::TransactionPricingMode, + ErrorCode::EmptyBlockchain => Self::EmptyBlockchain, + ErrorCode::ExpectedDeploy => Self::ExpectedDeploy, + ErrorCode::ExpectedTransaction => Self::ExpectedTransaction, + ErrorCode::TransactionExpired => Self::TransactionExpired, + ErrorCode::MissingOrIncorrectParameters => 
Self::MissingOrIncorrectParameters, + ErrorCode::NoSuchAddressableEntity => Self::NoSuchAddressableEntity, + ErrorCode::NoSuchContractAtHash => Self::NoSuchContractAtHash, + ErrorCode::NoSuchEntryPoint => Self::NoSuchEntryPoint, + ErrorCode::NoSuchPackageAtHash => Self::NoSuchPackageAtHash, + ErrorCode::InvalidEntityAtVersion => Self::InvalidEntityAtVersion, + ErrorCode::DisabledEntityAtVersion => Self::DisabledEntityAtVersion, + ErrorCode::MissingEntityAtVersion => Self::MissingEntityAtVersion, + ErrorCode::InvalidAssociatedKeys => Self::InvalidAssociatedKeys, + ErrorCode::InsufficientSignatureWeight => Self::InsufficientSignatureWeight, + ErrorCode::InsufficientBalance => Self::InsufficientBalance, + ErrorCode::UnknownBalance => Self::UnknownBalance, + ErrorCode::DeployInvalidPaymentVariant => Self::DeployInvalidPaymentVariant, + ErrorCode::DeployMissingPaymentAmount => Self::DeployMissingPaymentAmount, + ErrorCode::DeployFailedToParsePaymentAmount => Self::DeployFailedToParsePaymentAmount, + ErrorCode::DeployMissingTransferTarget => Self::DeployMissingTransferTarget, + ErrorCode::DeployMissingModuleBytes => Self::DeployMissingModuleBytes, + ErrorCode::InvalidTransactionEntryPointCannotBeCall => { + Self::InvalidTransactionEntryPointCannotBeCall + } + ErrorCode::InvalidTransactionInvalidTransactionLane => { + Self::InvalidTransactionInvalidTransactionLane + } + ErrorCode::InvalidTransactionUnspecified => Self::TransactionUnspecified, + ErrorCode::InvalidTransactionOrDeployUnspecified => { + Self::TransactionOrDeployUnspecified + } + ErrorCode::ExpectedNamedArguments => Self::ExpectedNamedArguments, + ErrorCode::InvalidTransactionRuntime => Self::InvalidTransactionRuntime, + ErrorCode::InvalidTransactionNoWasmLaneMatches => { + Self::InvalidTransactionNoWasmLaneMatches + } + ErrorCode::InvalidTransactionEntryPointMustBeCall => { + Self::InvalidTransactionEntryPointMustBeCall + } + ErrorCode::InvalidTransactionCannotDeserializeField => { + 
Self::InvalidTransactionCannotDeserializeField + } + ErrorCode::InvalidTransactionCannotCalculateFieldsHash => { + Self::InvalidTransactionCannotCalculateFieldsHash + } + ErrorCode::InvalidTransactionUnexpectedFields => { + Self::InvalidTransactionUnexpectedFields + } + ErrorCode::InvalidTransactionExpectedBytesArguments => { + Self::InvalidTransactionExpectedBytesArguments + } + ErrorCode::InvalidTransactionMissingSeed => Self::InvalidTransactionMissingSeed, + ErrorCode::PricingModeNotSupported => Self::PricingModeNotSupported, + ErrorCode::InvalidDeployGasLimitNotSupported => Self::InvalidDeployGasLimitNotSupported, + ErrorCode::InvalidDeployInvalidRuntime => Self::InvalidDeployInvalidRuntime, + _ => Self::TransactionOrDeployUnspecified, + } + } +} + +#[derive(Debug, thiserror::Error, PartialEq, Eq)] +pub enum Error { + #[error("request error: {0}")] + RequestFailed(String), + #[error("request id mismatch: expected {expected}, got {got}")] + RequestResponseIdMismatch { expected: u16, got: u16 }, + #[error("failed to get a response with correct id {max} times, giving up")] + TooManyMismatchedResponses { max: u8 }, + #[error("failed to deserialize the original request provided with the response: {0}")] + OriginalRequestDeserialization(String), + #[error("failed to deserialize the envelope of a response")] + EnvelopeDeserialization, + #[error("failed to deserialize a response: {0}")] + Deserialization(String), + #[error("failed to serialize a request: {0}")] + Serialization(String), + #[error("unexpectedly received no response body")] + NoResponseBody, + #[error("unexpectedly received an empty envelope")] + EmptyEnvelope, + #[error("unexpected payload variant received in the response: {0}")] + UnexpectedVariantReceived(u8), + #[error("attempted to use a function that's disabled on the node")] + FunctionIsDisabled, + #[error("could not find the provided state root hash")] + UnknownStateRootHash, + #[error("the provided global state query failed to execute")] + 
QueryFailedToExecute, + #[error("could not execute the provided transaction: {0}")] + InvalidTransaction(InvalidTransactionOrDeploy), + #[error("speculative execution has failed: {0}")] + SpecExecutionFailed(String), + #[error("the switch block for the requested era was not found")] + SwitchBlockNotFound, + #[error("the parent of the switch block for the requested era was not found")] + SwitchBlockParentNotFound, + #[error("cannot serve rewards stored in V1 format")] + UnsupportedRewardsV1Request, + #[error("purse was not found for given identifier")] + PurseNotFound, + #[error("received an unexpected node error: {message} ({code})")] + UnexpectedNodeError { message: String, code: u16 }, + #[error("binary protocol version mismatch")] + CommandHeaderVersionMismatch, + #[error("request was throttled by the node")] + RequestThrottled, + #[error("malformed information request")] + MalformedInformationRequest, + #[error("malformed version bytes in command header")] + TooLittleBytesForRequestHeaderVersion, + #[error("malformed protocol version")] + MalformedCommandHeaderVersion, + #[error("malformed transfer record key")] + TransferRecordMalformedKey, + #[error("malformed command header")] + MalformedCommandHeader, + #[error("malformed command")] + MalformedCommand, + #[error("not found")] + NotFound, + #[error("node reported internal error")] + InternalNodeError, + #[error("bad request")] + BadRequest, + #[error("unsupported request")] + UnsupportedRequest, + #[error("dictionary URef not found")] + DictionaryURefNotFound, + #[error("no complete blocks")] + NoCompleteBlocks, + #[error("gas price tolerance too low")] + GasPriceToleranceTooLow, + #[error("received v1 transaction for speculative execution")] + ReceivedV1Transaction, + #[error("connection to node lost")] + ConnectionLost, +} + +impl Error { + fn from_error_code(code: u16) -> Self { + match ErrorCode::try_from(code) { + Ok(ErrorCode::FunctionDisabled) => Self::FunctionIsDisabled, + Ok(ErrorCode::RootNotFound) 
=> Self::UnknownStateRootHash, + Ok(ErrorCode::FailedQuery) => Self::QueryFailedToExecute, + Ok(ErrorCode::SwitchBlockNotFound) => Self::SwitchBlockNotFound, + Ok(ErrorCode::SwitchBlockParentNotFound) => Self::SwitchBlockParentNotFound, + Ok(ErrorCode::UnsupportedRewardsV1Request) => Self::UnsupportedRewardsV1Request, + Ok(ErrorCode::CommandHeaderVersionMismatch) => Self::CommandHeaderVersionMismatch, + Ok(ErrorCode::PurseNotFound) => Self::PurseNotFound, + Ok( + err @ (ErrorCode::InvalidDeployChainName + | ErrorCode::InvalidDeployDependenciesNoLongerSupported + | ErrorCode::InvalidDeployExcessiveSize + | ErrorCode::InvalidDeployExcessiveTimeToLive + | ErrorCode::InvalidDeployTimestampInFuture + | ErrorCode::InvalidDeployBodyHash + | ErrorCode::InvalidDeployHash + | ErrorCode::InvalidDeployEmptyApprovals + | ErrorCode::InvalidDeployApproval + | ErrorCode::InvalidDeployExcessiveSessionArgsLength + | ErrorCode::InvalidDeployExcessivePaymentArgsLength + | ErrorCode::InvalidDeployMissingPaymentAmount + | ErrorCode::InvalidDeployFailedToParsePaymentAmount + | ErrorCode::InvalidDeployExceededBlockGasLimit + | ErrorCode::InvalidDeployMissingTransferAmount + | ErrorCode::InvalidDeployFailedToParseTransferAmount + | ErrorCode::InvalidDeployInsufficientTransferAmount + | ErrorCode::InvalidDeployExcessiveApprovals + | ErrorCode::InvalidDeployUnableToCalculateGasLimit + | ErrorCode::InvalidDeployUnableToCalculateGasCost + | ErrorCode::InvalidDeployUnspecified + | ErrorCode::InvalidTransactionChainName + | ErrorCode::InvalidTransactionExcessiveSize + | ErrorCode::InvalidTransactionExcessiveTimeToLive + | ErrorCode::InvalidTransactionTimestampInFuture + | ErrorCode::InvalidTransactionBodyHash + | ErrorCode::InvalidTransactionHash + | ErrorCode::InvalidTransactionEmptyApprovals + | ErrorCode::InvalidTransactionInvalidApproval + | ErrorCode::InvalidTransactionExcessiveArgsLength + | ErrorCode::InvalidTransactionExcessiveApprovals + | 
ErrorCode::InvalidTransactionExceedsBlockGasLimit + | ErrorCode::InvalidTransactionMissingArg + | ErrorCode::InvalidTransactionUnexpectedArgType + | ErrorCode::InvalidTransactionInvalidArg + | ErrorCode::InvalidTransactionInsufficientTransferAmount + | ErrorCode::InvalidTransactionEntryPointCannotBeCustom + | ErrorCode::InvalidTransactionEntryPointMustBeCustom + | ErrorCode::InvalidTransactionEmptyModuleBytes + | ErrorCode::InvalidTransactionGasPriceConversion + | ErrorCode::InvalidTransactionUnableToCalculateGasLimit + | ErrorCode::InvalidTransactionUnableToCalculateGasCost + | ErrorCode::InvalidTransactionPricingMode + | ErrorCode::EmptyBlockchain + | ErrorCode::ExpectedDeploy + | ErrorCode::ExpectedTransaction + | ErrorCode::TransactionExpired + | ErrorCode::MissingOrIncorrectParameters + | ErrorCode::NoSuchAddressableEntity + | ErrorCode::NoSuchContractAtHash + | ErrorCode::NoSuchEntryPoint + | ErrorCode::NoSuchPackageAtHash + | ErrorCode::InvalidEntityAtVersion + | ErrorCode::DisabledEntityAtVersion + | ErrorCode::MissingEntityAtVersion + | ErrorCode::InvalidAssociatedKeys + | ErrorCode::InsufficientSignatureWeight + | ErrorCode::InsufficientBalance + | ErrorCode::UnknownBalance + | ErrorCode::DeployInvalidPaymentVariant + | ErrorCode::DeployMissingPaymentAmount + | ErrorCode::DeployFailedToParsePaymentAmount + | ErrorCode::DeployMissingTransferTarget + | ErrorCode::DeployMissingModuleBytes + | ErrorCode::InvalidTransactionEntryPointCannotBeCall + | ErrorCode::InvalidTransactionInvalidTransactionLane + | ErrorCode::InvalidTransactionUnspecified + | ErrorCode::InvalidTransactionOrDeployUnspecified + | ErrorCode::ExpectedNamedArguments + | ErrorCode::InvalidTransactionRuntime + | ErrorCode::InvalidTransactionNoWasmLaneMatches + | ErrorCode::InvalidTransactionEntryPointMustBeCall + | ErrorCode::InvalidTransactionCannotDeserializeField + | ErrorCode::InvalidTransactionCannotCalculateFieldsHash + | ErrorCode::InvalidTransactionUnexpectedFields + | 
ErrorCode::InvalidTransactionExpectedBytesArguments + | ErrorCode::InvalidTransactionMissingSeed + | ErrorCode::PricingModeNotSupported + | ErrorCode::InvalidDeployGasLimitNotSupported + | ErrorCode::InvalidDeployInvalidRuntime), + ) => Self::InvalidTransaction(InvalidTransactionOrDeploy::from(err)), + Ok(ErrorCode::RequestThrottled) => Self::RequestThrottled, + Ok(ErrorCode::MalformedInformationRequest) => Self::MalformedInformationRequest, + Ok(ErrorCode::TooLittleBytesForRequestHeaderVersion) => { + Self::TooLittleBytesForRequestHeaderVersion + } + Ok(ErrorCode::MalformedCommandHeaderVersion) => Self::MalformedCommandHeaderVersion, + Ok(ErrorCode::TransferRecordMalformedKey) => Self::TransferRecordMalformedKey, + Ok(ErrorCode::MalformedCommandHeader) => Self::MalformedCommandHeader, + Ok(ErrorCode::MalformedCommand) => Self::MalformedCommand, + Ok(err @ (ErrorCode::WasmPreprocessing | ErrorCode::InvalidItemVariant)) => { + Self::SpecExecutionFailed(err.to_string()) + } + Ok(ErrorCode::NotFound) => Self::NotFound, + Ok(ErrorCode::InternalError) => Self::InternalNodeError, + Ok(ErrorCode::BadRequest) => Self::BadRequest, + Ok(ErrorCode::UnsupportedRequest) => Self::UnsupportedRequest, + Ok(ErrorCode::DictionaryURefNotFound) => Self::DictionaryURefNotFound, + Ok(ErrorCode::NoCompleteBlocks) => Self::NoCompleteBlocks, + Ok(ErrorCode::GasPriceToleranceTooLow) => Self::GasPriceToleranceTooLow, + Ok(ErrorCode::ReceivedV1Transaction) => Self::ReceivedV1Transaction, + Ok(ErrorCode::NoError) => { + //This code shouldn't be passed in an error scenario + Self::UnexpectedNodeError { + message: "Received unexpected 'NoError' error code".to_string(), + code: ErrorCode::NoError as u16, + } + } + Err(err) => Self::UnexpectedNodeError { + message: err.to_string(), + code, + }, + } + } +} + +struct Reconnect; + +struct Notify { + inner: tokio::sync::Notify, + phantom: std::marker::PhantomData, +} + +impl Notify { + fn new() -> Arc { + Arc::new(Self { + inner: 
tokio::sync::Notify::new(), + phantom: std::marker::PhantomData, + }) + } + + fn notified(&self) -> Notified { + self.inner.notified() + } + + fn notify_one(&self) { + self.inner.notify_one() + } +} + +pub struct FramedNodeClient { + client: Arc>>, + reconnect: Arc>, + config: NodeClientConfig, + current_request_id: Arc, +} + +impl FramedNodeClient { + pub async fn new( + config: NodeClientConfig, + maybe_network_name: Option, + ) -> Result< + ( + Arc, + impl Future>, + impl Future>, + ), + AnyhowError, + > { + let stream = Arc::new(RwLock::new( + Self::connect_with_retries( + &config.ip_address, + config.port, + &config.exponential_backoff, + config.max_message_size_bytes, + ) + .await?, + )); + + let reconnect = Notify::::new(); + + let reconnect_loop = + Self::reconnect_loop(config.clone(), Arc::clone(&stream), Arc::clone(&reconnect)); + let keepalive_timeout = Duration::from_millis(config.keepalive_timeout_ms); + let node_client = Arc::new(Self { + client: Arc::clone(&stream), + reconnect, + config, + current_request_id: AtomicU16::new(INITIAL_REQUEST_ID).into(), + }); + let keepalive_loop = Self::keepalive_loop(node_client.clone(), keepalive_timeout); + + // validate network name + if let Some(network_name) = maybe_network_name { + let node_status = node_client.read_node_status().await?; + if network_name != node_status.chainspec_name { + let msg = format!("Network name {} does't match name {network_name} configured for node RPC connection", node_status.chainspec_name); + error!("{msg}"); + return Err(AnyhowError::msg(msg)); + } + } + + Ok((node_client, reconnect_loop, keepalive_loop)) + } + + fn next_id(&self) -> u16 { + self.current_request_id.fetch_add(1, Ordering::Relaxed) + } + + async fn reconnect_loop( + config: NodeClientConfig, + client: Arc>>, + reconnect: Arc>, + ) -> Result<(), AnyhowError> { + loop { + tokio::select! 
{ + _ = reconnect.notified() => { + let mut lock = client.write().await; + let new_client = Self::reconnect(&config.clone()).await?; + *lock = new_client; + }, + } + } + } + + async fn keepalive_loop( + client: Arc, + keepalive_timeout: Duration, + ) -> Result<(), AnyhowError> { + loop { + tokio::time::sleep(keepalive_timeout).await; + client + .send_request(Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::ProtocolVersion.into(), + key: vec![], + })) + .await?; + } + } + + async fn send_request_internal( + &self, + req: &Command, + client: &mut RwLockWriteGuard<'_, Framed>, + ) -> Result { + let (request_id, payload) = self.generate_payload(req); + + if let Err(err) = tokio::time::timeout( + Duration::from_secs(self.config.message_timeout_secs), + client.send(payload), + ) + .await + .map_err(|_| { + register_timeout("sending_payload"); + Error::RequestFailed("timeout".to_owned()) + })? { + return Err(Error::RequestFailed(err.to_string())); + }; + + for _ in 0..MAX_MISMATCHED_ID_RETRIES { + let Ok(maybe_response) = tokio::time::timeout( + Duration::from_secs(self.config.message_timeout_secs), + client.next(), + ) + .await + else { + register_timeout("receiving_response"); + return Err(Error::ConnectionLost); + }; + + if let Some(response) = maybe_response { + let resp = bytesrepr::deserialize_from_slice( + response + .map_err(|err| Error::RequestFailed(err.to_string()))? + .payload(), + ) + .map_err(|err| { + error!( + "Error when deserializing binary port envelope: {}", + err.to_string() + ); + Error::EnvelopeDeserialization + })?; + match validate_response(resp, request_id) { + Ok(response) => return Ok(response), + Err(err) if matches!(err, Error::RequestResponseIdMismatch { expected, got } if expected > got) => + { + // If our expected ID is greater than the one we received, it means we can + // try to recover from the situation by reading more responses from the stream. 
+ warn!(%err, "received a response with an outdated id, trying another response"); + register_mismatched_id(); + continue; + } + Err(err) => { + register_mismatched_id(); + return Err(err); + } + } + } else { + return Err(Error::ConnectionLost); + } + } + + Err(Error::TooManyMismatchedResponses { + max: MAX_MISMATCHED_ID_RETRIES, + }) + } + + fn generate_payload(&self, req: &Command) -> (u16, BinaryMessage) { + let next_id = self.next_id(); + ( + next_id, + BinaryMessage::new( + encode_request(req, next_id).expect("should always serialize a request"), + ), + ) + } + + async fn connect_with_retries( + ip_address: &IpAddr, + port: u16, + backoff_config: &ExponentialBackoffConfig, + max_message_size_bytes: u32, + ) -> Result, AnyhowError> { + let mut wait = backoff_config.initial_delay_ms; + let max_attempts = &backoff_config.max_attempts; + let tcp_socket = SocketAddr::new(*ip_address, port); + let mut current_attempt = 1; + loop { + match TcpStream::connect(tcp_socket).await { + Ok(stream) => { + return Ok(Framed::new( + stream, + BinaryMessageCodec::new(max_message_size_bytes), + )) + } + Err(err) => { + current_attempt += 1; + if *max_attempts < current_attempt { + anyhow::bail!( + "Couldn't connect to node {} after {} attempts", + tcp_socket, + current_attempt - 1 + ); + } + warn!(%err, "failed to connect to node {tcp_socket}, waiting {wait}ms before retrying"); + tokio::time::sleep(Duration::from_millis(wait)).await; + wait = (wait * backoff_config.coefficient).min(backoff_config.max_delay_ms); + } + }; + } + } + + async fn reconnect_internal( + config: &NodeClientConfig, + ) -> Result, AnyhowError> { + let disconnected_start = Instant::now(); + inc_disconnect(); + error!("node connection closed, will attempt to reconnect"); + let stream = Self::connect_with_retries( + &config.ip_address, + config.port, + &config.exponential_backoff, + config.max_message_size_bytes, + ) + .await?; + info!("connection with the node has been re-established"); + 
observe_reconnect_time(disconnected_start.elapsed()); + Ok(stream) + } + + async fn reconnect( + config: &NodeClientConfig, + ) -> Result, AnyhowError> { + Self::reconnect_internal(config).await + } +} + +#[async_trait] +impl NodeClient for FramedNodeClient { + async fn send_request(&self, req: Command) -> Result { + let mut client = match tokio::time::timeout( + Duration::from_secs(self.config.client_access_timeout_secs), + self.client.write(), + ) + .await + { + Ok(client) => client, + Err(_) => { + register_timeout("acquiring_client"); + return Err(Error::ConnectionLost); + } + }; + + let result = self.send_request_internal(&req, &mut client).await; + if let Err(err) = &result { + warn!( + addr = %self.config.ip_address, + err = display_error(&err), + "binary port client handler error" + ); + // attempt to reconnect in case the node was restarted and connection broke + client.close().await.ok(); + let ip_address = &self.config.ip_address; + + match tokio::time::timeout( + Duration::from_secs(self.config.client_access_timeout_secs), + Self::connect_with_retries( + ip_address, + self.config.port, + &self.config.exponential_backoff, + self.config.max_message_size_bytes, + ), + ) + .await + { + Ok(Ok(new_client)) => { + *client = new_client; + return self.send_request_internal(&req, &mut client).await; + } + Ok(Err(err)) => { + warn!( + %err, + addr = %self.config.ip_address, + "binary port client failed to reconnect" + ); + // schedule standard reconnect process with multiple retries + // and return a response + self.reconnect.notify_one(); + return Err(Error::ConnectionLost); + } + Err(_) => { + warn!( + %err, + addr = %self.config.ip_address, + "failed to reestablish connection in timely fashion" + ); + register_timeout("reacquiring_connection"); + return Err(Error::ConnectionLost); + } + } + } + result + } +} + +#[derive(Debug)] +pub enum EntityResponse { + Entity(AddressableEntityInformation), + Account(AccountInformation), + Contract(ContractInformation), +} + 
+#[derive(Debug)] +pub enum PackageResponse { + Package(ValueWithProof), + ContractPackage(ValueWithProof), +} + +fn validate_response( + resp: BinaryResponseAndRequest, + expected_id: u16, +) -> Result { + let original_id = match try_parse_request_id(resp.request()) { + Ok(id) => id, + Err(e) => { + error!("Error when decoding original_id: {}", e); + 0 + } + }; + if original_id != expected_id { + return Err(Error::RequestResponseIdMismatch { + expected: expected_id, + got: original_id, + }); + } + Ok(resp) +} + +fn try_parse_request_id(request: &[u8]) -> Result { + if request.len() < 4 { + anyhow::bail!( + "Node responded with request that doesn't have enough bytes to read payload length" + ) + } + let (payload_length, remainder) = + u32::from_bytes(request).map_err(|e| anyhow::Error::msg(e.to_string()))?; + if payload_length != remainder.len() as u32 { + anyhow::bail!("Node responded with request that has a mismatch in declared bytes vs the payload length") + } + let (header, _) = extract_header(remainder)?; + Ok(header.id()) +} + +fn extract_header(payload: &[u8]) -> Result<(CommandHeader, &[u8]), anyhow::Error> { + const BINARY_VERSION_LENGTH_BYTES: usize = std::mem::size_of::(); + + if payload.len() < BINARY_VERSION_LENGTH_BYTES { + anyhow::bail!("Not enough bytes to read version of the command header"); + } + + let binary_protocol_version = match u16::from_bytes(payload) { + Ok((binary_protocol_version, _)) => binary_protocol_version, + Err(_) => { + anyhow::bail!("Could not read header version from request"); + } + }; + + if binary_protocol_version != CommandHeader::HEADER_VERSION { + anyhow::bail!("Header version does not meet expectation"); + } + + match CommandHeader::from_bytes(payload) { + Ok((header, remainder)) => Ok((header, remainder)), + Err(error) => { + anyhow::bail!("Malformed CommandHeader definition: {}", error); + } + } +} + +fn parse_response(resp: &BinaryResponse) -> Result, Error> +where + A: FromBytes + PayloadEntity, +{ + if 
resp.is_not_found() { + return Ok(None); + } + if !resp.is_success() { + return Err(Error::from_error_code(resp.error_code())); + } + match resp.returned_data_type_tag() { + Some(found) if found == u8::from(A::RESPONSE_TYPE) => { + bytesrepr::deserialize_from_slice(resp.payload()) + .map(Some) + .map_err(|err| Error::Deserialization(err.to_string())) + } + Some(other) => Err(Error::UnexpectedVariantReceived(other)), + _ => Ok(None), + } +} + +fn parse_response_bincode(resp: &BinaryResponse) -> Result, Error> +where + A: DeserializeOwned + PayloadEntity, +{ + if resp.is_not_found() { + return Ok(None); + } + if !resp.is_success() { + return Err(Error::from_error_code(resp.error_code())); + } + match resp.returned_data_type_tag() { + Some(found) if found == u8::from(A::RESPONSE_TYPE) => bincode::deserialize(resp.payload()) + .map(Some) + .map_err(|err| Error::Deserialization(err.to_string())), + Some(other) => Err(Error::UnexpectedVariantReceived(other)), + _ => Ok(None), + } +} + +/// Wraps an error to ensure it gets properly captured by tracing. +pub(crate) fn display_error<'a, T>(err: &'a T) -> field::DisplayValue> +where + T: std::error::Error + 'a, +{ + field::display(ErrFormatter(err)) +} + +/// An error formatter. 
+#[derive(Clone, Copy, Debug)] +pub(crate) struct ErrFormatter<'a, T>(pub &'a T); + +impl Display for ErrFormatter<'_, T> +where + T: std::error::Error, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let mut opt_source: Option<&(dyn std::error::Error)> = Some(self.0); + + while let Some(source) = opt_source { + write!(f, "{}", source)?; + opt_source = source.source(); + + if opt_source.is_some() { + f.write_str(": ")?; + } + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use crate::testing::{ + get_dummy_request, get_dummy_request_payload, get_port, start_mock_binary_port, + start_mock_binary_port_responding_with_given_response, + start_mock_binary_port_responding_with_stored_value, + }; + + use super::*; + use casper_binary_port::{CommandHeader, ReactorStateName}; + use casper_types::{ + testing::TestRng, BlockSynchronizerStatus, CLValue, ProtocolVersion, TimeDiff, Timestamp, + }; + use tokio::time::sleep; + + #[tokio::test] + async fn given_client_and_no_node_should_fail_after_tries() { + let config = NodeClientConfig::new_with_port_and_retries(1111, 2); + let res = FramedNodeClient::new(config, None).await; + + assert!(res.is_err()); + let error_message = res.err().unwrap().to_string(); + + assert!(error_message.starts_with("Couldn't connect to node")); + assert!(error_message.ends_with(" after 2 attempts")); + } + + #[tokio::test] + async fn given_client_and_node_should_connect_and_do_request() { + let port = get_port(); + let mut rng = TestRng::new(); + let shutdown = Arc::new(tokio::sync::Notify::new()); + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(INITIAL_REQUEST_ID), + None, + Arc::clone(&shutdown), + ) + .await; + let config = NodeClientConfig::new_with_port_and_retries(port, 2); + let (c, _, _) = FramedNodeClient::new(config, None).await.unwrap(); + + let res = query_global_state_for_string_value(&mut rng, &c) + .await + .unwrap(); + + assert_eq!(res, 
StoredValue::CLValue(CLValue::from_t("Foo").unwrap())) + } + + #[tokio::test] + async fn given_client_should_try_until_node_starts() { + let mut rng = TestRng::new(); + let port = get_port(); + let shutdown = Arc::new(tokio::sync::Notify::new()); + tokio::spawn(async move { + sleep(Duration::from_secs(5)).await; + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(INITIAL_REQUEST_ID), + None, + Arc::clone(&shutdown), + ) + .await; + }); + let config = NodeClientConfig::new_with_port_and_retries(port, 5); + let (client, _, _) = FramedNodeClient::new(config, None).await.unwrap(); + + let res = query_global_state_for_string_value(&mut rng, &client) + .await + .unwrap(); + + assert_eq!(res, StoredValue::CLValue(CLValue::from_t("Foo").unwrap())) + } + + #[tokio::test] + async fn should_fail_to_connect_if_network_name_does_not_match() { + // prepare node status response with network name + let mut rng = TestRng::new(); + let protocol_version = ProtocolVersion::from_parts(2, 0, 0); + let value = NodeStatus { + protocol_version, + peers: Peers::random(&mut rng), + build_version: rng.random_string(5..10), + chainspec_name: "network-1".into(), + starting_state_root_hash: Digest::random(&mut rng), + last_added_block_info: None, + our_public_signing_key: None, + round_length: None, + next_upgrade: None, + uptime: TimeDiff::from_millis(0), + reactor_state: ReactorStateName::new(rng.random_string(5..10)), + last_progress: Timestamp::random(&mut rng), + available_block_range: AvailableBlockRange::random(&mut rng), + block_sync: BlockSynchronizerStatus::random(&mut rng), + latest_switch_block_hash: None, + }; + let response = BinaryResponse::from_value(value); + + // setup mock binary port with node status response + let port = get_port(); + let shutdown = Arc::new(tokio::sync::Notify::new()); + let request_id = Some(INITIAL_REQUEST_ID); + let _mock_server_handle = start_mock_binary_port_responding_with_given_response( + port, + 
request_id, + None, + Arc::clone(&shutdown), + response, + ) + .await; + + let config = NodeClientConfig::new_with_port_and_retries(port, 2); + let network_name = Some("not-network-1".into()); + let res = FramedNodeClient::new(config, network_name).await; + + assert!(res.is_err()); + let error_message = res.err().unwrap().to_string(); + + assert_eq!(error_message, "Network name network-1 does't match name not-network-1 configured for node RPC connection"); + } + + async fn query_global_state_for_string_value( + rng: &mut TestRng, + client: &FramedNodeClient, + ) -> Result { + let state_root_hash = Digest::random(rng); + let base_key = Key::ChecksumRegistry; + client + .query_global_state( + Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + base_key, + vec![], + ) + .await? + .ok_or(Error::NoResponseBody) + .map(|query_res| query_res.into_inner().0) + } + + #[tokio::test] + async fn given_client_should_reconnect_to_restarted_node_and_do_request() { + let port = get_port(); + let mut rng = TestRng::new(); + let shutdown_server = Arc::new(tokio::sync::Notify::new()); + let mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(INITIAL_REQUEST_ID), + None, + Arc::clone(&shutdown_server), + ) + .await; + + let config = NodeClientConfig::new_with_port(port); + let (c, reconnect_loop, _) = FramedNodeClient::new(config, None).await.unwrap(); + + let scenario = async { + // Request id = 1 + assert!(query_global_state_for_string_value(&mut rng, &c) + .await + .is_ok()); + + // shutdown node + shutdown_server.notify_one(); + let _ = mock_server_handle.await; + + // Request id = 2 + let err = query_global_state_for_string_value(&mut rng, &c) + .await + .unwrap_err(); + assert!(matches!(err, Error::ConnectionLost)); + + // restart node + let mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(INITIAL_REQUEST_ID + 2), + None, + Arc::clone(&shutdown_server), + ) + .await; + + // wait for reconnect 
loop to do it's business + tokio::time::sleep(Duration::from_secs(2)).await; + + // Request id = 3 + assert!(query_global_state_for_string_value(&mut rng, &c) + .await + .is_ok()); + + // restart node between requests + shutdown_server.notify_one(); + let _ = mock_server_handle.await; + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(INITIAL_REQUEST_ID + 4), + None, + Arc::clone(&shutdown_server), + ) + .await; + + // Request id = 4 & 5 (retry) + assert!(query_global_state_for_string_value(&mut rng, &c) + .await + .is_ok()); + }; + + tokio::select! { + _ = scenario => (), + _ = reconnect_loop => panic!("reconnect loop should not exit"), + } + } + + #[tokio::test] + async fn should_generate_payload_with_incrementing_id() { + let port = get_port(); + let config = NodeClientConfig::new_with_port(port); + let shutdown = Arc::new(tokio::sync::Notify::new()); + let _mock_server_handle = + start_mock_binary_port(port, vec![], 1, Arc::clone(&shutdown)).await; + let (c, _, _) = FramedNodeClient::new(config, None).await.unwrap(); + + let generated_ids: Vec<_> = (INITIAL_REQUEST_ID..INITIAL_REQUEST_ID + 10) + .map(|_| { + let (_, binary_message) = c.generate_payload(&get_dummy_request()); + let header = CommandHeader::from_bytes(binary_message.payload()) + .unwrap() + .0; + header.id() + }) + .collect(); + + assert_eq!( + generated_ids, + (INITIAL_REQUEST_ID..INITIAL_REQUEST_ID + 10).collect::>() + ); + } + + #[test] + fn should_reject_mismatched_request_id() { + let expected_id = 1; + let actual_id = 2; + + let req = get_dummy_request_payload(Some(actual_id)); + let resp = BinaryResponse::new_empty(); + let resp_and_req = BinaryResponseAndRequest::new(resp, req); + + let result = validate_response(resp_and_req, expected_id); + assert!(matches!( + result, + Err(Error::RequestResponseIdMismatch { expected, got }) if expected == 1 && got == 2 + )); + + let expected_id = 2; + let actual_id = 1; + + let req = 
get_dummy_request_payload(Some(actual_id)); + let resp = BinaryResponse::new_empty(); + let resp_and_req = BinaryResponseAndRequest::new(resp, req); + + let result = validate_response(resp_and_req, expected_id); + assert!(matches!( + result, + Err(Error::RequestResponseIdMismatch { expected, got }) if expected == 2 && got == 1 + )); + } + + #[test] + fn should_accept_matching_request_id() { + let expected_id = 1; + let actual_id = 1; + + let req = get_dummy_request_payload(Some(actual_id)); + let resp = BinaryResponse::new_empty(); + let resp_and_req = BinaryResponseAndRequest::new(resp, req); + + let result = validate_response(resp_and_req, expected_id); + dbg!(&result); + assert!(result.is_ok()) + } + + #[tokio::test] + async fn should_keep_retrying_to_get_response_up_to_the_limit() { + const LIMIT: u8 = MAX_MISMATCHED_ID_RETRIES - 1; + + let port = get_port(); + let mut rng = TestRng::new(); + let shutdown = Arc::new(tokio::sync::Notify::new()); + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(0), + Some(LIMIT), + Arc::clone(&shutdown), + ) + .await; + let config = NodeClientConfig::new_with_port_and_retries(port, 2); + let (c, _, _) = FramedNodeClient::new(config, None).await.unwrap(); + + let res = query_global_state_for_string_value(&mut rng, &c) + .await + .unwrap_err(); + // Expect error different than 'TooManyMismatchResponses' + assert!(!matches!(res, Error::TooManyMismatchedResponses { .. 
})); + } + + #[tokio::test] + async fn should_quit_retrying_to_get_response_over_the_retry_limit() { + const LIMIT: u8 = MAX_MISMATCHED_ID_RETRIES; + + let port = get_port(); + let mut rng = TestRng::new(); + let shutdown = Arc::new(tokio::sync::Notify::new()); + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + Some(0), + Some(LIMIT), + Arc::clone(&shutdown), + ) + .await; + let config = NodeClientConfig::new_with_port_and_retries(port, 2); + let (c, _, _) = FramedNodeClient::new(config, None).await.unwrap(); + + let res = query_global_state_for_string_value(&mut rng, &c) + .await + .unwrap_err(); + // Expect 'TooManyMismatchResponses' error + assert!(matches!(res, Error::TooManyMismatchedResponses { max } if max == LIMIT)); + } + + #[test] + fn should_map_errors() { + assert!(matches!( + Error::from_error_code(ErrorCode::InvalidTransactionOrDeployUnspecified as u16), + Error::InvalidTransaction(InvalidTransactionOrDeploy::TransactionOrDeployUnspecified) + )); + assert!(matches!( + Error::from_error_code(ErrorCode::ExpectedNamedArguments as u16), + Error::InvalidTransaction(InvalidTransactionOrDeploy::ExpectedNamedArguments) + )); + assert!(matches!( + Error::from_error_code(ErrorCode::InvalidTransactionRuntime as u16), + Error::InvalidTransaction(InvalidTransactionOrDeploy::InvalidTransactionRuntime) + )); + assert!(matches!( + Error::from_error_code(ErrorCode::InvalidTransactionNoWasmLaneMatches as u16), + Error::InvalidTransaction( + InvalidTransactionOrDeploy::InvalidTransactionNoWasmLaneMatches + ) + )); + assert!(matches!( + Error::from_error_code(ErrorCode::InvalidTransactionEntryPointMustBeCall as u16), + Error::InvalidTransaction( + InvalidTransactionOrDeploy::InvalidTransactionEntryPointMustBeCall + ) + )); + assert!(matches!( + Error::from_error_code(ErrorCode::InvalidTransactionCannotDeserializeField as u16), + Error::InvalidTransaction( + InvalidTransactionOrDeploy::InvalidTransactionCannotDeserializeField + ) + 
)); + assert!(matches!( + Error::from_error_code(ErrorCode::InvalidTransactionCannotCalculateFieldsHash as u16), + Error::InvalidTransaction( + InvalidTransactionOrDeploy::InvalidTransactionCannotCalculateFieldsHash + ) + )); + assert!(matches!( + Error::from_error_code(ErrorCode::InvalidTransactionUnexpectedFields as u16), + Error::InvalidTransaction( + InvalidTransactionOrDeploy::InvalidTransactionUnexpectedFields + ) + )); + assert!(matches!( + Error::from_error_code(ErrorCode::InvalidTransactionExpectedBytesArguments as u16), + Error::InvalidTransaction( + InvalidTransactionOrDeploy::InvalidTransactionExpectedBytesArguments + ) + )); + assert!(matches!( + Error::from_error_code(ErrorCode::InvalidTransactionMissingSeed as u16), + Error::InvalidTransaction(InvalidTransactionOrDeploy::InvalidTransactionMissingSeed) + )); + assert!(matches!( + Error::from_error_code(ErrorCode::PricingModeNotSupported as u16), + Error::InvalidTransaction(InvalidTransactionOrDeploy::PricingModeNotSupported) + )); + assert!(matches!( + Error::from_error_code(ErrorCode::InvalidDeployGasLimitNotSupported as u16), + Error::InvalidTransaction( + InvalidTransactionOrDeploy::InvalidDeployGasLimitNotSupported + ) + )); + assert!(matches!( + Error::from_error_code(ErrorCode::InvalidDeployInvalidRuntime as u16), + Error::InvalidTransaction(InvalidTransactionOrDeploy::InvalidDeployInvalidRuntime) + )); + assert!(matches!( + Error::from_error_code(ErrorCode::MalformedInformationRequest as u16), + Error::MalformedInformationRequest + )); + assert!(matches!( + Error::from_error_code(ErrorCode::TooLittleBytesForRequestHeaderVersion as u16), + Error::TooLittleBytesForRequestHeaderVersion + )); + assert!(matches!( + Error::from_error_code(ErrorCode::MalformedCommandHeaderVersion as u16), + Error::MalformedCommandHeaderVersion + )); + assert!(matches!( + Error::from_error_code(ErrorCode::TransferRecordMalformedKey as u16), + Error::TransferRecordMalformedKey + )); + assert!(matches!( + 
Error::from_error_code(ErrorCode::MalformedCommandHeader as u16), + Error::MalformedCommandHeader + )); + assert!(matches!( + Error::from_error_code(ErrorCode::MalformedCommand as u16), + Error::MalformedCommand + )); + assert!(matches!( + Error::from_error_code(ErrorCode::NotFound as u16), + Error::NotFound + )); + assert!(matches!( + Error::from_error_code(ErrorCode::InternalError as u16), + Error::InternalNodeError + )); + assert!(matches!( + Error::from_error_code(ErrorCode::BadRequest as u16), + Error::BadRequest + )); + assert!(matches!( + Error::from_error_code(ErrorCode::UnsupportedRequest as u16), + Error::UnsupportedRequest + )); + assert!(matches!( + Error::from_error_code(ErrorCode::DictionaryURefNotFound as u16), + Error::DictionaryURefNotFound + )); + assert!(matches!( + Error::from_error_code(ErrorCode::NoCompleteBlocks as u16), + Error::NoCompleteBlocks + )); + assert!(matches!( + Error::from_error_code(ErrorCode::GasPriceToleranceTooLow as u16), + Error::GasPriceToleranceTooLow + )); + assert!(matches!( + Error::from_error_code(ErrorCode::ReceivedV1Transaction as u16), + Error::ReceivedV1Transaction + )); + } +} diff --git a/rpc_sidecar/src/rpcs.rs b/rpc_sidecar/src/rpcs.rs new file mode 100644 index 00000000..9c25e222 --- /dev/null +++ b/rpc_sidecar/src/rpcs.rs @@ -0,0 +1,623 @@ +//! The set of JSON-RPCs which the API server handles. 
+ +use std::convert::{Infallible, TryFrom}; + +pub mod account; +pub mod chain; +mod common; +pub mod docs; +mod error; +mod error_code; +pub mod info; +pub mod speculative_exec; +pub mod speculative_open_rpc_schema; +pub mod state; +pub(crate) mod state_get_auction_info_v2; +#[cfg(test)] +pub(crate) mod test_utils; +mod types; + +use std::{fmt, str, sync::Arc, time::Duration}; + +use async_trait::async_trait; +use http::header::ACCEPT_ENCODING; +use hyper::server::{conn::AddrIncoming, Builder}; +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +use serde_json::Value; +use tokio::sync::oneshot; +use tower::ServiceBuilder; +use tracing::info; +use warp::Filter; + +use casper_json_rpc::{ + CorsOrigin, Error as RpcError, Params, RequestHandlers, RequestHandlersBuilder, + ReservedErrorCode, +}; +use casper_types::SemVer; + +pub use common::ErrorData; +use docs::DocExample; +pub use error::Error; +pub use error_code::ErrorCode; + +use crate::{ClientError, NodeClient}; + +pub const CURRENT_API_VERSION: ApiVersion = ApiVersion(SemVer::new(2, 0, 0)); + +/// This setting causes the server to ignore extra fields in JSON-RPC requests other than the +/// standard 'id', 'jsonrpc', 'method', and 'params' fields. +/// +/// It will be changed to `false` for casper-node v2.0.0. +const ALLOW_UNKNOWN_FIELDS_IN_JSON_RPC_REQUEST: bool = true; + +/// A JSON-RPC requiring the "params" field to be present. +#[async_trait] +pub(super) trait RpcWithParams { + /// The JSON-RPC "method" name. + const METHOD: &'static str; + + /// The JSON-RPC request's "params" type. + type RequestParams: Serialize + + for<'de> Deserialize<'de> + + JsonSchema + + DocExample + + Send + + 'static; + + /// The JSON-RPC response's "result" type. 
+ type ResponseResult: Serialize + + for<'de> Deserialize<'de> + + PartialEq + + JsonSchema + + DocExample + + Send + + 'static; + + /// Tries to parse the incoming JSON-RPC request's "params" field as `RequestParams`. + fn try_parse_params(maybe_params: Option) -> Result { + let params = match maybe_params { + Some(params) => Value::from(params), + None => { + return Err(RpcError::new( + ReservedErrorCode::InvalidParams, + "Missing 'params' field", + )) + } + }; + serde_json::from_value::(params).map_err(|error| { + RpcError::new( + ReservedErrorCode::InvalidParams, + format!("Failed to parse 'params' field: {error}"), + ) + }) + } + + /// Registers this RPC as the handler for JSON-RPC requests whose "method" field is the same as + /// `Self::METHOD`. + fn register_as_handler( + node_client: Arc, + handlers_builder: &mut RequestHandlersBuilder, + ) { + let handler = move |maybe_params| { + let node_client = Arc::clone(&node_client); + async move { + let params = Self::try_parse_params(maybe_params)?; + Self::do_handle_request(node_client, params).await + } + }; + handlers_builder.register_handler(Self::METHOD, Arc::new(handler)) + } + + /// Tries to parse the params, and on success, returns the doc example, regardless of the value + /// of the parsed params. + #[cfg(test)] + fn register_as_test_handler(handlers_builder: &mut RequestHandlersBuilder) { + let handler = move |maybe_params| async move { + let _params = Self::try_parse_params(maybe_params)?; + Ok(Self::ResponseResult::doc_example()) + }; + handlers_builder.register_handler(Self::METHOD, Arc::new(handler)) + } + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result; +} + +/// A JSON-RPC requiring the "params" field to be absent. +#[async_trait] +pub(super) trait RpcWithoutParams { + /// The JSON-RPC "method" name. + const METHOD: &'static str; + + /// The JSON-RPC response's "result" type. 
+ type ResponseResult: Serialize + + for<'de> Deserialize<'de> + + PartialEq + + JsonSchema + + DocExample + + Send + + 'static; + + /// Returns an error if the incoming JSON-RPC request's "params" field is not `None` or an empty + /// Array or Object. + fn check_no_params(maybe_params: Option) -> Result<(), RpcError> { + if !maybe_params.unwrap_or_default().is_empty() { + return Err(RpcError::new( + ReservedErrorCode::InvalidParams, + "'params' field should be an empty Array '[]', an empty Object '{}' or absent", + )); + } + Ok(()) + } + + /// Registers this RPC as the handler for JSON-RPC requests whose "method" field is the same as + /// `Self::METHOD`. + fn register_as_handler( + node_client: Arc, + handlers_builder: &mut RequestHandlersBuilder, + ) { + let handler = move |maybe_params| { + let node_client = Arc::clone(&node_client); + async move { + Self::check_no_params(maybe_params)?; + Self::do_handle_request(node_client).await + } + }; + handlers_builder.register_handler(Self::METHOD, Arc::new(handler)) + } + + /// Checks the params, and on success, returns the doc example. + #[cfg(test)] + fn register_as_test_handler(handlers_builder: &mut RequestHandlersBuilder) { + let handler = move |maybe_params| async move { + Self::check_no_params(maybe_params)?; + Ok(Self::ResponseResult::doc_example()) + }; + handlers_builder.register_handler(Self::METHOD, Arc::new(handler)) + } + + async fn do_handle_request( + node_client: Arc, + ) -> Result; +} + +/// A JSON-RPC where the "params" field is optional. +/// +/// Note that "params" being an empty JSON Array or empty JSON Object is treated the same as if +/// the "params" field is absent - i.e. it represents the `None` case. +#[async_trait] +pub(super) trait RpcWithOptionalParams { + /// The JSON-RPC "method" name. + const METHOD: &'static str; + + /// The JSON-RPC request's "params" type. This will be passed to the handler wrapped in an + /// `Option`. 
+ type OptionalRequestParams: Serialize + + for<'de> Deserialize<'de> + + JsonSchema + + DocExample + + Send + + 'static; + + /// The JSON-RPC response's "result" type. + type ResponseResult: Serialize + + for<'de> Deserialize<'de> + + PartialEq + + JsonSchema + + DocExample + + Send + + 'static; + + /// Tries to parse the incoming JSON-RPC request's "params" field as + /// `Option`. + fn try_parse_params( + maybe_params: Option, + ) -> Result, RpcError> { + let params = match maybe_params { + Some(params) => { + if params.is_empty() { + Value::Null + } else { + Value::from(params) + } + } + None => Value::Null, + }; + serde_json::from_value::>(params).map_err(|error| { + RpcError::new( + ReservedErrorCode::InvalidParams, + format!("Failed to parse 'params' field: {error}"), + ) + }) + } + + /// Registers this RPC as the handler for JSON-RPC requests whose "method" field is the same as + /// `Self::METHOD`. + fn register_as_handler( + node_client: Arc, + handlers_builder: &mut RequestHandlersBuilder, + ) { + let handler = move |maybe_params| { + let node_client = Arc::clone(&node_client); + async move { + let params = Self::try_parse_params(maybe_params)?; + Self::do_handle_request(node_client, params).await + } + }; + handlers_builder.register_handler(Self::METHOD, Arc::new(handler)) + } + + /// Tries to parse the params, and on success, returns the doc example, regardless of the value + /// of the parsed params. + #[cfg(test)] + fn register_as_test_handler(handlers_builder: &mut RequestHandlersBuilder) { + let handler = move |maybe_params| async move { + let _params = Self::try_parse_params(maybe_params)?; + Ok(Self::ResponseResult::doc_example()) + }; + handlers_builder.register_handler(Self::METHOD, Arc::new(handler)) + } + + async fn do_handle_request( + node_client: Arc, + params: Option, + ) -> Result; +} + +/// Start JSON RPC server with CORS enabled in a background. 
+pub(super) async fn run_with_cors( + builder: Builder, + handlers: RequestHandlers, + qps_limit: u64, + max_body_bytes: u32, + api_path: &'static str, + server_name: &'static str, + cors_header: CorsOrigin, +) { + let make_svc = hyper::service::make_service_fn(move |_| { + let service_routes = casper_json_rpc::route_with_cors( + api_path, + max_body_bytes, + handlers.clone(), + ALLOW_UNKNOWN_FIELDS_IN_JSON_RPC_REQUEST, + &cors_header, + ); + + // Supports content negotiation for gzip responses. This is an interim fix until + // https://github.com/seanmonstar/warp/pull/513 moves forward. + let service_routes_gzip = warp::header::exact(ACCEPT_ENCODING.as_str(), "gzip") + .and(service_routes.clone()) + .with(warp::compression::gzip()); + + let service = warp::service(service_routes_gzip.or(service_routes)); + async move { Ok::<_, Infallible>(service.clone()) } + }); + + let make_svc = ServiceBuilder::new() + .rate_limit(qps_limit, Duration::from_secs(1)) + .service(make_svc); + + let server = builder.serve(make_svc); + info!(address = %server.local_addr(), "started {server_name} server"); + + let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); + let server_with_shutdown = server.with_graceful_shutdown(async { + shutdown_receiver.await.ok(); + }); + + let _ = tokio::spawn(server_with_shutdown).await; + let _ = shutdown_sender.send(()); + info!("{server_name} server shut down"); +} + +/// Start JSON RPC server in a background. +pub(super) async fn run( + builder: Builder, + handlers: RequestHandlers, + qps_limit: u64, + max_body_bytes: u32, + api_path: &'static str, + server_name: &'static str, +) { + let make_svc = hyper::service::make_service_fn(move |_| { + let service_routes = casper_json_rpc::route( + api_path, + max_body_bytes, + handlers.clone(), + ALLOW_UNKNOWN_FIELDS_IN_JSON_RPC_REQUEST, + ); + + // Supports content negotiation for gzip responses. This is an interim fix until + // https://github.com/seanmonstar/warp/pull/513 moves forward. 
+ let service_routes_gzip = warp::header::exact(ACCEPT_ENCODING.as_str(), "gzip") + .and(service_routes.clone()) + .with(warp::compression::gzip()); + + let service = warp::service(service_routes_gzip.or(service_routes)); + async move { Ok::<_, Infallible>(service.clone()) } + }); + + let make_svc = ServiceBuilder::new() + .rate_limit(qps_limit, Duration::from_secs(1)) + .service(make_svc); + + let server = builder.serve(make_svc); + info!(address = %server.local_addr(), "started {server_name} server"); + + let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); + let server_with_shutdown = server.with_graceful_shutdown(async { + shutdown_receiver.await.ok(); + }); + + let _ = tokio::spawn(server_with_shutdown).await; + let _ = shutdown_sender.send(()); + info!("{server_name} server shut down"); +} + +#[derive(Copy, Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct ApiVersion(SemVer); + +impl Serialize for ApiVersion { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + let str = format!("{}.{}.{}", self.0.major, self.0.minor, self.0.patch); + String::serialize(&str, serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for ApiVersion { + fn deserialize>(deserializer: D) -> Result { + let semver = if deserializer.is_human_readable() { + let value_as_string = String::deserialize(deserializer)?; + SemVer::try_from(value_as_string.as_str()).map_err(SerdeError::custom)? + } else { + SemVer::deserialize(deserializer)? 
+ }; + Ok(ApiVersion(semver)) + } +} + +impl fmt::Display for ApiVersion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +#[cfg(test)] +mod tests { + use std::fmt::Write; + + use http::StatusCode; + use warp::{filters::BoxedFilter, Filter, Reply}; + + use casper_json_rpc::{filters, Response}; + use casper_types::DeployHash; + + use super::*; + + async fn send_request( + method: &str, + maybe_params: Option<&str>, + filter: &BoxedFilter<(impl Reply + 'static,)>, + ) -> Response { + let mut body = format!(r#"{{"jsonrpc":"2.0","id":"a","method":"{method}""#); + match maybe_params { + Some(params) => write!(body, r#","params":{}}}"#, params).unwrap(), + None => body += "}", + } + + let http_response = warp::test::request() + .body(body) + .filter(filter) + .await + .unwrap() + .into_response(); + + assert_eq!(http_response.status(), StatusCode::OK); + let body_bytes = hyper::body::to_bytes(http_response.into_body()) + .await + .unwrap(); + serde_json::from_slice(&body_bytes).unwrap() + } + + mod rpc_with_params { + use crate::rpcs::info::{GetDeploy, GetDeployParams, GetDeployResult}; + + use super::*; + + fn main_filter_with_recovery() -> BoxedFilter<(impl Reply,)> { + let mut handlers = RequestHandlersBuilder::new(); + GetDeploy::register_as_test_handler(&mut handlers); + let handlers = handlers.build(); + + filters::main_filter(handlers, ALLOW_UNKNOWN_FIELDS_IN_JSON_RPC_REQUEST) + .recover(filters::handle_rejection) + .boxed() + } + + #[tokio::test] + async fn should_parse_params() { + let filter = main_filter_with_recovery(); + + let params = serde_json::to_string(&GetDeployParams { + deploy_hash: DeployHash::default(), + finalized_approvals: false, + }) + .unwrap(); + let params = Some(params.as_str()); + let rpc_response = send_request(GetDeploy::METHOD, params, &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + Some(GetDeployResult::doc_example()) + ); + } + + #[tokio::test] + async fn 
should_return_error_if_missing_params() { + let filter = main_filter_with_recovery(); + + let rpc_response = send_request(GetDeploy::METHOD, None, &filter).await; + assert_eq!( + rpc_response.error().unwrap(), + &RpcError::new(ReservedErrorCode::InvalidParams, "Missing 'params' field") + ); + + let rpc_response = send_request(GetDeploy::METHOD, Some("[]"), &filter).await; + assert_eq!( + rpc_response.error().unwrap(), + &RpcError::new( + ReservedErrorCode::InvalidParams, + "Failed to parse 'params' field: invalid length 0, expected struct \ + GetDeployParams with 2 elements" + ) + ); + } + + #[tokio::test] + async fn should_return_error_on_failure_to_parse_params() { + let filter = main_filter_with_recovery(); + + let rpc_response = send_request(GetDeploy::METHOD, Some("[3]"), &filter).await; + assert_eq!( + rpc_response.error().unwrap(), + &RpcError::new( + ReservedErrorCode::InvalidParams, + "Failed to parse 'params' field: invalid type: integer `3`, expected a string" + ) + ); + } + } + + mod rpc_without_params { + + use crate::rpcs::info::{GetPeers, GetPeersResult}; + + use super::*; + + fn main_filter_with_recovery() -> BoxedFilter<(impl Reply,)> { + let mut handlers = RequestHandlersBuilder::new(); + GetPeers::register_as_test_handler(&mut handlers); + let handlers = handlers.build(); + + filters::main_filter(handlers, ALLOW_UNKNOWN_FIELDS_IN_JSON_RPC_REQUEST) + .recover(filters::handle_rejection) + .boxed() + } + + #[tokio::test] + async fn should_check_no_params() { + let filter = main_filter_with_recovery(); + + let rpc_response = send_request(GetPeers::METHOD, None, &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + Some(GetPeersResult::doc_example()) + ); + + let rpc_response = send_request(GetPeers::METHOD, Some("[]"), &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + Some(GetPeersResult::doc_example()) + ); + + let rpc_response = send_request(GetPeers::METHOD, Some("{}"), &filter).await; + assert_eq!( + 
rpc_response.result().as_ref(), + Some(GetPeersResult::doc_example()) + ); + } + + #[tokio::test] + async fn should_return_error_if_params_not_empty() { + let filter = main_filter_with_recovery(); + + let rpc_response = send_request(GetPeers::METHOD, Some("[3]"), &filter).await; + assert_eq!( + rpc_response.error().unwrap(), + &RpcError::new( + ReservedErrorCode::InvalidParams, + "'params' field should be an empty Array '[]', an empty Object '{}' or absent" + ) + ); + } + } + + mod rpc_with_optional_params { + use casper_types::BlockIdentifier; + + use crate::rpcs::chain::{GetBlock, GetBlockParams, GetBlockResult}; + + use super::*; + + fn main_filter_with_recovery() -> BoxedFilter<(impl Reply,)> { + let mut handlers = RequestHandlersBuilder::new(); + GetBlock::register_as_test_handler(&mut handlers); + let handlers = handlers.build(); + + filters::main_filter(handlers, ALLOW_UNKNOWN_FIELDS_IN_JSON_RPC_REQUEST) + .recover(filters::handle_rejection) + .boxed() + } + + #[tokio::test] + async fn should_parse_without_params() { + let filter = main_filter_with_recovery(); + + let rpc_response = send_request(GetBlock::METHOD, None, &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + Some(GetBlockResult::doc_example()) + ); + + let rpc_response = send_request(GetBlock::METHOD, Some("[]"), &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + Some(GetBlockResult::doc_example()) + ); + + let rpc_response = send_request(GetBlock::METHOD, Some("{}"), &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + Some(GetBlockResult::doc_example()) + ); + } + + #[tokio::test] + async fn should_parse_with_params() { + let filter = main_filter_with_recovery(); + + let params = serde_json::to_string(&GetBlockParams { + block_identifier: BlockIdentifier::Height(1), + }) + .unwrap(); + let params = Some(params.as_str()); + + let rpc_response = send_request(GetBlock::METHOD, params, &filter).await; + assert_eq!( + rpc_response.result().as_ref(), + 
Some(GetBlockResult::doc_example()) + ); + } + + #[tokio::test] + async fn should_return_error_on_failure_to_parse_params() { + let filter = main_filter_with_recovery(); + + let rpc_response = send_request(GetBlock::METHOD, Some(r#"["a"]"#), &filter).await; + assert_eq!( + rpc_response.error().unwrap(), + &RpcError::new( + ReservedErrorCode::InvalidParams, + "Failed to parse 'params' field: unknown variant `a`, expected `Hash` or \ + `Height`" + ) + ); + } + } +} diff --git a/rpc_sidecar/src/rpcs/account.rs b/rpc_sidecar/src/rpcs/account.rs new file mode 100644 index 00000000..f143e29e --- /dev/null +++ b/rpc_sidecar/src/rpcs/account.rs @@ -0,0 +1,271 @@ +//! RPCs related to accounts. + +use std::{str, sync::Arc}; + +use async_trait::async_trait; +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use casper_types::{Deploy, DeployHash, Transaction, TransactionHash}; + +use super::{ + docs::{DocExample, DOCS_EXAMPLE_API_VERSION}, + ApiVersion, ClientError, Error, NodeClient, RpcError, RpcWithParams, CURRENT_API_VERSION, +}; + +static PUT_DEPLOY_PARAMS: Lazy = Lazy::new(|| PutDeployParams { + deploy: Deploy::doc_example().clone(), +}); +static PUT_DEPLOY_RESULT: Lazy = Lazy::new(|| PutDeployResult { + api_version: DOCS_EXAMPLE_API_VERSION, + deploy_hash: *Deploy::doc_example().hash(), +}); + +static PUT_TRANSACTION_PARAMS: Lazy = Lazy::new(|| PutTransactionParams { + transaction: Transaction::doc_example().clone(), +}); +static PUT_TRANSACTION_RESULT: Lazy = Lazy::new(|| PutTransactionResult { + api_version: DOCS_EXAMPLE_API_VERSION, + transaction_hash: Transaction::doc_example().hash(), +}); + +/// Params for "account_put_deploy" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct PutDeployParams { + /// The `Deploy`. 
+ pub deploy: Deploy, +} + +impl DocExample for PutDeployParams { + fn doc_example() -> &'static Self { + &PUT_DEPLOY_PARAMS + } +} + +/// Result for "account_put_deploy" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct PutDeployResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The deploy hash. + pub deploy_hash: DeployHash, +} + +impl DocExample for PutDeployResult { + fn doc_example() -> &'static Self { + &PUT_DEPLOY_RESULT + } +} + +/// "account_put_deploy" RPC +pub struct PutDeploy {} + +#[async_trait] +impl RpcWithParams for PutDeploy { + const METHOD: &'static str = "account_put_deploy"; + type RequestParams = PutDeployParams; + type ResponseResult = PutDeployResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let deploy_hash = *params.deploy.hash(); + match node_client + .try_accept_transaction(params.deploy.into()) + .await + { + Ok(()) => Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + deploy_hash, + }), + Err(ClientError::InvalidTransaction(err)) => Err(Error::InvalidDeploy(err).into()), + Err(err) => Err(Error::NodeRequest("submitting a deploy", err).into()), + } + } +} + +/// Params for "account_put_transaction" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct PutTransactionParams { + /// The `Transaction`. + pub transaction: Transaction, +} + +impl DocExample for PutTransactionParams { + fn doc_example() -> &'static Self { + &PUT_TRANSACTION_PARAMS + } +} + +/// Result for "account_put_transaction" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct PutTransactionResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The transaction hash. 
+ pub transaction_hash: TransactionHash, +} + +impl DocExample for PutTransactionResult { + fn doc_example() -> &'static Self { + &PUT_TRANSACTION_RESULT + } +} + +/// "account_put_transaction" RPC +pub struct PutTransaction {} + +#[async_trait] +impl RpcWithParams for PutTransaction { + const METHOD: &'static str = "account_put_transaction"; + type RequestParams = PutTransactionParams; + type ResponseResult = PutTransactionResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let transaction_hash = params.transaction.hash(); + match node_client.try_accept_transaction(params.transaction).await { + Ok(()) => Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + transaction_hash, + }), + Err(ClientError::InvalidTransaction(err)) => Err(Error::InvalidTransaction(err).into()), + Err(err) => Err(Error::NodeRequest("submitting a transaction", err).into()), + } + } +} + +#[cfg(test)] +mod tests { + use casper_binary_port::{ + BinaryResponse, BinaryResponseAndRequest, Command, ErrorCode as BinaryPortErrorCode, + }; + use casper_types::{bytesrepr::Bytes, testing::TestRng}; + use pretty_assertions::assert_eq; + + use crate::rpcs::ErrorCode; + + use super::*; + + #[tokio::test] + async fn should_put_deploy() { + struct ClientMock; + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::TryAcceptTransaction { .. 
} => Ok(BinaryResponseAndRequest::new( + BinaryResponse::new_empty(), + Bytes::from(vec![]), + )), + _ => unimplemented!(), + } + } + } + + let rng = &mut TestRng::new(); + let deploy = Deploy::random(rng); + let res = PutDeploy::do_handle_request( + Arc::new(ClientMock), + PutDeployParams { + deploy: deploy.clone(), + }, + ) + .await + .expect("should handle request"); + assert_eq!( + res, + PutDeployResult { + api_version: CURRENT_API_VERSION, + deploy_hash: *deploy.hash(), + } + ) + } + + #[tokio::test] + async fn should_put_transaction() { + struct ClientMock; + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::TryAcceptTransaction { .. } => Ok(BinaryResponseAndRequest::new( + BinaryResponse::new_empty(), + Bytes::from(vec![]), + )), + _ => unimplemented!(), + } + } + } + + let rng = &mut TestRng::new(); + let transaction = Transaction::random(rng); + let res = PutTransaction::do_handle_request( + Arc::new(ClientMock), + PutTransactionParams { + transaction: transaction.clone(), + }, + ) + .await + .expect("should handle request"); + assert_eq!( + res, + PutTransactionResult { + api_version: CURRENT_API_VERSION, + transaction_hash: transaction.hash(), + } + ) + } + + #[tokio::test] + async fn should_handle_transaction_error() { + struct ClientMock; + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::TryAcceptTransaction { .. 
} => Ok(BinaryResponseAndRequest::new( + BinaryResponse::new_error(BinaryPortErrorCode::InvalidTransactionBodyHash), + Bytes::from(vec![]), + )), + _ => unimplemented!(), + } + } + } + + let rng = &mut TestRng::new(); + let transaction = Transaction::random(rng); + let err = PutTransaction::do_handle_request( + Arc::new(ClientMock), + PutTransactionParams { + transaction: transaction.clone(), + }, + ) + .await + .expect_err("should reject request"); + + assert_eq!(err.code(), ErrorCode::InvalidTransaction as i64,) + } +} diff --git a/rpc_sidecar/src/rpcs/chain.rs b/rpc_sidecar/src/rpcs/chain.rs new file mode 100644 index 00000000..2c1ab34b --- /dev/null +++ b/rpc_sidecar/src/rpcs/chain.rs @@ -0,0 +1,844 @@ +//! RPCs related to the block chain. + +mod era_summary; + +use std::{clone::Clone, str, sync::Arc}; + +use async_trait::async_trait; +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use casper_types::{ + global_state::TrieMerkleProof, BlockHash, BlockHeader, BlockHeaderV2, BlockIdentifier, Digest, + GlobalStateIdentifier, JsonBlockWithSignatures, Key, StoredValue, Transfer, +}; + +use super::{ + common, + docs::{DocExample, DOCS_EXAMPLE_API_VERSION}, + ApiVersion, Error, NodeClient, RpcError, RpcWithOptionalParams, CURRENT_API_VERSION, +}; +pub use era_summary::EraSummary; +use era_summary::ERA_SUMMARY; + +static GET_BLOCK_PARAMS: Lazy = Lazy::new(|| GetBlockParams { + block_identifier: BlockIdentifier::Hash(*JsonBlockWithSignatures::example().block.hash()), +}); +static GET_BLOCK_RESULT: Lazy = Lazy::new(|| GetBlockResult { + api_version: DOCS_EXAMPLE_API_VERSION, + block_with_signatures: Some(JsonBlockWithSignatures::example().clone()), +}); +static GET_BLOCK_TRANSFERS_PARAMS: Lazy = + Lazy::new(|| GetBlockTransfersParams { + block_identifier: BlockIdentifier::Hash(*BlockHash::example()), + }); +static GET_BLOCK_TRANSFERS_RESULT: Lazy = + Lazy::new(|| GetBlockTransfersResult { + api_version: 
DOCS_EXAMPLE_API_VERSION, + block_hash: Some(*BlockHash::example()), + transfers: Some(vec![Transfer::example().clone()]), + }); +static GET_STATE_ROOT_HASH_PARAMS: Lazy = + Lazy::new(|| GetStateRootHashParams { + block_identifier: BlockIdentifier::Height(BlockHeaderV2::example().height()), + }); +static GET_STATE_ROOT_HASH_RESULT: Lazy = + Lazy::new(|| GetStateRootHashResult { + api_version: DOCS_EXAMPLE_API_VERSION, + state_root_hash: Some(*BlockHeaderV2::example().state_root_hash()), + }); +static GET_ERA_INFO_PARAMS: Lazy = Lazy::new(|| GetEraInfoParams { + block_identifier: BlockIdentifier::Hash(ERA_SUMMARY.block_hash), +}); +static GET_ERA_INFO_RESULT: Lazy = Lazy::new(|| GetEraInfoResult { + api_version: DOCS_EXAMPLE_API_VERSION, + era_summary: Some(ERA_SUMMARY.clone()), +}); +static GET_ERA_SUMMARY_PARAMS: Lazy = Lazy::new(|| GetEraSummaryParams { + block_identifier: BlockIdentifier::Hash(ERA_SUMMARY.block_hash), +}); +static GET_ERA_SUMMARY_RESULT: Lazy = Lazy::new(|| GetEraSummaryResult { + api_version: DOCS_EXAMPLE_API_VERSION, + era_summary: ERA_SUMMARY.clone(), +}); + +/// Params for "chain_get_block" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetBlockParams { + /// The block identifier. + pub block_identifier: BlockIdentifier, +} + +impl DocExample for GetBlockParams { + fn doc_example() -> &'static Self { + &GET_BLOCK_PARAMS + } +} + +/// Result for "chain_get_block" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetBlockResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The block, if found. + pub block_with_signatures: Option, +} + +impl DocExample for GetBlockResult { + fn doc_example() -> &'static Self { + &GET_BLOCK_RESULT + } +} + +/// "chain_get_block" RPC. 
+pub struct GetBlock {} + +#[async_trait] +impl RpcWithOptionalParams for GetBlock { + const METHOD: &'static str = "chain_get_block"; + type OptionalRequestParams = GetBlockParams; + type ResponseResult = GetBlockResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let identifier = maybe_params.map(|params| params.block_identifier); + let (block, signatures) = common::get_block_with_signatures(&*node_client, identifier) + .await? + .into_inner(); + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + block_with_signatures: Some(JsonBlockWithSignatures::new(block, Some(signatures))), + }) + } +} + +/// Params for "chain_get_block_transfers" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetBlockTransfersParams { + /// The block hash. + pub block_identifier: BlockIdentifier, +} + +impl DocExample for GetBlockTransfersParams { + fn doc_example() -> &'static Self { + &GET_BLOCK_TRANSFERS_PARAMS + } +} + +/// Result for "chain_get_block_transfers" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetBlockTransfersResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The block hash, if found. + pub block_hash: Option, + /// The block's transfers, if found. + pub transfers: Option>, +} + +impl DocExample for GetBlockTransfersResult { + fn doc_example() -> &'static Self { + &GET_BLOCK_TRANSFERS_RESULT + } +} + +/// "chain_get_block_transfers" RPC. 
+pub struct GetBlockTransfers {} + +#[async_trait] +impl RpcWithOptionalParams for GetBlockTransfers { + const METHOD: &'static str = "chain_get_block_transfers"; + type OptionalRequestParams = GetBlockTransfersParams; + type ResponseResult = GetBlockTransfersResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let identifier = maybe_params.map(|params| params.block_identifier); + let header = common::get_block_header(&*node_client, identifier).await?; + let transfers = node_client + .read_block_transfers(header.block_hash()) + .await + .map_err(|err| Error::NodeRequest("block transfers", err))?; + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + block_hash: Some(header.block_hash()), + transfers, + }) + } +} + +/// Params for "chain_get_state_root_hash" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetStateRootHashParams { + /// The block hash. + pub block_identifier: BlockIdentifier, +} + +impl DocExample for GetStateRootHashParams { + fn doc_example() -> &'static Self { + &GET_STATE_ROOT_HASH_PARAMS + } +} + +/// Result for "chain_get_state_root_hash" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetStateRootHashResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// Hex-encoded hash of the state root. + pub state_root_hash: Option, +} + +impl DocExample for GetStateRootHashResult { + fn doc_example() -> &'static Self { + &GET_STATE_ROOT_HASH_RESULT + } +} + +/// "chain_get_state_root_hash" RPC. 
+pub struct GetStateRootHash {} + +#[async_trait] +impl RpcWithOptionalParams for GetStateRootHash { + const METHOD: &'static str = "chain_get_state_root_hash"; + type OptionalRequestParams = GetStateRootHashParams; + type ResponseResult = GetStateRootHashResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let identifier = maybe_params.map(|params| params.block_identifier); + let block_header = common::get_block_header(&*node_client, identifier).await?; + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + state_root_hash: Some(*block_header.state_root_hash()), + }) + } +} + +/// Params for "chain_get_era_info" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetEraInfoParams { + /// The block identifier. + pub block_identifier: BlockIdentifier, +} + +impl DocExample for GetEraInfoParams { + fn doc_example() -> &'static Self { + &GET_ERA_INFO_PARAMS + } +} + +/// Result for "chain_get_era_info" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetEraInfoResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The era summary. + pub era_summary: Option, +} + +impl DocExample for GetEraInfoResult { + fn doc_example() -> &'static Self { + &GET_ERA_INFO_RESULT + } +} + +/// "chain_get_era_info_by_switch_block" RPC +pub struct GetEraInfoBySwitchBlock {} + +#[async_trait] +impl RpcWithOptionalParams for GetEraInfoBySwitchBlock { + const METHOD: &'static str = "chain_get_era_info_by_switch_block"; + type OptionalRequestParams = GetEraInfoParams; + type ResponseResult = GetEraInfoResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let block_header = match maybe_params { + Some(params) => { + common::get_block_header(&*node_client, Some(params.block_identifier)).await? 
+ } + None => common::get_latest_switch_block_header(&*node_client).await?, + }; + let era_summary = if block_header.is_switch_block() { + Some(get_era_summary_by_block(node_client, &block_header).await?) + } else { + None + }; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + era_summary, + }) + } +} + +/// Params for "chain_get_era_summary" RPC response. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetEraSummaryParams { + /// The block identifier. + pub block_identifier: BlockIdentifier, +} + +impl DocExample for GetEraSummaryParams { + fn doc_example() -> &'static Self { + &GET_ERA_SUMMARY_PARAMS + } +} + +/// Result for "chain_get_era_summary" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetEraSummaryResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The era summary. + pub era_summary: EraSummary, +} + +impl DocExample for GetEraSummaryResult { + fn doc_example() -> &'static Self { + &GET_ERA_SUMMARY_RESULT + } +} + +/// "chain_get_era_summary" RPC +pub struct GetEraSummary {} + +#[async_trait] +impl RpcWithOptionalParams for GetEraSummary { + const METHOD: &'static str = "chain_get_era_summary"; + type OptionalRequestParams = GetEraSummaryParams; + type ResponseResult = GetEraSummaryResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let block_header = match maybe_params { + Some(params) => { + common::get_block_header(&*node_client, Some(params.block_identifier)).await? 
+ } + None => common::get_latest_switch_block_header(&*node_client).await?, + }; + + let era_summary = get_era_summary_by_block(node_client, &block_header).await?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + era_summary, + }) + } +} + +async fn get_era_summary_by_block( + node_client: Arc, + block_header: &BlockHeader, +) -> Result { + fn create_era_summary( + block_header: &BlockHeader, + stored_value: StoredValue, + merkle_proof: Vec>, + ) -> Result { + Ok(EraSummary { + block_hash: block_header.block_hash(), + era_id: block_header.era_id(), + stored_value, + state_root_hash: *block_header.state_root_hash(), + merkle_proof: common::encode_proof(&merkle_proof)?, + }) + } + + let state_identifier = GlobalStateIdentifier::StateRootHash(*block_header.state_root_hash()); + let result = node_client + .query_global_state(Some(state_identifier), Key::EraSummary, vec![]) + .await + .map_err(|err| Error::NodeRequest("era summary", err))?; + + let era_summary = if let Some(result) = result { + let (value, merkle_proof) = result.into_inner(); + create_era_summary(block_header, value, merkle_proof)? + } else { + let (result, merkle_proof) = node_client + .query_global_state( + Some(state_identifier), + Key::EraInfo(block_header.era_id()), + vec![], + ) + .await + .map_err(|err| Error::NodeRequest("era info", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner(); + + create_era_summary(block_header, result, merkle_proof)? 
+ }; + Ok(era_summary) +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use crate::{rpcs::test_utils::BinaryPortMock, ClientError}; + use casper_binary_port::{ + BinaryResponse, BinaryResponseAndRequest, Command, GetRequest, GlobalStateEntityQualifier, + GlobalStateQueryResult, InformationRequest, InformationRequestTag, RecordId, + }; + use casper_types::{ + bytesrepr::Bytes, + system::auction::{DelegatorKind, EraInfo, SeigniorageAllocation}, + testing::TestRng, + AsymmetricType, Block, BlockSignaturesV1, BlockSignaturesV2, BlockWithSignatures, + ChainNameDigest, PublicKey, TestBlockBuilder, TestBlockV1Builder, U512, + }; + use pretty_assertions::assert_eq; + use rand::Rng; + + use super::*; + + #[tokio::test] + async fn should_read_block_v2() { + let rng = &mut TestRng::new(); + let block = Block::V2(TestBlockBuilder::new().build(rng)); + let signatures = BlockSignaturesV2::new( + *block.hash(), + block.height(), + block.era_id(), + ChainNameDigest::random(rng), + ); + let resp = GetBlock::do_handle_request( + Arc::new(ValidBlockMock { + block: BlockWithSignatures::new(block.clone(), signatures.into()), + transfers: vec![], + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetBlockResult { + api_version: CURRENT_API_VERSION, + block_with_signatures: Some(JsonBlockWithSignatures::new(block, None)), + } + ); + } + + #[tokio::test] + async fn should_read_block_v1() { + let rng = &mut TestRng::new(); + let block = TestBlockV1Builder::new().build(rng); + + let resp = GetBlock::do_handle_request( + Arc::new(ValidBlockMock { + block: BlockWithSignatures::new( + Block::V1(block.clone()), + BlockSignaturesV1::new(*block.hash(), block.era_id()).into(), + ), + transfers: vec![], + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetBlockResult { + api_version: CURRENT_API_VERSION, + block_with_signatures: Some(JsonBlockWithSignatures::new(Block::V1(block), None)), + } + ); + } + + 
#[tokio::test] + async fn should_read_block_transfers() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + let mut transfers = vec![]; + for _ in 0..rng.gen_range(0..10) { + transfers.push(Transfer::random(rng)); + } + let signatures = BlockSignaturesV2::new( + *block.hash(), + block.height(), + block.era_id(), + ChainNameDigest::random(rng), + ); + let resp = GetBlockTransfers::do_handle_request( + Arc::new(ValidBlockMock { + block: BlockWithSignatures::new(Block::V2(block.clone()), signatures.into()), + transfers: transfers.clone(), + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetBlockTransfersResult { + api_version: CURRENT_API_VERSION, + block_hash: Some(*block.hash()), + transfers: Some(transfers), + } + ); + } + + #[tokio::test] + async fn should_read_block_state_root_hash() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + let signatures = BlockSignaturesV2::new( + *block.hash(), + block.height(), + block.era_id(), + ChainNameDigest::random(rng), + ); + let resp = GetStateRootHash::do_handle_request( + Arc::new(ValidBlockMock { + block: BlockWithSignatures::new(Block::V2(block.clone()), signatures.into()), + transfers: vec![], + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetStateRootHashResult { + api_version: CURRENT_API_VERSION, + state_root_hash: Some(*block.state_root_hash()), + } + ); + } + + #[tokio::test] + async fn should_read_block_era_summary() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + let resp = GetEraSummary::do_handle_request( + Arc::new(ValidEraSummaryMock { + block: Block::V2(block.clone()), + expect_no_block_identifier: true, + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetEraSummaryResult { + api_version: CURRENT_API_VERSION, + era_summary: EraSummary { + block_hash: *block.hash(), + 
era_id: block.era_id(), + stored_value: StoredValue::EraInfo(EraInfo::new()), + state_root_hash: *block.state_root_hash(), + merkle_proof: String::from("00000000"), + } + } + ); + } + + #[tokio::test] + async fn should_read_block_era_summary_with_block_id() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + let resp = GetEraSummary::do_handle_request( + Arc::new(ValidEraSummaryMock { + block: Block::V2(block.clone()), + expect_no_block_identifier: false, + }), + Some(GetEraSummaryParams { + block_identifier: BlockIdentifier::Hash(*block.hash()), + }), + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetEraSummaryResult { + api_version: CURRENT_API_VERSION, + era_summary: EraSummary { + block_hash: *block.hash(), + era_id: block.era_id(), + stored_value: StoredValue::EraInfo(EraInfo::new()), + state_root_hash: *block.state_root_hash(), + merkle_proof: String::from("00000000"), + } + } + ); + } + + #[tokio::test] + async fn should_read_block_era_info_by_switch_block() { + let rng = &mut TestRng::new(); + let mut binary_port_mock = BinaryPortMock::new(); + + let block_header = TestBlockV1Builder::new() + .switch_block(true) + .build_versioned(rng) + .clone_header(); + binary_port_mock + .add_block_header_req_res( + block_header.clone(), + InformationRequest::LatestSwitchBlockHeader, + ) + .await; + let era_info = example_era_info(); + binary_port_mock + .add_era_info_req_res( + era_info.clone(), + Some(GlobalStateIdentifier::StateRootHash( + *block_header.state_root_hash(), + )), + ) + .await; + + let resp = GetEraInfoBySwitchBlock::do_handle_request(Arc::new(binary_port_mock), None) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetEraInfoResult { + api_version: CURRENT_API_VERSION, + era_summary: Some(EraSummary { + block_hash: block_header.block_hash(), + era_id: block_header.era_id(), + stored_value: StoredValue::EraInfo(era_info), + state_root_hash: 
*block_header.state_root_hash(), + merkle_proof: String::from("00000000"), + }) + } + ); + } + + #[tokio::test] + async fn should_read_block_era_info_by_switch_block_with_block_id() { + let rng = &mut TestRng::new(); + let mut binary_port_mock = BinaryPortMock::new(); + + let block_header = TestBlockV1Builder::new() + .switch_block(true) + .build_versioned(rng) + .clone_header(); + binary_port_mock + .add_block_header_req_res( + block_header.clone(), + InformationRequest::BlockHeader(Some(BlockIdentifier::Height(150))), + ) + .await; + let era_info = example_era_info(); + binary_port_mock + .add_era_info_req_res( + era_info.clone(), + Some(GlobalStateIdentifier::StateRootHash( + *block_header.state_root_hash(), + )), + ) + .await; + + let resp = GetEraInfoBySwitchBlock::do_handle_request( + Arc::new(binary_port_mock), + Some(GetEraInfoParams { + block_identifier: BlockIdentifier::Height(150), + }), + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetEraInfoResult { + api_version: CURRENT_API_VERSION, + era_summary: Some(EraSummary { + block_hash: block_header.block_hash(), + era_id: block_header.era_id(), + stored_value: StoredValue::EraInfo(era_info), + state_root_hash: *block_header.state_root_hash(), + merkle_proof: String::from("00000000"), + }) + } + ); + } + + #[tokio::test] + async fn should_read_none_block_era_info_by_switch_block_for_non_switch() { + let rng = &mut TestRng::new(); + let mut binary_port_mock = BinaryPortMock::new(); + + let block_header = TestBlockV1Builder::new() + .switch_block(false) + .build_versioned(rng) + .clone_header(); + binary_port_mock + .add_block_header_req_res( + block_header.clone(), + InformationRequest::LatestSwitchBlockHeader, + ) + .await; + let resp = GetEraInfoBySwitchBlock::do_handle_request(Arc::new(binary_port_mock), None) + .await + .expect("should handle request"); + assert_eq!( + resp, + GetEraInfoResult { + api_version: CURRENT_API_VERSION, + era_summary: None + } + ); + } + + struct 
ValidBlockMock { + block: BlockWithSignatures, + transfers: Vec, + } + + #[async_trait] + impl NodeClient for ValidBlockMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockWithSignatures) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.block.clone()), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.block.block().clone_header()), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::Record { + record_type_tag, .. + }) if RecordId::try_from(record_type_tag) == Ok(RecordId::Transfer) => { + Ok(BinaryResponseAndRequest::new_legacy_test_response( + RecordId::Transfer, + &self.transfers, + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + struct ValidEraSummaryMock { + block: Block, + expect_no_block_identifier: bool, + } + + #[async_trait] + impl NodeClient for ValidEraSummaryMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + let expected_tag = if self.expect_no_block_identifier { + InformationRequestTag::LatestSwitchBlockHeader + } else { + InformationRequestTag::BlockHeader + }; + match req { + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) == Ok(expected_tag) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.block.clone_header()), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::Item { + base_key: Key::EraSummary, + .. 
+ } + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(GlobalStateQueryResult::new( + StoredValue::EraInfo(EraInfo::new()), + vec![], + )), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + fn example_era_info() -> EraInfo { + let delegator_amount = U512::from(1000); + let validator_amount = U512::from(2000); + let delegator_public_key = PublicKey::from_hex( + "01e1b46a25baa8a5c28beb3c9cfb79b572effa04076f00befa57eb70b016153f18", + ) + .unwrap(); + let validator_public_key = PublicKey::from_hex( + "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + ) + .unwrap(); + let delegator_kind = DelegatorKind::PublicKey(delegator_public_key); + let delegator = SeigniorageAllocation::delegator_kind( + delegator_kind, + validator_public_key, + delegator_amount, + ); + let validator = SeigniorageAllocation::validator( + PublicKey::from_hex( + "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + ) + .unwrap(), + validator_amount, + ); + let seigniorage_allocations = vec![delegator, validator]; + let mut era_info = EraInfo::new(); + *era_info.seigniorage_allocations_mut() = seigniorage_allocations; + era_info + } +} diff --git a/rpc_sidecar/src/rpcs/chain/era_summary.rs b/rpc_sidecar/src/rpcs/chain/era_summary.rs new file mode 100644 index 00000000..3d5e4f80 --- /dev/null +++ b/rpc_sidecar/src/rpcs/chain/era_summary.rs @@ -0,0 +1,58 @@ +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use casper_types::{ + system::auction::{DelegatorKind, EraInfo, SeigniorageAllocation}, + AsymmetricType, BlockHash, BlockV2, Digest, EraId, PublicKey, StoredValue, U512, +}; + +use crate::rpcs::common::MERKLE_PROOF; + +pub(super) static ERA_SUMMARY: Lazy = Lazy::new(|| { + let delegator_amount = U512::from(1000); + let validator_amount = U512::from(2000); + let delegator_public_key = + 
PublicKey::from_hex("01e1b46a25baa8a5c28beb3c9cfb79b572effa04076f00befa57eb70b016153f18") + .unwrap(); + let validator_public_key = + PublicKey::from_hex("012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876") + .unwrap(); + let delegator_kind = DelegatorKind::PublicKey(delegator_public_key); + let delegator = SeigniorageAllocation::delegator_kind( + delegator_kind, + validator_public_key, + delegator_amount, + ); + let validator = SeigniorageAllocation::validator( + PublicKey::from_hex("012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876") + .unwrap(), + validator_amount, + ); + let seigniorage_allocations = vec![delegator, validator]; + let mut era_info = EraInfo::new(); + *era_info.seigniorage_allocations_mut() = seigniorage_allocations; + EraSummary { + block_hash: *BlockV2::example().hash(), + era_id: EraId::from(42), + stored_value: StoredValue::EraInfo(era_info), + state_root_hash: *BlockV2::example().state_root_hash(), + merkle_proof: MERKLE_PROOF.clone(), + } +}); + +/// The summary of an era +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct EraSummary { + /// The block hash + pub block_hash: BlockHash, + /// The era id + pub era_id: EraId, + /// The StoredValue containing era information + pub stored_value: StoredValue, + /// Hex-encoded hash of the state root + pub state_root_hash: Digest, + /// The Merkle proof + pub merkle_proof: String, +} diff --git a/rpc_sidecar/src/rpcs/common.rs b/rpc_sidecar/src/rpcs/common.rs new file mode 100644 index 00000000..e75f3879 --- /dev/null +++ b/rpc_sidecar/src/rpcs/common.rs @@ -0,0 +1,238 @@ +use std::collections::BTreeMap; + +use casper_binary_port::KeyPrefix; +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::rpcs::error::Error; +use casper_types::{ + bytesrepr::ToBytes, contracts::ContractPackage, global_state::TrieMerkleProof, Account, + AddressableEntity, 
AvailableBlockRange, BlockHeader, BlockIdentifier, BlockWithSignatures, + ByteCode, Contract, ContractWasm, EntityAddr, EntryPointValue, GlobalStateIdentifier, Key, + NamedKeys, Package, StoredValue, +}; + +use crate::NodeClient; + +pub(super) static MERKLE_PROOF: Lazy = Lazy::new(|| { + String::from( + "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e\ + 55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3\ + f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a\ + 7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41d\ + d035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce9450022\ + 6a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7\ + 725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60\ + bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d0000030\ + 00000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467\ + a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c\ + 1bcbcee522649d2b135fe510fe3") +}); + +/// An enum to be used as the `data` field of a JSON-RPC error response. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields, untagged)] +pub enum ErrorData { + /// The requested block of state root hash is not available on this node. + MissingBlockOrStateRoot { + /// Additional info. + message: String, + /// The height range (inclusive) of fully available blocks. + available_block_range: AvailableBlockRange, + }, +} + +/// An addressable entity or a legacy account or contract. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +pub enum EntityWithBackwardCompat { + /// An addressable entity. + AddressableEntity { + /// The addressable entity. 
+ entity: AddressableEntity, + /// The named keys of the addressable entity. + named_keys: NamedKeys, + /// The entry points of the addressable entity. + entry_points: Vec, + /// The bytecode of the addressable entity. Returned when `include_bytecode` is `true`. + bytecode: Option, + }, + /// An account. + Account(Account), + /// A contract. + Contract { + /// The contract. + contract: Contract, + /// The Wasm code of the contract. Returned when `include_bytecode` is `true`. + wasm: Option, + }, +} + +/// A package or a legacy contract package. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +pub enum PackageWithBackwardCompat { + /// A package. + Package(Package), + /// A contract package. + ContractPackage(ContractPackage), +} + +/// Byte code of an entity with a proof. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +pub struct ByteCodeWithProof { + code: ByteCode, + merkle_proof: String, +} + +impl ByteCodeWithProof { + /// Creates a new `ByteCodeWithProof`. + pub fn new(code: ByteCode, merkle_proof: String) -> Self { + Self { code, merkle_proof } + } +} + +/// Wasm code of a contract with a proof. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +pub struct ContractWasmWithProof { + wasm: ContractWasm, + merkle_proof: String, +} + +impl ContractWasmWithProof { + /// Creates a new `ContractWasmWithProof`. + pub fn new(wasm: ContractWasm, merkle_proof: String) -> Self { + Self { wasm, merkle_proof } + } +} + +pub async fn get_block_with_signatures( + node_client: &dyn NodeClient, + identifier: Option, +) -> Result { + match node_client + .read_block_with_signatures(identifier) + .await + .map_err(|err| Error::NodeRequest("block with signatures", err))? 
+ { + Some(block) => Ok(block), + None => { + let available_range = node_client + .read_available_block_range() + .await + .map_err(|err| Error::NodeRequest("available block range", err))?; + Err(Error::NoBlockFound(identifier, available_range)) + } + } +} + +pub async fn get_block_header( + node_client: &dyn NodeClient, + identifier: Option, +) -> Result { + match node_client + .read_block_header(identifier) + .await + .map_err(|err| Error::NodeRequest("block header", err))? + { + Some(header) => Ok(header), + None => { + let available_range = node_client + .read_available_block_range() + .await + .map_err(|err| Error::NodeRequest("available block range", err))?; + Err(Error::NoBlockFound(identifier, available_range)) + } + } +} + +pub async fn get_latest_switch_block_header( + node_client: &dyn NodeClient, +) -> Result { + match node_client + .read_latest_switch_block_header() + .await + .map_err(|err| Error::NodeRequest("latest switch block header", err))? + { + Some(header) => Ok(header), + None => { + let available_range = node_client + .read_available_block_range() + .await + .map_err(|err| Error::NodeRequest("available block range", err))?; + Err(Error::NoBlockFound(None, available_range)) + } + } +} + +pub async fn get_entity_named_keys( + node_client: &dyn NodeClient, + entity_addr: EntityAddr, + state_identifier: Option, +) -> Result { + let stored_values = node_client + .query_global_state_by_prefix(state_identifier, KeyPrefix::NamedKeysByEntity(entity_addr)) + .await + .map_err(|err| Error::NodeRequest("entity named keys", err))?; + let named_keys = stored_values + .into_iter() + .map(|stored_value| { + if let StoredValue::NamedKey(named_key) = stored_value { + let key = named_key + .get_key() + .map_err(|err| Error::InvalidNamedKeys(err.to_string()))?; + let name = named_key + .get_name() + .map_err(|err| Error::InvalidNamedKeys(err.to_string()))?; + Ok((name, key)) + } else { + Err(Error::InvalidNamedKeys(format!( + "unexpected stored value: {}", + 
stored_value.type_name() + ))) + } + }) + .collect::, Error>>()?; + Ok(NamedKeys::from(named_keys)) +} + +pub async fn get_entity_entry_points( + node_client: &dyn NodeClient, + entity_addr: EntityAddr, + state_identifier: Option, +) -> Result, Error> { + let stored_values_v1 = node_client + .query_global_state_by_prefix( + state_identifier, + KeyPrefix::EntryPointsV1ByEntity(entity_addr), + ) + .await + .map_err(|err| Error::NodeRequest("entity named keys", err))?; + let stored_values_v2 = node_client + .query_global_state_by_prefix( + state_identifier, + KeyPrefix::EntryPointsV2ByEntity(entity_addr), + ) + .await + .map_err(|err| Error::NodeRequest("entity named keys", err))?; + + stored_values_v1 + .into_iter() + .chain(stored_values_v2) + .map(|stored_value| { + if let StoredValue::EntryPoint(entry_point) = stored_value { + Ok(entry_point) + } else { + Err(Error::InvalidNamedKeys(format!( + "unexpected stored value: {}", + stored_value.type_name() + ))) + } + }) + .collect::>() +} + +pub fn encode_proof(proof: &Vec>) -> Result { + Ok(base16::encode_lower( + &proof.to_bytes().map_err(Error::BytesreprFailure)?, + )) +} diff --git a/rpc_sidecar/src/rpcs/docs.rs b/rpc_sidecar/src/rpcs/docs.rs new file mode 100644 index 00000000..bf907812 --- /dev/null +++ b/rpc_sidecar/src/rpcs/docs.rs @@ -0,0 +1,619 @@ +//! RPCs related to finding information about currently supported RPCs. 
+ +use std::sync::Arc; + +use async_trait::async_trait; +use derive_new::new; +use once_cell::sync::Lazy; +use schemars::{ + gen::{SchemaGenerator, SchemaSettings}, + schema::Schema, + JsonSchema, Map, MapEntry, +}; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; + +use super::{ + account::{PutDeploy, PutTransaction}, + chain::{ + GetBlock, GetBlockTransfers, GetEraInfoBySwitchBlock, GetEraSummary, GetStateRootHash, + }, + info::{ + GetChainspec, GetDeploy, GetPeers, GetReward, GetStatus, GetTransaction, + GetValidatorChanges, + }, + state::{ + GetAccountInfo, GetAddressableEntity, GetAuctionInfo, GetBalance, GetDictionaryItem, + GetItem, GetPackage, QueryBalance, QueryBalanceDetails, QueryGlobalState, + }, + state_get_auction_info_v2::GetAuctionInfo as GetAuctionInfoV2, + ApiVersion, NodeClient, RpcError, RpcWithOptionalParams, RpcWithParams, RpcWithoutParams, + CURRENT_API_VERSION, +}; + +pub(crate) const DOCS_EXAMPLE_API_VERSION: ApiVersion = CURRENT_API_VERSION; + +const DEFINITIONS_PATH: &str = "#/components/schemas/"; +pub(crate) const OPEN_RPC_VERSION: &str = "1.0.0-rc1"; + +pub(crate) static CONTACT: Lazy = Lazy::new(|| OpenRpcContactField { + name: "Casper Labs".to_string(), + url: "https://casperlabs.io".to_string(), +}); + +pub(crate) static LICENSE: Lazy = Lazy::new(|| OpenRpcLicenseField { + name: "APACHE LICENSE, VERSION 2.0".to_string(), + url: "https://www.apache.org/licenses/LICENSE-2.0".to_string(), +}); + +static SERVER: Lazy = Lazy::new(|| { + OpenRpcServerEntry::new( + "any Sidecar with JSON RPC API enabled".to_string(), + "http://IP:PORT/rpc/".to_string(), + ) +}); + +// As per https://spec.open-rpc.org/#service-discovery-method. +pub(crate) static OPEN_RPC_SCHEMA: Lazy = Lazy::new(|| { + let info = OpenRpcInfoField { + version: DOCS_EXAMPLE_API_VERSION.to_string(), + title: "Client API of Casper Node".to_string(), + description: "This describes the JSON-RPC 2.0 API of a node on the Casper network." 
+ .to_string(), + contact: CONTACT.clone(), + license: LICENSE.clone(), + }; + let mut schema = OpenRpcSchema::new(OPEN_RPC_VERSION.to_string(), info, vec![SERVER.clone()]); + + schema.push_with_params::( + "receives a Deploy to be executed by the network (DEPRECATED: use \ + `account_put_transaction` instead)", + ); + schema + .push_with_params::("receives a Transaction to be executed by the network"); + schema.push_with_params::( + "returns a Deploy from the network (DEPRECATED: use `info_get_transaction` instead)", + ); + schema.push_with_params::("returns a Transaction from the network"); + schema.push_with_params::("returns an Account from the network"); + schema + .push_with_params::("returns an AddressableEntity from the network"); + schema.push_with_params::("returns a Package from the network"); + schema.push_with_params::("returns an item from a Dictionary"); + schema.push_with_params::( + "a query to global state using either a Block hash or state root hash", + ); + schema.push_with_params::( + "query for a balance using a purse identifier and a state identifier", + ); + schema.push_with_params::( + "query for full balance information using a purse identifier and a state identifier", + ); + schema.push_without_params::("returns a list of peers connected to the node"); + schema.push_without_params::("returns the current status of the node"); + schema.push_with_params::( + "returns the reward for a given era and a validator or a delegator", + ); + schema + .push_without_params::("returns status changes of active validators"); + schema.push_without_params::( + "returns the raw bytes of the chainspec.toml, genesis accounts.toml, and \ + global_state.toml files", + ); + schema.push_with_optional_params::("returns a Block from the network"); + schema.push_with_optional_params::( + "returns all transfers for a Block from the network", + ); + schema.push_with_optional_params::( + "returns a state root hash at a given Block", + ); + schema.push_with_params::( + 
"returns a stored value from the network. This RPC is deprecated, use \ + `query_global_state` instead.", + ); + schema.push_with_params::("returns a purse's balance from the network"); + schema.push_with_optional_params::( + "returns an EraInfo from the network", + ); + schema.push_with_optional_params::( + "returns the bids and validators as of either a specific block (by height or hash), or \ + the most recently added block. This is a casper 1.x retro-compatibility endpoint. For blocks created in 1.x protocol it will work exactly the same as it used to. + For 2.x blocks it will try to retrofit the changed data structure into previous schema - but it is a lossy process. Use `state_get_auction_info_v2` endpoint to get data in new format. *IMPORTANT* This method is deprecated, has been added only for compatibility with retired nodes json-rpc API and will be removed in a future release of sidecar.", + ); + schema.push_with_optional_params::( + "returns the bids and validators as of either a specific block (by height or hash), or \ + the most recently added block. It works for blocks created in 1.x and 2.x", + ); + schema.push_with_optional_params::( + "returns the era summary at either a specific block (by height or hash), or the most \ + recently added block", + ); + + schema +}); +static LIST_RPCS_RESULT: Lazy = Lazy::new(|| RpcDiscoverResult { + api_version: DOCS_EXAMPLE_API_VERSION, + name: "OpenRPC Schema".to_string(), + schema: OPEN_RPC_SCHEMA.clone(), +}); + +/// A trait used to generate a static hardcoded example of `Self`. +pub trait DocExample { + /// Generates a hardcoded example of `Self`. + fn doc_example() -> &'static Self; +} + +/// The main schema for the casper node's RPC server, compliant with +/// [the OpenRPC Specification](https://spec.open-rpc.org). 
+#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +pub struct OpenRpcSchema { + openrpc: String, + info: OpenRpcInfoField, + servers: Vec, + methods: Vec, + components: Components, +} + +impl OpenRpcSchema { + pub fn new(openrpc: String, info: OpenRpcInfoField, servers: Vec) -> Self { + OpenRpcSchema { + openrpc, + info, + servers, + methods: vec![], + components: Components::default(), + } + } + + fn new_generator() -> SchemaGenerator { + let settings = SchemaSettings::default().with(|settings| { + settings.definitions_path = DEFINITIONS_PATH.to_string(); + }); + settings.into_generator() + } + + pub(crate) fn push_with_params(&mut self, summary: &str) { + let mut generator = Self::new_generator(); + + let params_schema = T::RequestParams::json_schema(&mut generator); + let params = Self::make_params(params_schema); + + let result_schema = T::ResponseResult::json_schema(&mut generator); + let result = ResponseResult { + name: format!("{}_result", T::METHOD), + schema: result_schema, + }; + + let examples = vec![Example::from_rpc_with_params::()]; + + let method = Method { + name: T::METHOD.to_string(), + summary: summary.to_string(), + params, + result, + examples, + }; + + self.methods.push(method); + self.update_schemas::(); + self.update_schemas::(); + } + + pub(crate) fn push_without_params(&mut self, summary: &str) { + let mut generator = Self::new_generator(); + + let result_schema = T::ResponseResult::json_schema(&mut generator); + let result = ResponseResult { + name: format!("{}_result", T::METHOD), + schema: result_schema, + }; + + let examples = vec![Example::from_rpc_without_params::()]; + + let method = Method { + name: T::METHOD.to_string(), + summary: summary.to_string(), + params: vec![], + result, + examples, + }; + + self.methods.push(method); + self.update_schemas::(); + } + + fn push_with_optional_params(&mut self, summary: &str) { + let mut generator = Self::new_generator(); + + let params_schema = 
T::OptionalRequestParams::json_schema(&mut generator); + let params = Self::make_optional_params(params_schema); + + let result_schema = T::ResponseResult::json_schema(&mut generator); + let result = ResponseResult { + name: format!("{}_result", T::METHOD), + schema: result_schema, + }; + + let examples = vec![Example::from_rpc_with_optional_params::()]; + + // TODO - handle adding a description that the params may be omitted if desired. + let method = Method { + name: T::METHOD.to_string(), + summary: summary.to_string(), + params, + result, + examples, + }; + + self.methods.push(method); + self.update_schemas::(); + self.update_schemas::(); + } + + /// Convert the schema for the params type for T into the OpenRpc-compatible map of name, value + /// pairs. + /// + /// As per the standard, the required params must be sorted before the optional ones. + fn make_params(schema: Schema) -> Vec { + let schema_object = schema.into_object().object.expect("should be object"); + let mut required_params = schema_object + .properties + .iter() + .filter(|(name, _)| schema_object.required.contains(*name)) + .map(|(name, schema)| SchemaParam { + name: name.clone(), + schema: schema.clone(), + required: true, + }) + .collect::>(); + let optional_params = schema_object + .properties + .iter() + .filter(|(name, _)| !schema_object.required.contains(*name)) + .map(|(name, schema)| SchemaParam { + name: name.clone(), + schema: schema.clone(), + required: false, + }) + .collect::>(); + required_params.extend(optional_params); + required_params + } + + /// Convert the schema for the optional params type for T into the OpenRpc-compatible map of + /// name, value pairs. + /// + /// Since all params must be unanimously optional, mark all incorrectly tagged "required" fields + /// as false. 
+ fn make_optional_params(schema: Schema) -> Vec { + let schema_object = schema.into_object().object.expect("should be object"); + schema_object + .properties + .iter() + .filter(|(name, _)| schema_object.required.contains(*name)) + .map(|(name, schema)| SchemaParam { + name: name.clone(), + schema: schema.clone(), + required: false, + }) + .collect::>() + } + + /// Insert the new entries into the #/components/schemas/ map. Panic if we try to overwrite an + /// entry with a different value. + fn update_schemas(&mut self) { + let generator = Self::new_generator(); + let mut root_schema = generator.into_root_schema_for::(); + for (key, value) in root_schema.definitions.drain(..) { + match self.components.schemas.entry(key) { + MapEntry::Occupied(current_value) => { + assert_eq!( + current_value.get().clone().into_object().metadata, + value.into_object().metadata + ) + } + MapEntry::Vacant(vacant) => { + let _ = vacant.insert(value); + } + } + } + } + + #[cfg(test)] + fn give_params_schema(&self) -> Schema { + let mut generator = Self::new_generator(); + T::OptionalRequestParams::json_schema(&mut generator) + } +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema, new)] +pub(crate) struct OpenRpcInfoField { + version: String, + title: String, + description: String, + contact: OpenRpcContactField, + license: OpenRpcLicenseField, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +pub(crate) struct OpenRpcContactField { + name: String, + url: String, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +pub(crate) struct OpenRpcLicenseField { + name: String, + url: String, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema, new)] +pub(crate) struct OpenRpcServerEntry { + name: String, + url: String, +} + +/// The struct containing the documentation for the RPCs. 
+#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct Method { + name: String, + summary: String, + params: Vec, + result: ResponseResult, + examples: Vec, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct SchemaParam { + name: String, + schema: Schema, + required: bool, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct ResponseResult { + name: String, + schema: Schema, +} + +/// An example pair of request params and response result. +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +pub struct Example { + name: String, + params: Vec, + result: ExampleResult, +} + +impl Example { + fn new(method_name: &str, maybe_params_obj: Option, result_value: Value) -> Self { + // Break the params struct into an array of param name and value pairs. + let params = match maybe_params_obj { + Some(params_obj) => params_obj + .as_object() + .unwrap() + .iter() + .map(|(name, value)| ExampleParam { + name: name.clone(), + value: value.clone(), + }) + .collect(), + None => vec![], + }; + + Example { + name: format!("{method_name}_example"), + params, + result: ExampleResult { + name: format!("{method_name}_example_result"), + value: result_value, + }, + } + } + + fn from_rpc_with_params() -> Self { + Self::new( + T::METHOD, + Some(json!(T::RequestParams::doc_example())), + json!(T::ResponseResult::doc_example()), + ) + } + + fn from_rpc_without_params() -> Self { + Self::new(T::METHOD, None, json!(T::ResponseResult::doc_example())) + } + + fn from_rpc_with_optional_params() -> Self { + Self::new( + T::METHOD, + Some(json!(T::OptionalRequestParams::doc_example())), + json!(T::ResponseResult::doc_example()), + ) + } +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct ExampleParam { + name: String, + value: Value, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] +struct ExampleResult { + name: String, + value: 
Value, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, JsonSchema, Default)] +pub struct Components { + schemas: Map, +} + +/// Result for "rpc.discover" RPC response. +// +// Fields named as per https://spec.open-rpc.org/#service-discovery-method. +#[derive(Clone, PartialEq, Serialize, Deserialize, JsonSchema, Debug)] +#[serde(deny_unknown_fields)] +pub struct RpcDiscoverResult { + /// The RPC API version. + #[schemars(with = "String")] + api_version: ApiVersion, + name: String, + /// The list of supported RPCs. + #[schemars(skip)] + schema: OpenRpcSchema, +} + +impl DocExample for RpcDiscoverResult { + fn doc_example() -> &'static Self { + &LIST_RPCS_RESULT + } +} + +/// "rpc.discover" RPC. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +pub struct RpcDiscover {} + +#[async_trait] +impl RpcWithoutParams for RpcDiscover { + // Named as per https://spec.open-rpc.org/#service-discovery-method. + const METHOD: &'static str = "rpc.discover"; + type ResponseResult = RpcDiscoverResult; + + async fn do_handle_request( + _node_client: Arc, + ) -> Result { + Ok(RpcDiscoverResult::doc_example().clone()) + } +} + +mod doc_example_impls { + #[allow(deprecated)] + use casper_types::AuctionState; + use casper_types::{ + account::Account, Deploy, EraEndV1, EraEndV2, EraReport, PublicKey, Timestamp, Transaction, + }; + + use super::DocExample; + + impl DocExample for Deploy { + fn doc_example() -> &'static Self { + Deploy::example() + } + } + + impl DocExample for Transaction { + fn doc_example() -> &'static Self { + Transaction::example() + } + } + + impl DocExample for Account { + fn doc_example() -> &'static Self { + Account::example() + } + } + + impl DocExample for EraEndV1 { + fn doc_example() -> &'static Self { + EraEndV1::example() + } + } + + impl DocExample for EraEndV2 { + fn doc_example() -> &'static Self { + EraEndV2::example() + } + } + + impl DocExample for EraReport { + fn doc_example() -> &'static Self { + EraReport::::example() + 
} + } + + impl DocExample for Timestamp { + fn doc_example() -> &'static Self { + Timestamp::example() + } + } + + #[allow(deprecated)] + impl DocExample for AuctionState { + fn doc_example() -> &'static Self { + AuctionState::example() + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn check_optional_params_fields() -> Vec { + let contact = OpenRpcContactField { + name: "Casper Labs".to_string(), + url: "https://casperlabs.io".to_string(), + }; + let license = OpenRpcLicenseField { + name: "APACHE LICENSE, VERSION 2.0".to_string(), + url: "https://www.apache.org/licenses/LICENSE-2.0".to_string(), + }; + let info = OpenRpcInfoField { + version: DOCS_EXAMPLE_API_VERSION.to_string(), + title: "Client API of Casper Node".to_string(), + description: "This describes the JSON-RPC 2.0 API of a node on the Casper network." + .to_string(), + contact, + license, + }; + let schema = OpenRpcSchema::new("1.0.0-rc1".to_string(), info, vec![SERVER.clone()]); + let params = schema.give_params_schema::(); + let schema_object = params.into_object().object.expect("should be object"); + schema_object + .properties + .iter() + .filter(|(name, _)| !schema_object.required.contains(*name)) + .map(|(name, schema)| SchemaParam { + name: name.clone(), + schema: schema.clone(), + required: false, + }) + .collect::>() + } + + #[test] + fn check_chain_get_block_required_fields() { + let incorrect_optional_params = check_optional_params_fields::(); + assert!(incorrect_optional_params.is_empty()) + } + + #[test] + fn check_chain_get_block_transfers_required_fields() { + let incorrect_optional_params = check_optional_params_fields::(); + assert!(incorrect_optional_params.is_empty()) + } + + #[test] + fn check_chain_get_state_root_hash_required_fields() { + let incorrect_optional_params = check_optional_params_fields::(); + assert!(incorrect_optional_params.is_empty()) + } + + #[test] + fn check_chain_get_era_info_by_switch_block_required_fields() { + let incorrect_optional_params = 
check_optional_params_fields::(); + assert!(incorrect_optional_params.is_empty()) + } + + #[test] + fn check_state_get_auction_info_required_fields() { + let incorrect_optional_params = check_optional_params_fields::(); + assert!(incorrect_optional_params.is_empty()) + } + + #[test] + fn check_state_get_auction_info_v2_required_fields() { + let incorrect_optional_params = check_optional_params_fields::(); + assert!(incorrect_optional_params.is_empty()) + } +} diff --git a/rpc_sidecar/src/rpcs/error.rs b/rpc_sidecar/src/rpcs/error.rs new file mode 100644 index 00000000..c8fbfbb6 --- /dev/null +++ b/rpc_sidecar/src/rpcs/error.rs @@ -0,0 +1,141 @@ +use crate::node_client::{Error as NodeClientError, InvalidTransactionOrDeploy}; +use casper_json_rpc::{Error as RpcError, ReservedErrorCode}; +use casper_types::{ + bytesrepr, AvailableBlockRange, BlockIdentifier, DeployHash, KeyTag, TransactionHash, + URefFromStrError, +}; + +use super::{ErrorCode, ErrorData}; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("request for {0} has failed: {1}")] + NodeRequest(&'static str, NodeClientError), + #[error("no block found for the provided identifier")] + NoBlockFound(Option, AvailableBlockRange), + #[error("no transaction for hash {0}")] + NoTransactionWithHash(TransactionHash), + #[error("no deploy for hash {0}")] + NoDeployWithHash(DeployHash), + #[error("found a transaction when searching for a deploy")] + FoundTransactionInsteadOfDeploy, + #[error("value was not found in the global state")] + GlobalStateEntryNotFound, + #[error("the requested purse URef was invalid: {0}")] + InvalidPurseURef(URefFromStrError), + #[error("the provided dictionary key was invalid: {0}")] + InvalidDictionaryKey(String), + #[error("the provided dictionary key points at an unexpected type: {0}")] + InvalidTypeUnderDictionaryKey(String), + #[error("the provided dictionary key doesn't exist")] + DictionaryKeyNotFound, + #[error("the provided dictionary name doesn't exist")] + 
DictionaryNameNotFound, + #[error("the requested main purse was not found")] + MainPurseNotFound, + #[error("the requested account was not found")] + AccountNotFound, + #[error("the requested addressable entity was not found")] + AddressableEntityNotFound, + #[error("the requested reward was not found")] + RewardNotFound, + #[error("the requested account has been migrated to an addressable entity")] + AccountMigratedToEntity, + #[error("the provided dictionary value is {0} instead of a URef")] + DictionaryValueIsNotAUref(KeyTag), + #[error("the provided dictionary key could not be parsed: {0}")] + DictionaryKeyCouldNotBeParsed(String), + #[error("the transaction was invalid: {0}")] + InvalidTransaction(InvalidTransactionOrDeploy), + #[error("the deploy was invalid: {0}")] + InvalidDeploy(InvalidTransactionOrDeploy), + #[error("the requested purse balance could not be parsed")] + InvalidPurseBalance, + #[error("the requested account info could not be parsed")] + InvalidAccountInfo, + #[error("the requested addressable entity could not be parsed")] + InvalidAddressableEntity, + #[error("the auction state was invalid")] + InvalidAuctionState, + #[error("the named keys were invalid: {0}")] + InvalidNamedKeys(String), + #[error("the entry points were invalid: {0}")] + InvalidEntryPoints(String), + #[error("speculative execution returned nothing")] + SpecExecReturnedNothing, + #[error("unexpected bytesrepr failure: {0}")] + BytesreprFailure(bytesrepr::Error), +} + +impl Error { + fn code(&self) -> Option { + match self { + Error::NoBlockFound(_, _) => Some(ErrorCode::NoSuchBlock), + Error::NoTransactionWithHash(_) => Some(ErrorCode::NoSuchTransaction), + Error::NoDeployWithHash(_) => Some(ErrorCode::NoSuchDeploy), + Error::FoundTransactionInsteadOfDeploy => Some(ErrorCode::VariantMismatch), + Error::NodeRequest(_, NodeClientError::UnknownStateRootHash) => { + Some(ErrorCode::NoSuchStateRoot) + } + Error::GlobalStateEntryNotFound => Some(ErrorCode::QueryFailed), + 
Error::NodeRequest(_, NodeClientError::QueryFailedToExecute) => { + Some(ErrorCode::QueryFailedToExecute) + } + Error::NodeRequest(_, NodeClientError::FunctionIsDisabled) => { + Some(ErrorCode::FunctionIsDisabled) + } + Error::NodeRequest(_, NodeClientError::SwitchBlockNotFound) => { + Some(ErrorCode::SwitchBlockNotFound) + } + Error::NodeRequest(_, NodeClientError::SwitchBlockParentNotFound) => { + Some(ErrorCode::SwitchBlockParentNotFound) + } + Error::NodeRequest(_, NodeClientError::UnsupportedRewardsV1Request) => { + Some(ErrorCode::UnsupportedRewardsV1Request) + } + Error::NodeRequest(_, NodeClientError::PurseNotFound) => Some(ErrorCode::PurseNotFound), + Error::InvalidPurseURef(_) => Some(ErrorCode::FailedToParseGetBalanceURef), + Error::InvalidDictionaryKey(_) => Some(ErrorCode::FailedToParseQueryKey), + Error::MainPurseNotFound => Some(ErrorCode::NoMainPurse), + Error::AccountNotFound => Some(ErrorCode::NoSuchAccount), + Error::AddressableEntityNotFound => Some(ErrorCode::NoSuchAddressableEntity), + Error::RewardNotFound => Some(ErrorCode::NoRewardsFound), + Error::AccountMigratedToEntity => Some(ErrorCode::AccountMigratedToEntity), + Error::InvalidTypeUnderDictionaryKey(_) + | Error::DictionaryKeyNotFound + | Error::DictionaryNameNotFound + | Error::DictionaryValueIsNotAUref(_) + | Error::DictionaryKeyCouldNotBeParsed(_) => Some(ErrorCode::FailedToGetDictionaryURef), + Error::InvalidTransaction(_) => Some(ErrorCode::InvalidTransaction), + Error::NodeRequest(_, NodeClientError::SpecExecutionFailed(_)) + | Error::InvalidDeploy(_) + | Error::SpecExecReturnedNothing => Some(ErrorCode::InvalidDeploy), + Error::NodeRequest(_, _) => Some(ErrorCode::NodeRequestFailed), + Error::InvalidPurseBalance => Some(ErrorCode::FailedToGetBalance), + Error::InvalidAccountInfo + | Error::InvalidAddressableEntity + | Error::InvalidAuctionState + | Error::InvalidNamedKeys(_) + | Error::InvalidEntryPoints(_) + | Error::BytesreprFailure(_) => None, + } + } +} + +impl From for 
RpcError { + fn from(value: Error) -> Self { + match value { + Error::NoBlockFound(_, available_block_range) => RpcError::new( + ErrorCode::NoSuchBlock, + ErrorData::MissingBlockOrStateRoot { + message: value.to_string(), + available_block_range, + }, + ), + _ => match value.code() { + Some(code) => RpcError::new(code, value.to_string()), + None => RpcError::new(ReservedErrorCode::InternalError, value.to_string()), + }, + } + } +} diff --git a/rpc_sidecar/src/rpcs/error_code.rs b/rpc_sidecar/src/rpcs/error_code.rs new file mode 100644 index 00000000..d086f14e --- /dev/null +++ b/rpc_sidecar/src/rpcs/error_code.rs @@ -0,0 +1,119 @@ +use serde::{Deserialize, Serialize}; + +use casper_json_rpc::ErrorCodeT; + +/// The various codes which can be returned in the JSON-RPC Response's error object. +/// +/// **NOTE:** These values will be changed to lie outside the restricted range as defined in the +/// JSON-RPC spec as of casper-node v2.0.0. +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug)] +#[repr(i64)] +pub enum ErrorCode { + /// The requested Deploy was not found. + NoSuchDeploy = -32000, + /// The requested Block was not found. + NoSuchBlock = -32001, + /// Parsing the Key for a query failed. + FailedToParseQueryKey = -32002, + /// The query failed to find a result. + QueryFailed = -32003, + /// Executing the query failed. + QueryFailedToExecute = -32004, + /// Parsing the URef while getting a balance failed. + FailedToParseGetBalanceURef = -32005, + /// Failed to get the requested balance. + FailedToGetBalance = -32006, + /// Executing the query to retrieve the balance failed. + GetBalanceFailedToExecute = -32007, + /// The given Deploy cannot be executed as it is invalid. + InvalidDeploy = -32008, + /// The given account was not found. + NoSuchAccount = -32009, + /// Failed to get the requested dictionary URef. + FailedToGetDictionaryURef = -32010, + /// Failed to get the requested dictionary trie. 
+ FailedToGetTrie = -32011, + /// The requested state root hash was not found. + NoSuchStateRoot = -32012, + /// The main purse for a given account hash does not exist. + NoMainPurse = -32013, + /// The requested Transaction was not found. + NoSuchTransaction = -32014, + /// Variant mismatch. + VariantMismatch = -32015, + /// The given Transaction cannot be executed as it is invalid. + InvalidTransaction = -32016, + /// The given Block could not be verified. + InvalidBlock = -32017, + /// Failed during a node request. + NodeRequestFailed = -32018, + /// The request could not be satisfied because an underlying function is disabled. + FunctionIsDisabled = -32019, + /// The requested addressable entity was not found. + NoSuchAddressableEntity = -32020, + /// The requested account has been migrated to an addressable entity. + AccountMigratedToEntity = -32021, + /// The requested reward was not found. + NoRewardsFound = -32022, + /// The switch block for the requested era was not found. + SwitchBlockNotFound = -32023, + /// The parent of the switch block for the requested era was not found. + SwitchBlockParentNotFound = -32024, + /// Cannot serve rewards stored in V1 format + UnsupportedRewardsV1Request = -32025, + /// Purse was not found for given identifier. 
+ PurseNotFound = -32026, +} + +impl From for (i64, &'static str) { + fn from(error_code: ErrorCode) -> Self { + match error_code { + ErrorCode::NoSuchDeploy => (error_code as i64, "No such deploy"), + ErrorCode::NoSuchBlock => (error_code as i64, "No such block"), + ErrorCode::FailedToParseQueryKey => (error_code as i64, "Failed to parse query key"), + ErrorCode::QueryFailed => (error_code as i64, "Query failed"), + ErrorCode::QueryFailedToExecute => (error_code as i64, "Query failed to execute"), + ErrorCode::FailedToParseGetBalanceURef => { + (error_code as i64, "Failed to parse get-balance URef") + } + ErrorCode::FailedToGetBalance => (error_code as i64, "Failed to get balance"), + ErrorCode::GetBalanceFailedToExecute => { + (error_code as i64, "get-balance failed to execute") + } + ErrorCode::InvalidDeploy => (error_code as i64, "Invalid Deploy"), + ErrorCode::NoSuchAccount => (error_code as i64, "No such account"), + ErrorCode::FailedToGetDictionaryURef => { + (error_code as i64, "Failed to get dictionary URef") + } + ErrorCode::FailedToGetTrie => (error_code as i64, "Failed to get trie"), + ErrorCode::NoSuchStateRoot => (error_code as i64, "No such state root"), + ErrorCode::NoMainPurse => (error_code as i64, "Failed to get main purse"), + ErrorCode::NoSuchTransaction => (error_code as i64, "No such transaction"), + ErrorCode::VariantMismatch => (error_code as i64, "Variant mismatch internal error"), + ErrorCode::InvalidTransaction => (error_code as i64, "Invalid transaction"), + ErrorCode::InvalidBlock => (error_code as i64, "Invalid block"), + ErrorCode::NodeRequestFailed => (error_code as i64, "Node request failure"), + ErrorCode::FunctionIsDisabled => ( + error_code as i64, + "Function needed to execute this request is disabled", + ), + ErrorCode::NoSuchAddressableEntity => (error_code as i64, "No such addressable entity"), + ErrorCode::AccountMigratedToEntity => ( + error_code as i64, + "Account migrated to an addressable entity", + ), + 
ErrorCode::NoRewardsFound => (error_code as i64, "No rewards found"), + ErrorCode::SwitchBlockNotFound => (error_code as i64, "Switch block not found"), + ErrorCode::SwitchBlockParentNotFound => { + (error_code as i64, "Switch block parent not found") + } + ErrorCode::UnsupportedRewardsV1Request => ( + error_code as i64, + "Cannot serve rewards stored in V1 format", + ), + ErrorCode::PurseNotFound => (error_code as i64, "Purse not found"), + } + } +} + +impl ErrorCodeT for ErrorCode {} diff --git a/rpc_sidecar/src/rpcs/info.rs b/rpc_sidecar/src/rpcs/info.rs new file mode 100644 index 00000000..d7894da1 --- /dev/null +++ b/rpc_sidecar/src/rpcs/info.rs @@ -0,0 +1,940 @@ +//! RPCs returning ancillary information. + +use std::{collections::BTreeMap, str, sync::Arc}; + +use async_trait::async_trait; +use casper_binary_port::{EraIdentifier as PortEraIdentifier, MinimalBlockInfo}; +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use casper_types::{ + execution::{ExecutionResult, ExecutionResultV2}, + ActivationPoint, AvailableBlockRange, Block, BlockHash, BlockIdentifier, + BlockSynchronizerStatus, ChainspecRawBytes, Deploy, DeployHash, Digest, EraId, ExecutionInfo, + NextUpgrade, Peers, ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transaction, + TransactionHash, ValidatorChange, U512, +}; + +use super::{ + docs::{DocExample, DOCS_EXAMPLE_API_VERSION}, + ApiVersion, Error, NodeClient, RpcError, RpcWithParams, RpcWithoutParams, CURRENT_API_VERSION, +}; + +static GET_DEPLOY_PARAMS: Lazy = Lazy::new(|| GetDeployParams { + deploy_hash: *Deploy::doc_example().hash(), + finalized_approvals: true, +}); +static GET_DEPLOY_RESULT: Lazy = Lazy::new(|| GetDeployResult { + api_version: DOCS_EXAMPLE_API_VERSION, + deploy: Deploy::doc_example().clone(), + execution_info: Some(ExecutionInfo { + block_hash: *Block::example().hash(), + block_height: Block::example().clone_header().height(), + execution_result: 
Some(ExecutionResult::from(ExecutionResultV2::example().clone())), + }), +}); +static GET_TRANSACTION_PARAMS: Lazy = Lazy::new(|| GetTransactionParams { + transaction_hash: Transaction::doc_example().hash(), + finalized_approvals: true, +}); +static GET_TRANSACTION_RESULT: Lazy = Lazy::new(|| GetTransactionResult { + api_version: DOCS_EXAMPLE_API_VERSION, + transaction: Transaction::doc_example().clone(), + execution_info: Some(ExecutionInfo { + block_hash: *Block::example().hash(), + block_height: Block::example().height(), + execution_result: Some(ExecutionResult::from(ExecutionResultV2::example().clone())), + }), +}); +static GET_PEERS_RESULT: Lazy = Lazy::new(|| GetPeersResult { + api_version: DOCS_EXAMPLE_API_VERSION, + peers: Some(("tls:0101..0101".to_owned(), "127.0.0.1:54321".to_owned())) + .into_iter() + .collect::>() + .into(), +}); +static GET_VALIDATOR_CHANGES_RESULT: Lazy = Lazy::new(|| { + let change = JsonValidatorStatusChange::new(EraId::new(1), ValidatorChange::Added); + let public_key = PublicKey::example().clone(); + let changes = vec![JsonValidatorChanges::new(public_key, vec![change])]; + GetValidatorChangesResult { + api_version: DOCS_EXAMPLE_API_VERSION, + changes, + } +}); +static GET_CHAINSPEC_RESULT: Lazy = Lazy::new(|| GetChainspecResult { + api_version: DOCS_EXAMPLE_API_VERSION, + chainspec_bytes: ChainspecRawBytes::new(vec![42, 42].into(), None, None), +}); + +static GET_STATUS_RESULT: Lazy = Lazy::new(|| GetStatusResult { + api_version: DOCS_EXAMPLE_API_VERSION, + protocol_version: ProtocolVersion::from_parts(2, 0, 0), + peers: GET_PEERS_RESULT.peers.clone(), + chainspec_name: String::from("casper-example"), + starting_state_root_hash: Digest::default(), + last_added_block_info: Some(MinimalBlockInfo::from(Block::example().clone())), + our_public_signing_key: Some(PublicKey::example().clone()), + round_length: Some(TimeDiff::from_millis(1 << 16)), + next_upgrade: Some(NextUpgrade::new( + ActivationPoint::EraId(EraId::from(42)), + 
ProtocolVersion::from_parts(2, 0, 1), + )), + uptime: TimeDiff::from_seconds(13), + reactor_state: "Initialize".to_owned(), + last_progress: Timestamp::from(0), + available_block_range: AvailableBlockRange::RANGE_0_0, + block_sync: BlockSynchronizerStatus::example().clone(), + latest_switch_block_hash: Some(BlockHash::default()), + #[cfg(not(test))] + build_version: version_string(), + + // Prevent these values from changing between test sessions + #[cfg(test)] + build_version: String::from("1.0.0-xxxxxxxxx@DEBUG"), +}); +static GET_REWARD_PARAMS: Lazy = Lazy::new(|| GetRewardParams { + era_identifier: Some(EraIdentifier::Era(EraId::new(1))), + validator: PublicKey::example().clone(), + delegator: Some(PublicKey::example().clone()), +}); +static GET_REWARD_RESULT: Lazy = Lazy::new(|| GetRewardResult { + api_version: DOCS_EXAMPLE_API_VERSION, + reward_amount: U512::from(42), + era_id: EraId::new(1), + delegation_rate: 20, + switch_block_hash: BlockHash::default(), +}); + +/// Params for "info_get_deploy" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetDeployParams { + /// The deploy hash. + pub deploy_hash: DeployHash, + /// Whether to return the deploy with the finalized approvals substituted. If `false` or + /// omitted, returns the deploy with the approvals that were originally received by the node. + #[serde(default = "finalized_approvals_default")] + pub finalized_approvals: bool, +} + +/// The default for `GetDeployParams::finalized_approvals` and +/// `GetTransactionParams::finalized_approvals`. +fn finalized_approvals_default() -> bool { + false +} + +impl DocExample for GetDeployParams { + fn doc_example() -> &'static Self { + &GET_DEPLOY_PARAMS + } +} + +/// Result for "info_get_deploy" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetDeployResult { + /// The RPC API version. 
+ #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The deploy. + pub deploy: Deploy, + /// Execution info, if available. + pub execution_info: Option, +} + +impl DocExample for GetDeployResult { + fn doc_example() -> &'static Self { + &GET_DEPLOY_RESULT + } +} + +/// "info_get_deploy" RPC. +pub struct GetDeploy {} + +#[async_trait] +impl RpcWithParams for GetDeploy { + const METHOD: &'static str = "info_get_deploy"; + type RequestParams = GetDeployParams; + type ResponseResult = GetDeployResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let hash = TransactionHash::from(params.deploy_hash); + let (transaction, execution_info) = node_client + .read_transaction_with_execution_info(hash, params.finalized_approvals) + .await + .map_err(|err| Error::NodeRequest("transaction", err))? + .ok_or(Error::NoDeployWithHash(params.deploy_hash))? + .into_inner(); + + let deploy = match transaction { + Transaction::Deploy(deploy) => deploy, + Transaction::V1(_) => return Err(Error::FoundTransactionInsteadOfDeploy.into()), + }; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + deploy, + execution_info, + }) + } +} + +/// Params for "info_get_transaction" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetTransactionParams { + /// The transaction hash. + pub transaction_hash: TransactionHash, + /// Whether to return the transaction with the finalized approvals substituted. If `false` or + /// omitted, returns the transaction with the approvals that were originally received by the + /// node. + #[serde(default = "finalized_approvals_default")] + pub finalized_approvals: bool, +} + +impl DocExample for GetTransactionParams { + fn doc_example() -> &'static Self { + &GET_TRANSACTION_PARAMS + } +} + +/// Result for "info_get_transaction" RPC response. 
+#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetTransactionResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The transaction. + pub transaction: Transaction, + /// Execution info, if available. + pub execution_info: Option, +} + +impl DocExample for GetTransactionResult { + fn doc_example() -> &'static Self { + &GET_TRANSACTION_RESULT + } +} + +/// "info_get_transaction" RPC. +pub struct GetTransaction {} + +#[async_trait] +impl RpcWithParams for GetTransaction { + const METHOD: &'static str = "info_get_transaction"; + type RequestParams = GetTransactionParams; + type ResponseResult = GetTransactionResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let (transaction, execution_info) = node_client + .read_transaction_with_execution_info( + params.transaction_hash, + params.finalized_approvals, + ) + .await + .map_err(|err| Error::NodeRequest("transaction", err))? + .ok_or(Error::NoTransactionWithHash(params.transaction_hash))? + .into_inner(); + + Ok(Self::ResponseResult { + transaction, + api_version: CURRENT_API_VERSION, + execution_info, + }) + } +} + +/// Result for "info_get_peers" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetPeersResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The node ID and network address of each connected peer. + pub peers: Peers, +} + +impl DocExample for GetPeersResult { + fn doc_example() -> &'static Self { + &GET_PEERS_RESULT + } +} + +/// "info_get_peers" RPC. 
+pub struct GetPeers {} + +#[async_trait] +impl RpcWithoutParams for GetPeers { + const METHOD: &'static str = "info_get_peers"; + type ResponseResult = GetPeersResult; + + async fn do_handle_request( + node_client: Arc, + ) -> Result { + let peers = node_client + .read_peers() + .await + .map_err(|err| Error::NodeRequest("peers", err))?; + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + peers, + }) + } +} + +/// A single change to a validator's status in the given era. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct JsonValidatorStatusChange { + /// The era in which the change occurred. + era_id: EraId, + /// The change in validator status. + validator_change: ValidatorChange, +} + +impl JsonValidatorStatusChange { + pub(crate) fn new(era_id: EraId, validator_change: ValidatorChange) -> Self { + JsonValidatorStatusChange { + era_id, + validator_change, + } + } +} + +/// The changes in a validator's status. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct JsonValidatorChanges { + /// The public key of the validator. + public_key: PublicKey, + /// The set of changes to the validator's status. + status_changes: Vec, +} + +impl JsonValidatorChanges { + pub(crate) fn new( + public_key: PublicKey, + status_changes: Vec, + ) -> Self { + JsonValidatorChanges { + public_key, + status_changes, + } + } +} + +/// Result for the "info_get_validator_changes" RPC. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetValidatorChangesResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The validators' status changes. 
+ pub changes: Vec, +} + +impl GetValidatorChangesResult { + pub(crate) fn new(changes: BTreeMap>) -> Self { + let changes = changes + .into_iter() + .map(|(public_key, mut validator_changes)| { + validator_changes.sort(); + let status_changes = validator_changes + .into_iter() + .map(|(era_id, validator_change)| { + JsonValidatorStatusChange::new(era_id, validator_change) + }) + .collect(); + JsonValidatorChanges::new(public_key, status_changes) + }) + .collect(); + GetValidatorChangesResult { + api_version: CURRENT_API_VERSION, + changes, + } + } +} + +impl DocExample for GetValidatorChangesResult { + fn doc_example() -> &'static Self { + &GET_VALIDATOR_CHANGES_RESULT + } +} + +/// "info_get_validator_changes" RPC. +pub struct GetValidatorChanges {} + +#[async_trait] +impl RpcWithoutParams for GetValidatorChanges { + const METHOD: &'static str = "info_get_validator_changes"; + type ResponseResult = GetValidatorChangesResult; + + async fn do_handle_request( + node_client: Arc, + ) -> Result { + let changes = node_client + .read_validator_changes() + .await + .map_err(|err| Error::NodeRequest("validator changes", err))?; + Ok(Self::ResponseResult::new(changes.into())) + } +} + +/// Result for the "info_get_chainspec" RPC. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +pub struct GetChainspecResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The chainspec file bytes. + pub chainspec_bytes: ChainspecRawBytes, +} + +impl DocExample for GetChainspecResult { + fn doc_example() -> &'static Self { + &GET_CHAINSPEC_RESULT + } +} + +/// "info_get_chainspec" RPC. 
+pub struct GetChainspec {} + +#[async_trait] +impl RpcWithoutParams for GetChainspec { + const METHOD: &'static str = "info_get_chainspec"; + type ResponseResult = GetChainspecResult; + + async fn do_handle_request( + node_client: Arc, + ) -> Result { + let chainspec_bytes = node_client + .read_chainspec_bytes() + .await + .map_err(|err| Error::NodeRequest("chainspec bytes", err))?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + chainspec_bytes, + }) + } +} + +/// Result for "info_get_status" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetStatusResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The current Casper protocol version. + pub protocol_version: ProtocolVersion, + /// The node ID and network address of each connected peer. + pub peers: Peers, + /// The compiled node version. + pub build_version: String, + /// The chainspec name. + pub chainspec_name: String, + /// The state root hash of the lowest block in the available block range. + pub starting_state_root_hash: Digest, + /// The minimal info of the last block from the linear chain. + #[schemars(with = "Option")] + pub last_added_block_info: Option, + /// Our public signing key. + pub our_public_signing_key: Option, + /// The next round length if this node is a validator. + pub round_length: Option, + /// Information about the next scheduled upgrade. + pub next_upgrade: Option, + /// Time that passed since the node has started. + pub uptime: TimeDiff, + /// The name of the current state of node reactor. + pub reactor_state: String, + /// Timestamp of the last recorded progress in the reactor. + pub last_progress: Timestamp, + /// The available block range in storage. + pub available_block_range: AvailableBlockRange, + /// The status of the block synchronizer builders. 
+ pub block_sync: BlockSynchronizerStatus, + /// The hash of the latest switch block. + pub latest_switch_block_hash: Option, +} + +impl DocExample for GetStatusResult { + fn doc_example() -> &'static Self { + &GET_STATUS_RESULT + } +} + +/// "info_get_status" RPC. +pub struct GetStatus {} + +#[async_trait] +impl RpcWithoutParams for GetStatus { + const METHOD: &'static str = "info_get_status"; + type ResponseResult = GetStatusResult; + + async fn do_handle_request( + node_client: Arc, + ) -> Result { + let status = node_client + .read_node_status() + .await + .map_err(|err| Error::NodeRequest("node status", err))?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + protocol_version: status.protocol_version, + peers: status.peers, + chainspec_name: status.chainspec_name, + starting_state_root_hash: status.starting_state_root_hash, + last_added_block_info: status.last_added_block_info, + our_public_signing_key: status.our_public_signing_key, + round_length: status.round_length, + next_upgrade: status.next_upgrade, + uptime: status.uptime, + reactor_state: status.reactor_state.into_inner(), + last_progress: status.last_progress, + available_block_range: status.available_block_range, + block_sync: status.block_sync, + latest_switch_block_hash: status.latest_switch_block_hash, + build_version: status.build_version, + }) + } +} + +/// Params for "info_get_reward" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetRewardParams { + /// The era identifier. If `None`, the last finalized era is used. + pub era_identifier: Option, + /// The public key of the validator. + pub validator: PublicKey, + /// The public key of the delegator. If `Some`, the rewards for the delegator are returned. + /// If `None`, the rewards for the validator are returned. 
+ pub delegator: Option, +} + +impl DocExample for GetRewardParams { + fn doc_example() -> &'static Self { + &GET_REWARD_PARAMS + } +} + +/// Identifier for an era. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +pub enum EraIdentifier { + Era(EraId), + Block(BlockIdentifier), +} + +/// Result for "info_get_reward" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetRewardResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The total reward amount in the requested era. + pub reward_amount: U512, + /// The era for which the reward was calculated. + pub era_id: EraId, + /// The delegation rate of the validator. + pub delegation_rate: u8, + /// The switch block hash at which the reward was distributed. + pub switch_block_hash: BlockHash, +} + +impl DocExample for GetRewardResult { + fn doc_example() -> &'static Self { + &GET_REWARD_RESULT + } +} + +/// "info_get_reward" RPC. +pub struct GetReward {} + +#[async_trait] +impl RpcWithParams for GetReward { + const METHOD: &'static str = "info_get_reward"; + type RequestParams = GetRewardParams; + type ResponseResult = GetRewardResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let identifier = match params.era_identifier { + Some(EraIdentifier::Era(era_id)) => Some(PortEraIdentifier::Era(era_id)), + Some(EraIdentifier::Block(block_id)) => Some(PortEraIdentifier::Block(block_id)), + None => None, + }; + + let result = node_client + .read_reward(identifier, params.validator, params.delegator) + .await + .map_err(|err| Error::NodeRequest("rewards", err))? 
+ .ok_or(Error::RewardNotFound)?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + reward_amount: result.amount(), + era_id: result.era_id(), + delegation_rate: result.delegation_rate(), + switch_block_hash: result.switch_block_hash(), + }) + } +} + +#[cfg(not(test))] +fn version_string() -> String { + use std::env; + use tracing::warn; + + let mut version = env!("CARGO_PKG_VERSION").to_string(); + if let Ok(git_sha) = env::var("VERGEN_GIT_SHA") { + version = format!("{}-{}", version, git_sha); + } else { + warn!( + "vergen env var unavailable, casper-node build version will not include git short hash" + ); + } + + // Add a `@DEBUG` (or similar) tag to release string on non-release builds. + if env!("SIDECAR_BUILD_PROFILE") != "release" { + version += "@"; + let profile = env!("SIDECAR_BUILD_PROFILE").to_uppercase(); + version.push_str(&profile); + } + + version +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use crate::{rpcs::ErrorCode, ClientError}; + use casper_binary_port::{ + BinaryResponse, BinaryResponseAndRequest, Command, GetRequest, InformationRequest, + InformationRequestTag, RewardResponse, TransactionWithExecutionInfo, + }; + use casper_types::{ + bytesrepr::{Bytes, FromBytes, ToBytes}, + testing::TestRng, + BlockHash, TransactionV1, + }; + use pretty_assertions::assert_eq; + use rand::Rng; + + use super::*; + + #[tokio::test] + async fn get_deploy_result_none_execution_info_should_serialize_to_null() { + let rng = &mut TestRng::new(); + let deploy = Deploy::random(rng); + let result = GetDeployResult { + api_version: CURRENT_API_VERSION, + deploy, + execution_info: None, + }; + + let json_value = serde_json::to_value(&result).unwrap(); + + assert!(json_value + .get("execution_info") + .expect("should have execution_info") + .is_null()); + } + + #[tokio::test] + async fn get_transaction_result_none_execution_info_should_serialize_to_null() { + let rng = &mut TestRng::new(); + let transaction = 
Transaction::random(rng); + let result = GetTransactionResult { + api_version: CURRENT_API_VERSION, + transaction, + execution_info: None, + }; + + let json_value = serde_json::to_value(&result).unwrap(); + + assert!(json_value + .get("execution_info") + .expect("should have execution_info") + .is_null()); + } + + #[tokio::test] + async fn should_read_transaction() { + let rng = &mut TestRng::new(); + let transaction = Transaction::from(TransactionV1::random(rng)); + let execution_info = ExecutionInfo { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + execution_result: Some(ExecutionResult::random(rng)), + }; + let finalized_approvals = rng.gen(); + + let resp = GetTransaction::do_handle_request( + Arc::new(ValidTransactionMock::new( + TransactionWithExecutionInfo::new( + transaction.clone(), + Some(execution_info.clone()), + ), + finalized_approvals, + )), + GetTransactionParams { + transaction_hash: transaction.hash(), + finalized_approvals, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetTransactionResult { + api_version: CURRENT_API_VERSION, + transaction, + execution_info: Some(execution_info), + } + ); + } + + #[tokio::test] + async fn should_read_deploy_via_get_transaction() { + let rng = &mut TestRng::new(); + let deploy = Deploy::random(rng); + let execution_info = ExecutionInfo { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + execution_result: Some(ExecutionResult::random(rng)), + }; + let finalized_approvals = rng.gen(); + + let resp = GetTransaction::do_handle_request( + Arc::new(ValidTransactionMock::new( + TransactionWithExecutionInfo::new( + Transaction::Deploy(deploy.clone()), + Some(execution_info.clone()), + ), + finalized_approvals, + )), + GetTransactionParams { + transaction_hash: deploy.hash().into(), + finalized_approvals, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetTransactionResult { + api_version: CURRENT_API_VERSION, + 
transaction: deploy.into(), + execution_info: Some(execution_info), + } + ); + } + + #[tokio::test] + async fn should_read_deploy_via_get_deploy() { + let rng = &mut TestRng::new(); + let deploy = Deploy::random(rng); + let execution_info = ExecutionInfo { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + execution_result: Some(ExecutionResult::random(rng)), + }; + let finalized_approvals = rng.gen(); + + let resp = GetDeploy::do_handle_request( + Arc::new(ValidTransactionMock::new( + TransactionWithExecutionInfo::new( + Transaction::Deploy(deploy.clone()), + Some(execution_info.clone()), + ), + finalized_approvals, + )), + GetDeployParams { + deploy_hash: *deploy.hash(), + finalized_approvals, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetDeployResult { + api_version: CURRENT_API_VERSION, + deploy, + execution_info: Some(execution_info), + } + ); + } + + #[tokio::test] + async fn should_reject_transaction_when_asking_for_deploy() { + let rng = &mut TestRng::new(); + let transaction = TransactionV1::random(rng); + let execution_info = ExecutionInfo { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + execution_result: Some(ExecutionResult::random(rng)), + }; + let finalized_approvals = rng.gen(); + + let err = GetDeploy::do_handle_request( + Arc::new(ValidTransactionMock::new( + TransactionWithExecutionInfo::new( + Transaction::V1(transaction.clone()), + Some(execution_info.clone()), + ), + finalized_approvals, + )), + GetDeployParams { + deploy_hash: DeployHash::new(*transaction.hash().inner()), + finalized_approvals, + }, + ) + .await + .expect_err("should reject request"); + + assert_eq!(err.code(), ErrorCode::VariantMismatch as i64); + } + + #[tokio::test] + async fn should_return_rewards() { + let rng = &mut TestRng::new(); + let reward_amount = U512::from(rng.gen_range(0..1000)); + let era_id = EraId::new(rng.gen_range(0..1000)); + let validator = PublicKey::random(rng); + let delegator = 
rng.gen::().then(|| PublicKey::random(rng)); + let delegation_rate = rng.gen_range(0..100); + let switch_block_hash = BlockHash::random(rng); + + let resp = GetReward::do_handle_request( + Arc::new(RewardMock { + reward_amount, + era_id, + delegation_rate, + switch_block_hash, + }), + GetRewardParams { + era_identifier: Some(EraIdentifier::Era(era_id)), + validator: validator.clone(), + delegator, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetRewardResult { + api_version: CURRENT_API_VERSION, + reward_amount, + era_id, + delegation_rate, + switch_block_hash + } + ); + } + + struct ValidTransactionMock { + transaction_bytes: Vec, + should_request_approvals: bool, + } + + impl ValidTransactionMock { + fn new(info: TransactionWithExecutionInfo, should_request_approvals: bool) -> Self { + let transaction_bytes = info.to_bytes().expect("should serialize transaction"); + ValidTransactionMock { + transaction_bytes, + should_request_approvals, + } + } + } + + #[async_trait] + impl NodeClient for ValidTransactionMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, key }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::Transaction) => + { + let req = InformationRequest::try_from(( + InformationRequestTag::try_from(info_type_tag).unwrap(), + &key[..], + )) + .unwrap(); + assert!(matches!( + req, + InformationRequest::Transaction { with_finalized_approvals, .. 
} + if with_finalized_approvals == self.should_request_approvals + )); + let (transaction, _) = + TransactionWithExecutionInfo::from_bytes(&self.transaction_bytes) + .expect("should deserialize transaction"); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(transaction), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + struct RewardMock { + reward_amount: U512, + era_id: EraId, + delegation_rate: u8, + switch_block_hash: BlockHash, + } + + #[async_trait] + impl NodeClient for RewardMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::Reward) => + { + let resp = RewardResponse::new( + self.reward_amount, + self.era_id, + self.delegation_rate, + self.switch_block_hash, + ); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(resp), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } +} diff --git a/rpc_sidecar/src/rpcs/speculative_exec.rs b/rpc_sidecar/src/rpcs/speculative_exec.rs new file mode 100644 index 00000000..be7cb4b1 --- /dev/null +++ b/rpc_sidecar/src/rpcs/speculative_exec.rs @@ -0,0 +1,257 @@ +//! RPC related to speculative execution. 
+ +use std::{str, sync::Arc}; + +use async_trait::async_trait; +use casper_binary_port::SpeculativeExecutionResult; +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use casper_types::{Deploy, Transaction}; + +use super::{ + docs::{DocExample, OpenRpcSchema, DOCS_EXAMPLE_API_VERSION}, + speculative_open_rpc_schema::SPECULATIVE_OPEN_RPC_SCHEMA, + ApiVersion, Error, NodeClient, RpcError, RpcWithParams, RpcWithoutParams, CURRENT_API_VERSION, +}; + +static SPECULATIVE_EXEC_TXN_PARAMS: Lazy = + Lazy::new(|| SpeculativeExecTxnParams { + transaction: Transaction::doc_example().clone(), + }); +static SPECULATIVE_EXEC_TXN_RESULT: Lazy = + Lazy::new(|| SpeculativeExecTxnResult { + api_version: DOCS_EXAMPLE_API_VERSION, + execution_result: SpeculativeExecutionResult::example().clone(), + }); +static SPECULATIVE_EXEC_PARAMS: Lazy = Lazy::new(|| SpeculativeExecParams { + deploy: Deploy::doc_example().clone(), +}); + +/// Params for "speculative_exec_txn" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct SpeculativeExecTxnParams { + /// Transaction to execute. + pub transaction: Transaction, +} + +impl DocExample for SpeculativeExecTxnParams { + fn doc_example() -> &'static Self { + &SPECULATIVE_EXEC_TXN_PARAMS + } +} + +/// Result for "speculative_exec_txn" and "speculative_exec" RPC responses. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct SpeculativeExecTxnResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// Result of the speculative execution. 
+ #[schemars(with = "crate::rpcs::types::SpeculativeExecutionResultSchema")] + pub execution_result: SpeculativeExecutionResult, +} + +impl DocExample for SpeculativeExecTxnResult { + fn doc_example() -> &'static Self { + &SPECULATIVE_EXEC_TXN_RESULT + } +} + +/// "speculative_exec_txn" RPC +pub struct SpeculativeExecTxn {} + +#[async_trait] +impl RpcWithParams for SpeculativeExecTxn { + const METHOD: &'static str = "speculative_exec_txn"; + type RequestParams = SpeculativeExecTxnParams; + type ResponseResult = SpeculativeExecTxnResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + handle_request(node_client, params.transaction).await + } +} + +/// Params for "speculative_exec" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct SpeculativeExecParams { + /// Deploy to execute. + pub deploy: Deploy, +} + +impl DocExample for SpeculativeExecParams { + fn doc_example() -> &'static Self { + &SPECULATIVE_EXEC_PARAMS + } +} + +/// "speculative_exec" RPC +pub struct SpeculativeExec {} + +#[async_trait] +impl RpcWithParams for SpeculativeExec { + const METHOD: &'static str = "speculative_exec"; + type RequestParams = SpeculativeExecParams; + type ResponseResult = SpeculativeExecTxnResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + handle_request(node_client, params.deploy.into()).await + } +} + +async fn handle_request( + node_client: Arc, + transaction: Transaction, +) -> Result { + let speculative_execution_result = node_client + .exec_speculatively(transaction) + .await + .map_err(|err| Error::NodeRequest("speculatively executing a transaction", err))?; + + Ok(SpeculativeExecTxnResult { + api_version: CURRENT_API_VERSION, + execution_result: speculative_execution_result, + }) +} + +#[derive(Clone, PartialEq, Serialize, Deserialize, JsonSchema, Debug)] +#[serde(deny_unknown_fields)] +pub struct 
SpeculativeRpcDiscoverResult { + /// The RPC API version. + #[schemars(with = "String")] + api_version: ApiVersion, + name: String, + /// The list of supported RPCs. + #[schemars(skip)] + schema: OpenRpcSchema, +} + +static SPECULATIVE_DISCOVER_RPC_RESULT: Lazy = + Lazy::new(|| SpeculativeRpcDiscoverResult { + api_version: DOCS_EXAMPLE_API_VERSION, + name: "OpenRPC Schema for speculative exectution server".to_string(), + schema: SPECULATIVE_OPEN_RPC_SCHEMA.clone(), + }); + +impl DocExample for SpeculativeRpcDiscoverResult { + fn doc_example() -> &'static Self { + &SPECULATIVE_DISCOVER_RPC_RESULT + } +} + +/// "rpc.discover" RPC. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +pub struct SpeculativeRpcDiscover {} + +#[async_trait] +impl RpcWithoutParams for SpeculativeRpcDiscover { + const METHOD: &'static str = "rpc.discover"; + type ResponseResult = SpeculativeRpcDiscoverResult; + + async fn do_handle_request( + _node_client: Arc, + ) -> Result { + Ok(SpeculativeRpcDiscoverResult::doc_example().clone()) + } +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use casper_binary_port::{ + BinaryResponse, BinaryResponseAndRequest, Command, GetRequest, InformationRequestTag, + SpeculativeExecutionResult, + }; + use casper_types::{bytesrepr::Bytes, testing::TestRng}; + use pretty_assertions::assert_eq; + + use crate::ClientError; + + use super::*; + + #[tokio::test] + async fn should_spec_exec() { + let rng = &mut TestRng::new(); + let deploy = Deploy::random(rng); + let execution_result = SpeculativeExecutionResult::random(rng); + + let res = SpeculativeExec::do_handle_request( + Arc::new(ValidSpecExecMock { + execution_result: execution_result.clone(), + }), + SpeculativeExecParams { deploy }, + ) + .await + .expect("should handle request"); + assert_eq!( + res, + SpeculativeExecTxnResult { + execution_result, + api_version: CURRENT_API_VERSION, + } + ) + } + + #[tokio::test] + async fn should_spec_exec_txn() { + let rng = &mut 
TestRng::new(); + let transaction = Transaction::random(rng); + let execution_result = SpeculativeExecutionResult::random(rng); + + let res = SpeculativeExecTxn::do_handle_request( + Arc::new(ValidSpecExecMock { + execution_result: execution_result.clone(), + }), + SpeculativeExecTxnParams { transaction }, + ) + .await + .expect("should handle request"); + assert_eq!( + res, + SpeculativeExecTxnResult { + execution_result, + api_version: CURRENT_API_VERSION, + } + ) + } + + struct ValidSpecExecMock { + execution_result: SpeculativeExecutionResult, + } + + #[async_trait] + impl NodeClient for ValidSpecExecMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.execution_result.clone()), + Bytes::from(vec![]), + )) + } + Command::TrySpeculativeExec { .. 
} => Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.execution_result.clone()), + Bytes::from(vec![]), + )), + req => unimplemented!("unexpected request: {:?}", req), + } + } + } +} diff --git a/rpc_sidecar/src/rpcs/speculative_open_rpc_schema.rs b/rpc_sidecar/src/rpcs/speculative_open_rpc_schema.rs new file mode 100644 index 00000000..8ee4afc1 --- /dev/null +++ b/rpc_sidecar/src/rpcs/speculative_open_rpc_schema.rs @@ -0,0 +1,37 @@ +use once_cell::sync::Lazy; + +use super::{ + docs::{ + OpenRpcInfoField, OpenRpcSchema, OpenRpcServerEntry, CONTACT, DOCS_EXAMPLE_API_VERSION, + LICENSE, OPEN_RPC_VERSION, + }, + speculative_exec::{SpeculativeExec, SpeculativeExecTxn}, +}; + +pub(crate) static SERVER: Lazy = Lazy::new(|| { + OpenRpcServerEntry::new( + "any Sidecar with speculative JSON RPC API enabled".to_string(), + "http://IP:PORT/rpc/".to_string(), + ) +}); + +pub(crate) static SPECULATIVE_OPEN_RPC_SCHEMA: Lazy = Lazy::new(|| { + let info = OpenRpcInfoField::new( + DOCS_EXAMPLE_API_VERSION.to_string(), + "Speculative execution client API of Casper Node".to_string(), + "This describes the JSON-RPC 2.0 API of the speculative execution functinality of a node on the Casper network." + .to_string(), + CONTACT.clone(), + LICENSE.clone(), + ); + let mut schema = OpenRpcSchema::new(OPEN_RPC_VERSION.to_string(), info, vec![SERVER.clone()]); + schema.push_with_params::( + "receives a Deploy to be executed by the network (DEPRECATED: use \ + `account_put_transaction` instead)", + ); + schema.push_with_params::( + "receives a Deploy to be executed by the network (DEPRECATED: use \ + `account_put_transaction` instead)", + ); + schema +}); diff --git a/rpc_sidecar/src/rpcs/state.rs b/rpc_sidecar/src/rpcs/state.rs new file mode 100644 index 00000000..0244f7f1 --- /dev/null +++ b/rpc_sidecar/src/rpcs/state.rs @@ -0,0 +1,2795 @@ +//! RPCs related to the state. 
+ +mod auction_state; + +pub(crate) use auction_state::{JsonEraValidators, JsonValidatorWeight, ERA_VALIDATORS}; +use std::{collections::BTreeMap, str, sync::Arc}; + +use crate::node_client::{EntityResponse, PackageResponse}; +use async_trait::async_trait; +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::{ + common::{ + self, ByteCodeWithProof, ContractWasmWithProof, EntityWithBackwardCompat, + PackageWithBackwardCompat, MERKLE_PROOF, + }, + docs::{DocExample, DOCS_EXAMPLE_API_VERSION}, + ApiVersion, Error, NodeClient, RpcError, RpcWithOptionalParams, RpcWithParams, + CURRENT_API_VERSION, +}; +use auction_state::AuctionState; +use casper_binary_port::{ + DictionaryItemIdentifier, EntityIdentifier as PortEntityIdentifier, + PackageIdentifier as PortPackageIdentifier, PurseIdentifier as PortPurseIdentifier, +}; +#[cfg(test)] +use casper_types::testing::TestRng; +use casper_types::{ + account::{Account, AccountHash}, + addressable_entity::EntityKindTag, + bytesrepr::Bytes, + contracts::{ContractHash, ContractPackageHash}, + system::{ + auction::{ + BidKind, EraValidators, SeigniorageRecipientsV1, SeigniorageRecipientsV2, + ValidatorWeights, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, + }, + AUCTION, + }, + AddressableEntity, AddressableEntityHash, BlockHash, BlockHeader, BlockHeaderV2, + BlockIdentifier, BlockTime, BlockV2, CLValue, Digest, EntityAddr, EntryPoint, EntryPointValue, + EraId, GlobalStateIdentifier, Key, KeyTag, Package, PackageHash, PublicKey, SecretKey, + StoredValue, URef, U512, +}; +#[cfg(test)] +use rand::Rng; + +static GET_ITEM_PARAMS: Lazy = Lazy::new(|| GetItemParams { + state_root_hash: *BlockHeaderV2::example().state_root_hash(), + key: Key::from_formatted_str( + "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + ) + .unwrap(), + path: vec!["inner".to_string()], +}); +static GET_ITEM_RESULT: Lazy = Lazy::new(|| GetItemResult { + api_version: DOCS_EXAMPLE_API_VERSION, + 
stored_value: StoredValue::CLValue(CLValue::from_t(1u64).unwrap()), + merkle_proof: MERKLE_PROOF.clone(), +}); +static GET_BALANCE_PARAMS: Lazy = Lazy::new(|| GetBalanceParams { + state_root_hash: *BlockHeaderV2::example().state_root_hash(), + purse_uref: "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" + .to_string(), +}); +static GET_BALANCE_RESULT: Lazy = Lazy::new(|| GetBalanceResult { + api_version: DOCS_EXAMPLE_API_VERSION, + balance_value: U512::from(123_456), + merkle_proof: MERKLE_PROOF.clone(), +}); +static GET_AUCTION_INFO_PARAMS: Lazy = Lazy::new(|| GetAuctionInfoParams { + block_identifier: BlockIdentifier::Hash(*BlockHash::example()), +}); +static GET_AUCTION_INFO_RESULT: Lazy = Lazy::new(|| GetAuctionInfoResult { + api_version: DOCS_EXAMPLE_API_VERSION, + auction_state: AuctionState::doc_example().clone(), +}); +static GET_ACCOUNT_INFO_PARAMS: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); + let public_key = PublicKey::from(&secret_key); + GetAccountInfoParams { + account_identifier: AccountIdentifier::PublicKey(public_key), + block_identifier: Some(BlockIdentifier::Hash(*BlockHash::example())), + } +}); +static GET_ACCOUNT_INFO_RESULT: Lazy = Lazy::new(|| GetAccountInfoResult { + api_version: DOCS_EXAMPLE_API_VERSION, + account: Account::doc_example().clone(), + merkle_proof: MERKLE_PROOF.clone(), +}); +static GET_ADDRESSABLE_ENTITY_PARAMS: Lazy = + Lazy::new(|| GetAddressableEntityParams { + entity_identifier: EntityIdentifier::EntityAddr(EntityAddr::new_account([0; 32])), + block_identifier: Some(BlockIdentifier::Hash(*BlockHash::example())), + include_bytecode: None, + }); +static GET_ADDRESSABLE_ENTITY_RESULT: Lazy = + Lazy::new(|| GetAddressableEntityResult { + api_version: DOCS_EXAMPLE_API_VERSION, + merkle_proof: MERKLE_PROOF.clone(), + entity: EntityWithBackwardCompat::AddressableEntity { + entity: AddressableEntity::example().clone(), + named_keys: [("key".to_string(), 
Key::Hash([0u8; 32]))] + .iter() + .cloned() + .collect::>() + .into(), + entry_points: vec![EntryPointValue::new_v1_entry_point_value( + EntryPoint::default_with_name("entry_point"), + )], + bytecode: None, + }, + }); +static GET_PACKAGE_PARAMS: Lazy = Lazy::new(|| GetPackageParams { + package_identifier: PackageIdentifier::ContractPackageHash(ContractPackageHash::new([0; 32])), + block_identifier: Some(BlockIdentifier::Hash(*BlockHash::example())), +}); +static GET_PACKAGE_RESULT: Lazy = Lazy::new(|| GetPackageResult { + api_version: DOCS_EXAMPLE_API_VERSION, + package: PackageWithBackwardCompat::Package( + Package::new( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ) + .clone(), + ), + merkle_proof: MERKLE_PROOF.clone(), +}); +static GET_DICTIONARY_ITEM_PARAMS: Lazy = + Lazy::new(|| GetDictionaryItemParams { + state_root_hash: *BlockHeaderV2::example().state_root_hash(), + dictionary_identifier: DictionaryIdentifier::URef { + seed_uref: "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" + .to_string(), + dictionary_item_key: "a_unique_entry_identifier".to_string(), + }, + }); +static GET_DICTIONARY_ITEM_RESULT: Lazy = + Lazy::new(|| GetDictionaryItemResult { + api_version: DOCS_EXAMPLE_API_VERSION, + dictionary_key: + "dictionary-67518854aa916c97d4e53df8570c8217ccc259da2721b692102d76acd0ee8d1f" + .to_string(), + stored_value: StoredValue::CLValue(CLValue::from_t(1u64).unwrap()), + merkle_proof: MERKLE_PROOF.clone(), + }); +static QUERY_GLOBAL_STATE_PARAMS: Lazy = + Lazy::new(|| QueryGlobalStateParams { + state_identifier: Some(GlobalStateIdentifier::BlockHash(*BlockV2::example().hash())), + key: Key::from_formatted_str( + "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + ) + .unwrap(), + path: vec![], + }); +static QUERY_GLOBAL_STATE_RESULT: Lazy = + Lazy::new(|| QueryGlobalStateResult { + api_version: DOCS_EXAMPLE_API_VERSION, + block_header: 
Some(BlockHeaderV2::example().clone().into()), + stored_value: StoredValue::Account(Account::doc_example().clone()), + merkle_proof: MERKLE_PROOF.clone(), + }); +static GET_TRIE_PARAMS: Lazy = Lazy::new(|| GetTrieParams { + trie_key: *BlockHeaderV2::example().state_root_hash(), +}); +static GET_TRIE_RESULT: Lazy = Lazy::new(|| GetTrieResult { + api_version: DOCS_EXAMPLE_API_VERSION, + maybe_trie_bytes: None, +}); +static QUERY_BALANCE_PARAMS: Lazy = Lazy::new(|| QueryBalanceParams { + state_identifier: Some(GlobalStateIdentifier::BlockHash(*BlockHash::example())), + purse_identifier: PurseIdentifier::MainPurseUnderAccountHash(AccountHash::new([9u8; 32])), +}); +static QUERY_BALANCE_RESULT: Lazy = Lazy::new(|| QueryBalanceResult { + api_version: DOCS_EXAMPLE_API_VERSION, + balance: U512::from(123_456), +}); +static QUERY_BALANCE_DETAILS_PARAMS: Lazy = + Lazy::new(|| QueryBalanceDetailsParams { + state_identifier: Some(GlobalStateIdentifier::BlockHash(*BlockHash::example())), + purse_identifier: PurseIdentifier::MainPurseUnderAccountHash(AccountHash::new([9u8; 32])), + }); +static QUERY_BALANCE_DETAILS_RESULT: Lazy = + Lazy::new(|| QueryBalanceDetailsResult { + api_version: DOCS_EXAMPLE_API_VERSION, + total_balance: U512::from(123_456), + available_balance: U512::from(123_456), + total_balance_proof: MERKLE_PROOF.clone(), + holds: vec![BalanceHoldWithProof { + time: BlockTime::new(0), + amount: U512::from(123_456), + proof: MERKLE_PROOF.clone(), + }], + }); + +/// Params for "state_get_item" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetItemParams { + /// Hash of the state root. + pub state_root_hash: Digest, + /// The key under which to query. + pub key: Key, + /// The path components starting from the key as base. 
+ #[serde(default)] + pub path: Vec, +} + +impl DocExample for GetItemParams { + fn doc_example() -> &'static Self { + &GET_ITEM_PARAMS + } +} + +/// Result for "state_get_item" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetItemResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The stored value. + pub stored_value: StoredValue, + /// The Merkle proof. + pub merkle_proof: String, +} + +impl DocExample for GetItemResult { + fn doc_example() -> &'static Self { + &GET_ITEM_RESULT + } +} + +/// "state_get_item" RPC. +pub struct GetItem {} + +#[async_trait] +impl RpcWithParams for GetItem { + const METHOD: &'static str = "state_get_item"; + type RequestParams = GetItemParams; + type ResponseResult = GetItemResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let state_identifier = GlobalStateIdentifier::StateRootHash(params.state_root_hash); + let (stored_value, merkle_proof) = node_client + .query_global_state(Some(state_identifier), params.key, params.path) + .await + .map_err(|err| Error::NodeRequest("global state item", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner(); + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + stored_value, + merkle_proof: common::encode_proof(&merkle_proof)?, + }) + } +} + +/// Params for "state_get_balance" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetBalanceParams { + /// The hash of state root. + pub state_root_hash: Digest, + /// Formatted URef. + pub purse_uref: String, +} + +impl DocExample for GetBalanceParams { + fn doc_example() -> &'static Self { + &GET_BALANCE_PARAMS + } +} + +/// Result for "state_get_balance" RPC response. 
+#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetBalanceResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The available balance in motes (total balance - sum of all active holds). + pub balance_value: U512, + /// The Merkle proof. + pub merkle_proof: String, +} + +impl DocExample for GetBalanceResult { + fn doc_example() -> &'static Self { + &GET_BALANCE_RESULT + } +} + +/// "state_get_balance" RPC. +pub struct GetBalance {} + +#[async_trait] +impl RpcWithParams for GetBalance { + const METHOD: &'static str = "state_get_balance"; + type RequestParams = GetBalanceParams; + type ResponseResult = GetBalanceResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let purse_uref = + URef::from_formatted_str(¶ms.purse_uref).map_err(Error::InvalidPurseURef)?; + + let state_id = GlobalStateIdentifier::StateRootHash(params.state_root_hash); + let purse_id = PortPurseIdentifier::Purse(purse_uref); + let balance = node_client + .read_balance(Some(state_id), purse_id) + .await + .map_err(|err| Error::NodeRequest("balance", err))?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + balance_value: balance.available_balance, + merkle_proof: common::encode_proof(&vec![*balance.total_balance_proof])?, + }) + } +} + +/// Params for "state_get_auction_info" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub(crate) struct GetAuctionInfoParams { + /// The block identifier. + pub block_identifier: BlockIdentifier, +} + +impl DocExample for GetAuctionInfoParams { + fn doc_example() -> &'static Self { + &GET_AUCTION_INFO_PARAMS + } +} + +/// Result for "state_get_auction_info" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetAuctionInfoResult { + /// The RPC API version. 
+ #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The auction state. + pub auction_state: AuctionState, +} + +impl DocExample for GetAuctionInfoResult { + fn doc_example() -> &'static Self { + &GET_AUCTION_INFO_RESULT + } +} + +/// "state_get_auction_info" RPC. +pub struct GetAuctionInfo {} + +#[async_trait] +impl RpcWithOptionalParams for GetAuctionInfo { + const METHOD: &'static str = "state_get_auction_info"; + type OptionalRequestParams = GetAuctionInfoParams; + type ResponseResult = GetAuctionInfoResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let block_identifier = maybe_params.map(|params| params.block_identifier); + let block_header = common::get_block_header(&*node_client, block_identifier).await?; + let state_identifier = block_identifier.map(GlobalStateIdentifier::from); + let state_identifier = + state_identifier.unwrap_or(GlobalStateIdentifier::BlockHeight(block_header.height())); + + let is_1x = block_header.protocol_version().value().major == 1; + let bids = fetch_bid_kinds(node_client.clone(), state_identifier, is_1x).await?; + + // always retrieve the latest system contract registry, old versions of the node + // did not write it to the global state + let (registry_value, _) = node_client + .query_global_state(None, Key::SystemEntityRegistry, vec![]) + .await + .map_err(|err| Error::NodeRequest("system contract registry", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner(); + let registry: BTreeMap = registry_value + .into_cl_value() + .ok_or(Error::InvalidAuctionState)? 
+ .into_t() + .map_err(|_| Error::InvalidAuctionState)?; + let &auction_hash = registry.get(AUCTION).ok_or(Error::InvalidAuctionState)?; + + let (snapshot_value, _) = if let Some(result) = node_client + .query_global_state( + Some(state_identifier), + Key::addressable_entity_key(EntityKindTag::System, auction_hash), + vec![SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_owned()], + ) + .await + .map_err(|err| Error::NodeRequest("auction snapshot", err))? + { + result.into_inner() + } else { + node_client + .query_global_state( + Some(state_identifier), + Key::Hash(auction_hash.value()), + vec![SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_owned()], + ) + .await + .map_err(|err| Error::NodeRequest("auction snapshot", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner() + }; + let snapshot = snapshot_value + .into_cl_value() + .ok_or(Error::InvalidAuctionState)?; + + let validators = era_validators_from_snapshot(snapshot, is_1x)?; + let auction_state = AuctionState::new( + *block_header.state_root_hash(), + block_header.height(), + validators, + bids, + ); + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + auction_state, + }) + } +} + +pub(crate) async fn fetch_bid_kinds( + node_client: Arc, + state_identifier: GlobalStateIdentifier, + is_1x: bool, +) -> Result, RpcError> { + let key_tag = if is_1x { KeyTag::Bid } else { KeyTag::BidAddr }; + let stored_values = node_client + .query_global_state_by_tag(Some(state_identifier), key_tag) + .await + .map_err(|err| Error::NodeRequest("auction bids", err))? + .into_iter(); + let res: Result, Error> = if is_1x { + stored_values + .map(|v| v.into_bid().ok_or(Error::InvalidAuctionState)) + .map(|bid_res| bid_res.map(|bid| BidKind::Unified(bid.into()))) + .collect() + } else { + stored_values + .map(|value| value.into_bid_kind().ok_or(Error::InvalidAuctionState)) + .collect() + }; + res.map_err(|e| e.into()) +} + +/// Identifier of an account. 
+#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[serde(deny_unknown_fields, untagged)] +pub enum AccountIdentifier { + /// The public key of an account + PublicKey(PublicKey), + /// The account hash of an account + AccountHash(AccountHash), +} + +impl AccountIdentifier { + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..2) { + 0 => AccountIdentifier::PublicKey(PublicKey::random(rng)), + 1 => AccountIdentifier::AccountHash(rng.gen()), + _ => unreachable!(), + } + } +} + +/// Params for "state_get_account_info" RPC request +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetAccountInfoParams { + /// The public key of the Account. + #[serde(alias = "public_key")] + pub account_identifier: AccountIdentifier, + /// The block identifier. + pub block_identifier: Option, +} + +impl DocExample for GetAccountInfoParams { + fn doc_example() -> &'static Self { + &GET_ACCOUNT_INFO_PARAMS + } +} + +/// Result for "state_get_account_info" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetAccountInfoResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The account. + pub account: Account, + /// The Merkle proof. + pub merkle_proof: String, +} + +impl DocExample for GetAccountInfoResult { + fn doc_example() -> &'static Self { + &GET_ACCOUNT_INFO_RESULT + } +} + +/// "state_get_account_info" RPC. 
+pub struct GetAccountInfo {} + +#[async_trait] +impl RpcWithParams for GetAccountInfo { + const METHOD: &'static str = "state_get_account_info"; + type RequestParams = GetAccountInfoParams; + type ResponseResult = GetAccountInfoResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let maybe_state_identifier = params.block_identifier.map(GlobalStateIdentifier::from); + let base_key = { + let account_hash = match params.account_identifier { + AccountIdentifier::PublicKey(public_key) => public_key.to_account_hash(), + AccountIdentifier::AccountHash(account_hash) => account_hash, + }; + Key::Account(account_hash) + }; + let (account_value, merkle_proof) = node_client + .query_global_state(maybe_state_identifier, base_key, vec![]) + .await + .map_err(|err| Error::NodeRequest("account info", err))? + .ok_or(Error::AccountNotFound)? + .into_inner(); + let account = account_value + .into_account() + .ok_or(Error::AccountMigratedToEntity)?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + account, + merkle_proof: common::encode_proof(&merkle_proof)?, + }) + } +} + +/// Identifier of an addressable entity. +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[serde(deny_unknown_fields)] +pub enum EntityIdentifier { + /// The hash of a contract. + ContractHash(ContractHash), + /// The public key of an account. + PublicKey(PublicKey), + /// The account hash of an account. + AccountHash(AccountHash), + /// The address of an addressable entity. 
+ EntityAddr(EntityAddr), +} + +impl EntityIdentifier { + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + 0 => EntityIdentifier::PublicKey(PublicKey::random(rng)), + 1 => EntityIdentifier::AccountHash(rng.gen()), + 2 => EntityIdentifier::EntityAddr(rng.gen()), + _ => unreachable!(), + } + } + + pub fn into_port_entity_identifier(self) -> PortEntityIdentifier { + match self { + EntityIdentifier::ContractHash(contract_hash) => { + PortEntityIdentifier::ContractHash(contract_hash) + } + EntityIdentifier::PublicKey(public_key) => PortEntityIdentifier::PublicKey(public_key), + EntityIdentifier::AccountHash(account_hash) => { + PortEntityIdentifier::AccountHash(account_hash) + } + EntityIdentifier::EntityAddr(entity_addr) => { + PortEntityIdentifier::EntityAddr(entity_addr) + } + } + } +} + +/// Params for "state_get_entity" RPC request +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetAddressableEntityParams { + /// The identifier of the entity. + pub entity_identifier: EntityIdentifier, + /// The block identifier. + pub block_identifier: Option, + /// Whether to include the entity's bytecode in the response. + pub include_bytecode: Option, +} + +impl DocExample for GetAddressableEntityParams { + fn doc_example() -> &'static Self { + &GET_ADDRESSABLE_ENTITY_PARAMS + } +} + +/// Result for "state_get_entity" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetAddressableEntityResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The addressable entity or a legacy account. + pub entity: EntityWithBackwardCompat, + /// The Merkle proof. + pub merkle_proof: String, +} + +impl DocExample for GetAddressableEntityResult { + fn doc_example() -> &'static Self { + &GET_ADDRESSABLE_ENTITY_RESULT + } +} + +/// "state_get_entity" RPC. 
+pub struct GetAddressableEntity {} + +#[async_trait] +impl RpcWithParams for GetAddressableEntity { + const METHOD: &'static str = "state_get_entity"; + type RequestParams = GetAddressableEntityParams; + type ResponseResult = GetAddressableEntityResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let state_identifier = params.block_identifier.map(GlobalStateIdentifier::from); + let identifier = params.entity_identifier.into_port_entity_identifier(); + let include_bytecode = params.include_bytecode.unwrap_or(false); + + let (entity, merkle_proof) = match node_client + .read_entity(state_identifier, identifier, include_bytecode) + .await + .map_err(|err| Error::NodeRequest("entity", err))? + .ok_or(Error::AddressableEntityNotFound)? + { + EntityResponse::Entity(entity_with_bytecode) => { + let (addr, entity_with_proof, bytecode) = entity_with_bytecode.into_inner(); + let (entity, proof) = entity_with_proof.into_inner(); + let named_keys = + common::get_entity_named_keys(&*node_client, addr, state_identifier).await?; + let entry_points = + common::get_entity_entry_points(&*node_client, addr, state_identifier).await?; + let bytecode = bytecode + .map(|code_with_proof| { + let (code, proof) = code_with_proof.into_inner(); + Ok::<_, Error>(ByteCodeWithProof::new(code, common::encode_proof(&proof)?)) + }) + .transpose()?; + ( + EntityWithBackwardCompat::AddressableEntity { + entity, + named_keys, + entry_points, + bytecode, + }, + proof, + ) + } + EntityResponse::Account(account) => { + let (account, merkle_proof) = account.into_inner(); + (EntityWithBackwardCompat::Account(account), merkle_proof) + } + EntityResponse::Contract(contract_with_wasm) => { + let (_, contract_with_proof, wasm) = contract_with_wasm.into_inner(); + let (contract, proof) = contract_with_proof.into_inner(); + let wasm = wasm + .map(|wasm_with_proof| { + let (wasm, proof) = wasm_with_proof.into_inner(); + Ok::<_, 
Error>(ContractWasmWithProof::new( + wasm, + common::encode_proof(&proof)?, + )) + }) + .transpose()?; + (EntityWithBackwardCompat::Contract { contract, wasm }, proof) + } + }; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + entity, + merkle_proof: common::encode_proof(&merkle_proof)?, + }) + } +} + +/// Identifier of a package. +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[serde(deny_unknown_fields)] +pub enum PackageIdentifier { + /// The address of a package. + PackageAddr(PackageHash), + /// The hash of a contract package. + ContractPackageHash(ContractPackageHash), +} + +impl PackageIdentifier { + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..2) { + 0 => Self::PackageAddr(PackageHash::new(rng.gen())), + 1 => Self::ContractPackageHash(ContractPackageHash::new(rng.gen())), + _ => unreachable!(), + } + } + + pub fn into_port_package_identifier(self) -> PortPackageIdentifier { + match self { + Self::PackageAddr(package_addr) => { + PortPackageIdentifier::PackageAddr(package_addr.value()) + } + Self::ContractPackageHash(contract_package_hash) => { + PortPackageIdentifier::ContractPackageHash(contract_package_hash) + } + } + } +} + +/// Params for "state_get_entity" RPC request +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetPackageParams { + /// The identifier of the package. + pub package_identifier: PackageIdentifier, + /// The block identifier. + pub block_identifier: Option, +} + +impl DocExample for GetPackageParams { + fn doc_example() -> &'static Self { + &GET_PACKAGE_PARAMS + } +} + +/// Result for "state_get_entity" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetPackageResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The addressable entity or a legacy account. 
+ pub package: PackageWithBackwardCompat, + /// The Merkle proof. + pub merkle_proof: String, +} + +impl DocExample for GetPackageResult { + fn doc_example() -> &'static Self { + &GET_PACKAGE_RESULT + } +} + +/// "state_get_package" RPC. +pub struct GetPackage {} + +#[async_trait] +impl RpcWithParams for GetPackage { + const METHOD: &'static str = "state_get_package"; + type RequestParams = GetPackageParams; + type ResponseResult = GetPackageResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let state_identifier = params.block_identifier.map(GlobalStateIdentifier::from); + let identifier = params.package_identifier.into_port_package_identifier(); + + let (package, merkle_proof) = match node_client + .read_package(state_identifier, identifier) + .await + .map_err(|err| Error::NodeRequest("package", err))? + .ok_or(Error::AddressableEntityNotFound)? + { + PackageResponse::Package(package_with_proof) => { + let (package, proof) = package_with_proof.into_inner(); + (PackageWithBackwardCompat::Package(package), proof) + } + PackageResponse::ContractPackage(contract_package_with_proof) => { + let (contract_package, proof) = contract_package_with_proof.into_inner(); + ( + PackageWithBackwardCompat::ContractPackage(contract_package), + proof, + ) + } + }; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + package, + merkle_proof: common::encode_proof(&merkle_proof)?, + }) + } +} + +#[derive(Serialize, Deserialize, Debug, JsonSchema, Clone)] +/// Options for dictionary item lookups. +pub enum DictionaryIdentifier { + /// Lookup a dictionary item via an Account's named keys. + AccountNamedKey { + /// The account key as a formatted string whose named keys contains dictionary_name. + key: String, + /// The named key under which the dictionary seed URef is stored. + dictionary_name: String, + /// The dictionary item key formatted as a string. 
+ dictionary_item_key: String, + }, + /// Lookup a dictionary item via a Contract's named keys. + ContractNamedKey { + /// The contract key as a formatted string whose named keys contains dictionary_name. + key: String, + /// The named key under which the dictionary seed URef is stored. + dictionary_name: String, + /// The dictionary item key formatted as a string. + dictionary_item_key: String, + }, + /// Lookup a dictionary item via an entities named keys. + EntityNamedKey { + /// The entity address formatted as a string. + key: String, + /// The named key under which the dictionary seed URef is stored. + dictionary_name: String, + /// The dictionary item key formatted as a string. + dictionary_item_key: String, + }, + /// Lookup a dictionary item via its seed URef. + URef { + /// The dictionary's seed URef. + seed_uref: String, + /// The dictionary item key formatted as a string. + dictionary_item_key: String, + }, + /// Lookup a dictionary item via its unique key. + Dictionary(String), +} + +/// Params for "state_get_dictionary_item" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetDictionaryItemParams { + /// Hash of the state root + pub state_root_hash: Digest, + /// The Dictionary query identifier. + pub dictionary_identifier: DictionaryIdentifier, +} + +impl DocExample for GetDictionaryItemParams { + fn doc_example() -> &'static Self { + &GET_DICTIONARY_ITEM_PARAMS + } +} + +/// Result for "state_get_dictionary_item" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetDictionaryItemResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The key under which the value is stored. + pub dictionary_key: String, + /// The stored value. + pub stored_value: StoredValue, + /// The Merkle proof. 
+ pub merkle_proof: String, +} + +impl DocExample for GetDictionaryItemResult { + fn doc_example() -> &'static Self { + &GET_DICTIONARY_ITEM_RESULT + } +} + +/// "state_get_dictionary_item" RPC. +pub struct GetDictionaryItem {} + +#[async_trait] +impl RpcWithParams for GetDictionaryItem { + const METHOD: &'static str = "state_get_dictionary_item"; + type RequestParams = GetDictionaryItemParams; + type ResponseResult = GetDictionaryItemResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let state_identifier = GlobalStateIdentifier::StateRootHash(params.state_root_hash); + + let dictionary_key = match params.dictionary_identifier { + DictionaryIdentifier::AccountNamedKey { + key, + dictionary_name, + dictionary_item_key, + } => { + let hash = AccountHash::from_formatted_str(&key) + .map_err(|err| Error::InvalidDictionaryKey(err.to_string()))?; + DictionaryItemIdentifier::AccountNamedKey { + hash, + dictionary_name, + dictionary_item_key, + } + } + DictionaryIdentifier::ContractNamedKey { + key, + dictionary_name, + dictionary_item_key, + } => { + let hash = Key::from_formatted_str(&key) + .map_err(|err| Error::InvalidDictionaryKey(err.to_string()))? 
+ .into_hash_addr() + .ok_or_else(|| Error::InvalidDictionaryKey("not a hash address".to_owned()))?; + DictionaryItemIdentifier::ContractNamedKey { + hash, + dictionary_name, + dictionary_item_key, + } + } + DictionaryIdentifier::EntityNamedKey { + key, + dictionary_name, + dictionary_item_key, + } => { + let addr = EntityAddr::from_formatted_str(&key) + .map_err(|err| Error::InvalidDictionaryKey(err.to_string()))?; + DictionaryItemIdentifier::EntityNamedKey { + addr, + dictionary_name, + dictionary_item_key, + } + } + DictionaryIdentifier::URef { + seed_uref, + dictionary_item_key, + } => { + let seed_uref = URef::from_formatted_str(&seed_uref) + .map_err(|err| Error::InvalidDictionaryKey(err.to_string()))?; + DictionaryItemIdentifier::URef { + seed_uref, + dictionary_item_key, + } + } + DictionaryIdentifier::Dictionary(dictionary_item_key) => { + let key = Key::from_formatted_str(&dictionary_item_key) + .map_err(|err| Error::InvalidDictionaryKey(err.to_string()))?; + let dict_key = key.as_dictionary().ok_or_else(|| { + Error::InvalidDictionaryKey("not a dictionary key".to_owned()) + })?; + DictionaryItemIdentifier::DictionaryItem(*dict_key) + } + }; + let (key, result) = node_client + .query_dictionary_item(Some(state_identifier), dictionary_key) + .await + .map_err(|err| Error::NodeRequest("dictionary item", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner(); + let (stored_value, merkle_proof) = result.into_inner(); + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + dictionary_key: key.to_formatted_string(), + stored_value, + merkle_proof: common::encode_proof(&merkle_proof)?, + }) + } +} + +/// Params for "query_global_state" RPC +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct QueryGlobalStateParams { + /// The identifier used for the query. If not provided, the tip of the chain will be used. + pub state_identifier: Option, + /// The key under which to query. 
+ pub key: Key, + /// The path components starting from the key as base. + #[serde(default)] + pub path: Vec, +} + +impl DocExample for QueryGlobalStateParams { + fn doc_example() -> &'static Self { + &QUERY_GLOBAL_STATE_PARAMS + } +} + +/// Result for "query_global_state" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct QueryGlobalStateResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The block header if a Block hash was provided. + pub block_header: Option, + /// The stored value. + pub stored_value: StoredValue, + /// The Merkle proof. + pub merkle_proof: String, +} + +impl DocExample for QueryGlobalStateResult { + fn doc_example() -> &'static Self { + &QUERY_GLOBAL_STATE_RESULT + } +} + +/// "query_global_state" RPC +pub struct QueryGlobalState {} + +#[async_trait] +impl RpcWithParams for QueryGlobalState { + const METHOD: &'static str = "query_global_state"; + type RequestParams = QueryGlobalStateParams; + type ResponseResult = QueryGlobalStateResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let block_header = match params.state_identifier { + Some(GlobalStateIdentifier::BlockHash(block_hash)) => { + let identifier = BlockIdentifier::Hash(block_hash); + Some(common::get_block_header(&*node_client, Some(identifier)).await?) + } + Some(GlobalStateIdentifier::BlockHeight(block_height)) => { + let identifier = BlockIdentifier::Height(block_height); + Some(common::get_block_header(&*node_client, Some(identifier)).await?) + } + _ => None, + }; + + let (stored_value, merkle_proof) = node_client + .query_global_state(params.state_identifier, params.key, params.path) + .await + .map_err(|err| Error::NodeRequest("global state item", err))? + .ok_or(Error::GlobalStateEntryNotFound)? 
+ .into_inner(); + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + block_header, + stored_value, + merkle_proof: common::encode_proof(&merkle_proof)?, + }) + } +} + +/// Identifier of a purse. +#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[serde(deny_unknown_fields, rename_all = "snake_case")] +pub enum PurseIdentifier { + /// The main purse of the account identified by this public key. + MainPurseUnderPublicKey(PublicKey), + /// The main purse of the account identified by this account hash. + MainPurseUnderAccountHash(AccountHash), + /// The main purse of the account identified by this entity address. + MainPurseUnderEntityAddr(EntityAddr), + /// The purse identified by this URef. + PurseUref(URef), +} + +impl PurseIdentifier { + pub fn into_port_purse_identifier(self) -> PortPurseIdentifier { + match self { + Self::MainPurseUnderPublicKey(public_key) => PortPurseIdentifier::PublicKey(public_key), + Self::MainPurseUnderAccountHash(account_hash) => { + PortPurseIdentifier::Account(account_hash) + } + Self::MainPurseUnderEntityAddr(entity_addr) => PortPurseIdentifier::Entity(entity_addr), + Self::PurseUref(uref) => PortPurseIdentifier::Purse(uref), + } + } +} + +/// Params for "query_balance" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +pub struct QueryBalanceParams { + /// The state identifier used for the query, if none is passed + /// the tip of the chain will be used. + pub state_identifier: Option, + /// The identifier to obtain the purse corresponding to balance query. + pub purse_identifier: PurseIdentifier, +} + +impl DocExample for QueryBalanceParams { + fn doc_example() -> &'static Self { + &QUERY_BALANCE_PARAMS + } +} + +/// Result for "query_balance" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +pub struct QueryBalanceResult { + /// The RPC API version. 
+ #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The available balance in motes (total balance - sum of all active holds). + pub balance: U512, +} + +impl DocExample for QueryBalanceResult { + fn doc_example() -> &'static Self { + &QUERY_BALANCE_RESULT + } +} + +/// "query_balance" RPC. +pub struct QueryBalance {} + +#[async_trait] +impl RpcWithParams for QueryBalance { + const METHOD: &'static str = "query_balance"; + type RequestParams = QueryBalanceParams; + type ResponseResult = QueryBalanceResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let purse_id = params.purse_identifier.into_port_purse_identifier(); + let balance = node_client + .read_balance(params.state_identifier, purse_id) + .await + .map_err(|err| Error::NodeRequest("balance", err))?; + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + balance: balance.available_balance, + }) + } +} + +/// Params for "query_balance_details" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +pub struct QueryBalanceDetailsParams { + /// The identifier for the state used for the query, if none is passed, + /// the latest block will be used. + pub state_identifier: Option, + /// The identifier to obtain the purse corresponding to balance query. + pub purse_identifier: PurseIdentifier, +} + +impl DocExample for QueryBalanceDetailsParams { + fn doc_example() -> &'static Self { + &QUERY_BALANCE_DETAILS_PARAMS + } +} + +/// Result for "query_balance_details" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +pub struct QueryBalanceDetailsResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The purses total balance, not considering holds. + pub total_balance: U512, + /// The available balance in motes (total balance - sum of all active holds). 
+ pub available_balance: U512, + /// A proof that the given value is present in the Merkle trie. + pub total_balance_proof: String, + /// Holds active at the requested point in time. + pub holds: Vec, +} + +impl DocExample for QueryBalanceDetailsResult { + fn doc_example() -> &'static Self { + &QUERY_BALANCE_DETAILS_RESULT + } +} + +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +pub struct BalanceHoldWithProof { + /// The block time at which the hold was created. + pub time: BlockTime, + /// The amount in the hold. + pub amount: U512, + /// A proof that the given value is present in the Merkle trie. + pub proof: String, +} + +/// "query_balance_details" RPC. +pub struct QueryBalanceDetails {} + +#[async_trait] +impl RpcWithParams for QueryBalanceDetails { + const METHOD: &'static str = "query_balance_details"; + type RequestParams = QueryBalanceDetailsParams; + type ResponseResult = QueryBalanceDetailsResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let purse_id = params.purse_identifier.into_port_purse_identifier(); + let balance = node_client + .read_balance(params.state_identifier, purse_id) + .await + .map_err(|err| Error::NodeRequest("balance", err))?; + + let holds = balance + .balance_holds + .into_iter() + .flat_map(|(time, holds)| { + holds.into_iter().map(move |(_, (amount, proof))| { + Ok(BalanceHoldWithProof { + time, + amount, + proof: common::encode_proof(&vec![proof])?, + }) + }) + }) + .collect::, Error>>()?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + total_balance: balance.total_balance, + available_balance: balance.available_balance, + total_balance_proof: common::encode_proof(&vec![*balance.total_balance_proof])?, + holds, + }) + } +} + +/// Parameters for "state_get_trie" RPC request. +#[derive(Serialize, Deserialize, Debug, JsonSchema)] +pub struct GetTrieParams { + /// A trie key. 
+ pub trie_key: Digest, +} + +impl DocExample for GetTrieParams { + fn doc_example() -> &'static Self { + &GET_TRIE_PARAMS + } +} + +/// Result for "state_get_trie" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetTrieResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// A list of keys read under the specified prefix. + #[schemars( + with = "Option", + description = "A trie from global state storage, bytesrepr serialized and hex-encoded." + )] + pub maybe_trie_bytes: Option, +} + +impl DocExample for GetTrieResult { + fn doc_example() -> &'static Self { + &GET_TRIE_RESULT + } +} + +/// `state_get_trie` RPC. +pub struct GetTrie {} + +#[async_trait] +impl RpcWithParams for GetTrie { + const METHOD: &'static str = "state_get_trie"; + type RequestParams = GetTrieParams; + type ResponseResult = GetTrieResult; + + async fn do_handle_request( + node_client: Arc, + params: Self::RequestParams, + ) -> Result { + let maybe_trie = node_client + .read_trie_bytes(params.trie_key) + .await + .map_err(|err| Error::NodeRequest("trie", err))?; + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + maybe_trie_bytes: maybe_trie.map(Into::into), + }) + } +} + +pub(crate) fn era_validators_from_snapshot( + snapshot: CLValue, + is_1x: bool, +) -> Result { + if is_1x { + //handle as pre-condor + //TODO add some context to the error + let seigniorage: BTreeMap = + snapshot.into_t().map_err(|_| Error::InvalidAuctionState)?; + Ok(seigniorage + .into_iter() + .map(|(era_id, recipients)| { + let validator_weights = recipients + .into_iter() + .filter_map(|(public_key, bid)| { + bid.total_stake().map(|stake| (public_key, stake)) + }) + .collect::(); + (era_id, validator_weights) + }) + .collect()) + } else { + //handle as condor + //TODO add some context to the error + let seigniorage: BTreeMap = + snapshot.into_t().map_err(|_| 
Error::InvalidAuctionState)?; + Ok(seigniorage + .into_iter() + .map(|(era_id, recipients)| { + let validator_weights = recipients + .into_iter() + .filter_map(|(public_key, bid)| { + bid.total_stake().map(|stake| (public_key, stake)) + }) + .collect::(); + (era_id, validator_weights) + }) + .collect()) + } +} + +#[cfg(test)] +mod tests { + use std::{ + collections::VecDeque, + { + convert::TryFrom, + iter::{self, FromIterator}, + }, + }; + + use crate::{rpcs::ErrorCode, ClientError}; + use casper_binary_port::{ + AccountInformation, AddressableEntityInformation, BalanceResponse, BinaryResponse, + BinaryResponseAndRequest, Command, ContractInformation, DictionaryQueryResult, + ErrorCode as BinaryErrorCode, GetRequest, GlobalStateEntityQualifier, + GlobalStateQueryResult, InformationRequestTag, KeyPrefix, ValueWithProof, + }; + use casper_types::{ + addressable_entity::NamedKeyValue, + contracts::ContractPackage, + global_state::{TrieMerkleProof, TrieMerkleProofStep}, + system::auction::{ + Bid, BidKind, SeigniorageRecipientsSnapshotV1, SeigniorageRecipientsSnapshotV2, + ValidatorBid, + }, + testing::TestRng, + AccessRights, AddressableEntity, AvailableBlockRange, Block, ByteCode, ByteCodeHash, + ByteCodeKind, Contract, ContractRuntimeTag, ContractWasm, ContractWasmHash, EntityKind, + NamedKeys, PackageHash, ProtocolVersion, TestBlockBuilder, + }; + use pretty_assertions::assert_eq; + use rand::Rng; + + use super::*; + + #[tokio::test] + async fn should_read_state_item() { + let rng = &mut TestRng::new(); + let key = rng.gen::(); + let stored_value = StoredValue::CLValue(CLValue::from_t(rng.gen::()).unwrap()); + let merkle_proof = vec![TrieMerkleProof::new( + key, + stored_value.clone(), + VecDeque::from_iter([TrieMerkleProofStep::random(rng)]), + )]; + let expected = GlobalStateQueryResult::new(stored_value.clone(), merkle_proof.clone()); + + let resp = GetItem::do_handle_request( + Arc::new(ValidGlobalStateResultMock(expected.clone())), + GetItemParams { + 
state_root_hash: rng.gen(), + key, + path: vec![], + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetItemResult { + api_version: CURRENT_API_VERSION, + stored_value, + merkle_proof: common::encode_proof(&merkle_proof).expect("should encode proof"), + } + ); + } + + #[tokio::test] + async fn should_read_balance() { + let rng = &mut TestRng::new(); + let available_balance = rng.gen(); + let total_balance = rng.gen(); + let balance = BalanceResponse { + total_balance, + available_balance, + total_balance_proof: Box::new(TrieMerkleProof::new( + Key::Account(rng.gen()), + StoredValue::CLValue(CLValue::from_t(rng.gen::()).unwrap()), + VecDeque::from_iter([TrieMerkleProofStep::random(rng)]), + )), + balance_holds: BTreeMap::new(), + }; + + let resp = GetBalance::do_handle_request( + Arc::new(ValidBalanceMock(balance.clone())), + GetBalanceParams { + state_root_hash: rng.gen(), + purse_uref: URef::new(rng.gen(), AccessRights::empty()).to_formatted_string(), + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetBalanceResult { + api_version: CURRENT_API_VERSION, + balance_value: available_balance, + merkle_proof: common::encode_proof(&vec![*balance.total_balance_proof]) + .expect("should encode proof"), + } + ); + } + + #[tokio::test] + async fn should_handle_balance_not_found() { + let rng = &mut TestRng::new(); + + let err = GetBalance::do_handle_request( + Arc::new(BalancePurseNotFoundMock), + GetBalanceParams { + state_root_hash: rng.gen(), + purse_uref: URef::new(rng.gen(), AccessRights::empty()).to_formatted_string(), + }, + ) + .await + .expect_err("should fail request"); + + assert_eq!(err.code(), ErrorCode::PurseNotFound as i64); + } + + #[tokio::test] + async fn should_read_auction_info() { + struct ClientMock { + block: Block, + bids: Vec, + legacy_bids: Vec, + contract_hash: AddressableEntityHash, + snapshot: SeigniorageRecipientsSnapshotV2, + } + + #[async_trait] + impl NodeClient for ClientMock { + 
async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.block.clone_header()), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::AllItems { + key_tag: KeyTag::Bid, + .. + } + ) => + { + let bids = self + .legacy_bids + .iter() + .cloned() + .map(|bid| StoredValue::Bid(bid.into())) + .collect::>(); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(bids), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::AllItems { + key_tag: KeyTag::BidAddr, + .. + } + ) => + { + let bids = self + .bids + .iter() + .cloned() + .map(StoredValue::BidKind) + .collect::>(); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(bids), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::Item { + base_key: Key::SystemEntityRegistry, + .. + } + ) => + { + let system_contracts = + iter::once((AUCTION.to_string(), self.contract_hash)) + .collect::>(); + let result = GlobalStateQueryResult::new( + StoredValue::CLValue(CLValue::from_t(system_contracts).unwrap()), + vec![], + ); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(result), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::Item { + base_key: Key::AddressableEntity(_), + .. 
+ } + ) => + { + let response: BinaryResponse = match req.clone().destructure() { + (_, GlobalStateEntityQualifier::Item { base_key: _, path }) + if path == vec!["seigniorage_recipients_snapshot_version"] => + { + let result = GlobalStateQueryResult::new( + StoredValue::CLValue(CLValue::from_t(1_u8).unwrap()), + vec![], + ); + BinaryResponse::from_value(result) + } + _ => { + let result = GlobalStateQueryResult::new( + StoredValue::CLValue( + CLValue::from_t(self.snapshot.clone()).unwrap(), + ), + vec![], + ); + BinaryResponse::from_value(result) + } + }; + Ok(BinaryResponseAndRequest::new(response, Bytes::from(vec![]))) + } + + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + let bid = BidKind::Validator(ValidatorBid::empty(PublicKey::random(rng), rng.gen()).into()); + let legacy_bid = Bid::empty(PublicKey::random(rng), rng.gen()); + + let resp = GetAuctionInfo::do_handle_request( + Arc::new(ClientMock { + block: Block::V2(block.clone()), + bids: vec![bid.clone()], + legacy_bids: vec![legacy_bid.clone()], + contract_hash: rng.gen(), + snapshot: Default::default(), + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetAuctionInfoResult { + api_version: CURRENT_API_VERSION, + auction_state: AuctionState::new( + *block.state_root_hash(), + block.height(), + Default::default(), + vec![BidKind::Unified(legacy_bid.into())] + ), + } + ); + } + + #[tokio::test] + async fn should_read_pre_1_5_auction_info() { + struct ClientMock { + block: Block, + bids: Vec, + legacy_bids: Vec, + contract_hash: AddressableEntityHash, + snapshot: SeigniorageRecipientsSnapshotV1, + } + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. 
}) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.block.clone_header()), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::AllItems { + key_tag: KeyTag::Bid, + .. + } + ) => + { + let bids = self + .legacy_bids + .iter() + .cloned() + .map(|bid| StoredValue::Bid(bid.into())) + .collect::>(); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(bids), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::AllItems { + key_tag: KeyTag::BidAddr, + .. + } + ) => + { + let bids = self + .bids + .iter() + .cloned() + .map(StoredValue::BidKind) + .collect::>(); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(bids), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure(), + ( + // system entity registry is not present in pre-1.5 state + None, + GlobalStateEntityQualifier::Item { + base_key: Key::SystemEntityRegistry, + .. + } + ) + ) => + { + let system_contracts = + iter::once((AUCTION.to_string(), self.contract_hash)) + .collect::>(); + let result = GlobalStateQueryResult::new( + StoredValue::CLValue(CLValue::from_t(system_contracts).unwrap()), + vec![], + ); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(result), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::Item { + // we should return nothing for entity hash in pre-1.5 state + base_key: Key::AddressableEntity(_), + .. 
+ } + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::new_empty(), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::Item { + // we should query by contract hash in pre-1.5 state + base_key: Key::Hash(_), + .. + } + ) => + { + match req.clone().destructure() { + (_, GlobalStateEntityQualifier::Item { base_key: _, path }) + if path == vec!["seigniorage_recipients_snapshot_version"] => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::new_empty(), + Bytes::from(vec![]), + )) + } + _ => { + let result = GlobalStateQueryResult::new( + StoredValue::CLValue( + CLValue::from_t(self.snapshot.clone()).unwrap(), + ), + vec![], + ); + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(result), + Bytes::from(vec![]), + )) + } + } + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + let bid = BidKind::Validator(ValidatorBid::empty(PublicKey::random(rng), rng.gen()).into()); + let legacy_bid = Bid::empty(PublicKey::random(rng), rng.gen()); + + let resp = GetAuctionInfo::do_handle_request( + Arc::new(ClientMock { + block: Block::V2(block.clone()), + bids: vec![bid.clone()], + legacy_bids: vec![legacy_bid.clone()], + contract_hash: rng.gen(), + snapshot: Default::default(), + }), + None, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetAuctionInfoResult { + api_version: CURRENT_API_VERSION, + auction_state: AuctionState::new( + *block.state_root_hash(), + block.height(), + Default::default(), + vec![BidKind::Unified(legacy_bid.into())] + ), + } + ); + } + + #[tokio::test] + async fn should_fail_auction_info_when_block_not_found() { + struct ClientMock; + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + 
Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::new_empty(), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::AvailableBlockRange) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(AvailableBlockRange::RANGE_0_0), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let err = GetAuctionInfo::do_handle_request(Arc::new(ClientMock), None) + .await + .expect_err("should reject request"); + + assert_eq!(err.code(), ErrorCode::NoSuchBlock as i64); + } + + #[tokio::test] + async fn should_read_entity() { + use casper_types::addressable_entity::{ActionThresholds, AssociatedKeys}; + + struct ClientMock { + addr: EntityAddr, + entity: AddressableEntity, + named_keys: NamedKeys, + entry_points: Vec, + bytecode: Option, + } + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::Entity) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(AddressableEntityInformation::new( + self.addr, + ValueWithProof::new(self.entity.clone(), vec![]), + self.bytecode + .as_ref() + .map(|bytecode| ValueWithProof::new(bytecode.clone(), vec![])), + )), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::ItemsByPrefix { + key_prefix: KeyPrefix::NamedKeysByEntity(_), + .. 
+ } + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + self.named_keys + .iter() + .map(|(name, key)| { + StoredValue::NamedKey( + NamedKeyValue::from_concrete_values(*key, name.clone()) + .expect("should create named key"), + ) + }) + .collect::>(), + ), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::ItemsByPrefix { + key_prefix: KeyPrefix::EntryPointsV1ByEntity(_), + .. + } + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value( + self.entry_points + .iter() + .cloned() + .map(StoredValue::EntryPoint) + .collect::>(), + ), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::ItemsByPrefix { + key_prefix: KeyPrefix::EntryPointsV2ByEntity(_), + .. + } + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(Vec::::new()), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let rng = &mut TestRng::new(); + let entity = AddressableEntity::new( + PackageHash::new(rng.gen()), + ByteCodeHash::new(rng.gen()), + ProtocolVersion::V1_0_0, + rng.gen(), + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV2), + ); + let addr: EntityAddr = rng.gen(); + + let named_key_count = rng.gen_range(0..10); + let named_keys: NamedKeys = + iter::repeat_with(|| (rng.random_string(1..36), Key::Hash(rng.gen()))) + .take(named_key_count) + .collect::>() + .into(); + let entry_point_count = rng.gen_range(0..10); + let entry_points = iter::repeat_with(|| { + EntryPointValue::new_v1_entry_point_value(EntryPoint::default_with_name( + rng.random_string(1..10), + )) + }) + .take(entry_point_count) + .collect::>(); + + let bytecode = rng + .gen::() + .then(|| ByteCode::new(ByteCodeKind::V1CasperWasm, rng.random_vec(10..50))); 
+ + let entity_identifier = EntityIdentifier::random(rng); + + let resp = GetAddressableEntity::do_handle_request( + Arc::new(ClientMock { + addr, + entity: entity.clone(), + named_keys: named_keys.clone(), + entry_points: entry_points.clone(), + bytecode: bytecode.clone(), + }), + GetAddressableEntityParams { + block_identifier: None, + entity_identifier, + include_bytecode: Some(bytecode.is_some()), + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetAddressableEntityResult { + api_version: CURRENT_API_VERSION, + entity: EntityWithBackwardCompat::AddressableEntity { + entity, + named_keys, + entry_points, + bytecode: bytecode + .map(|bytecode| ByteCodeWithProof::new(bytecode, String::from("00000000"))), + }, + merkle_proof: String::from("00000000"), + } + ); + } + + #[tokio::test] + async fn should_read_entity_legacy_account() { + use casper_types::account::{ActionThresholds, AssociatedKeys}; + + let rng = &mut TestRng::new(); + let account = Account::new( + rng.gen(), + NamedKeys::default(), + rng.gen(), + AssociatedKeys::default(), + ActionThresholds::default(), + ); + let entity_identifier = EntityIdentifier::AccountHash(rng.gen()); + + let resp = GetAddressableEntity::do_handle_request( + Arc::new(ValidLegacyAccountMock { + account: account.clone(), + }), + GetAddressableEntityParams { + block_identifier: None, + entity_identifier, + include_bytecode: None, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetAddressableEntityResult { + api_version: CURRENT_API_VERSION, + entity: EntityWithBackwardCompat::Account(account), + merkle_proof: String::from("00000000"), + } + ); + } + + #[tokio::test] + async fn should_read_entity_legacy_contract() { + struct ClientMock { + hash: ContractHash, + contract: Contract, + wasm: Option, + } + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information 
{ info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::Entity) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(ContractInformation::new( + self.hash, + ValueWithProof::new(self.contract.clone(), vec![]), + self.wasm + .as_ref() + .map(|bytecode| ValueWithProof::new(bytecode.clone(), vec![])), + )), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let rng = &mut TestRng::new(); + let contract = Contract::new( + ContractPackageHash::new(rng.gen()), + ContractWasmHash::new(rng.gen()), + Default::default(), + Default::default(), + ProtocolVersion::V2_0_0, + ); + let hash = ContractHash::new(rng.gen()); + + let wasm = rng + .gen::() + .then(|| ContractWasm::new(rng.random_vec(10..50))); + + let entity_identifier = EntityIdentifier::random(rng); + + let resp = GetAddressableEntity::do_handle_request( + Arc::new(ClientMock { + hash, + contract: contract.clone(), + wasm: wasm.clone(), + }), + GetAddressableEntityParams { + block_identifier: None, + entity_identifier, + include_bytecode: Some(wasm.is_some()), + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetAddressableEntityResult { + api_version: CURRENT_API_VERSION, + entity: EntityWithBackwardCompat::Contract { + contract, + wasm: wasm + .map(|wasm| ContractWasmWithProof::new(wasm, String::from("00000000"))), + }, + merkle_proof: String::from("00000000"), + } + ); + } + + #[tokio::test] + async fn should_reject_read_entity_when_non_existent() { + struct ClientMock; + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. 
}) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::Entity) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::new_empty(), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let rng = &mut TestRng::new(); + let entity_identifier = EntityIdentifier::EntityAddr(rng.gen()); + + let err = GetAddressableEntity::do_handle_request( + Arc::new(ClientMock), + GetAddressableEntityParams { + block_identifier: None, + entity_identifier, + include_bytecode: None, + }, + ) + .await + .expect_err("should reject request"); + + assert_eq!(err.code(), ErrorCode::NoSuchAddressableEntity as i64); + } + + #[tokio::test] + async fn should_read_package() { + struct ClientMock { + package: Package, + } + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::Package) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(ValueWithProof::new( + self.package.clone(), + vec![], + )), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let rng = &mut TestRng::new(); + let package = Package::new( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ); + + let package_identifier = PackageIdentifier::random(rng); + + let resp = GetPackage::do_handle_request( + Arc::new(ClientMock { + package: package.clone(), + }), + GetPackageParams { + block_identifier: None, + package_identifier, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetPackageResult { + api_version: CURRENT_API_VERSION, + package: PackageWithBackwardCompat::Package(package), + merkle_proof: String::from("00000000"), + } + ); + } + + #[tokio::test] + async fn 
should_read_contract_package() { + struct ClientMock { + package: ContractPackage, + } + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::Package) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(ValueWithProof::new( + self.package.clone(), + vec![], + )), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let rng = &mut TestRng::new(); + let package = ContractPackage::new( + rng.gen(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ); + + let package_identifier = PackageIdentifier::random(rng); + + let resp = GetPackage::do_handle_request( + Arc::new(ClientMock { + package: package.clone(), + }), + GetPackageParams { + block_identifier: None, + package_identifier, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetPackageResult { + api_version: CURRENT_API_VERSION, + package: PackageWithBackwardCompat::ContractPackage(package), + merkle_proof: String::from("00000000"), + } + ); + } + + #[tokio::test] + async fn should_read_account_info() { + use casper_types::account::{ActionThresholds, AssociatedKeys}; + + let rng = &mut TestRng::new(); + let account = Account::new( + rng.gen(), + NamedKeys::default(), + rng.gen(), + AssociatedKeys::default(), + ActionThresholds::default(), + ); + let account_identifier = AccountIdentifier::random(rng); + + let resp = GetAccountInfo::do_handle_request( + Arc::new(ValidLegacyAccountMock { + account: account.clone(), + }), + GetAccountInfoParams { + block_identifier: None, + account_identifier, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetAccountInfoResult { + api_version: CURRENT_API_VERSION, + account, + merkle_proof: 
String::from("00000000"), + } + ); + } + + #[tokio::test] + async fn should_reject_read_account_info_when_migrated() { + struct ClientMock { + block: Block, + entity_hash: AddressableEntityHash, + } + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.block.clone_header()), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::Item { + base_key: Key::Account(_), + .. + } + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(GlobalStateQueryResult::new( + StoredValue::CLValue( + CLValue::from_t(Key::contract_entity_key(self.entity_hash)) + .unwrap(), + ), + vec![], + )), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let rng = &mut TestRng::new(); + let block = Block::V2(TestBlockBuilder::new().build(rng)); + let entity_hash: AddressableEntityHash = rng.gen(); + let account_identifier = AccountIdentifier::random(rng); + + let err = GetAccountInfo::do_handle_request( + Arc::new(ClientMock { + block: block.clone(), + entity_hash, + }), + GetAccountInfoParams { + block_identifier: None, + account_identifier, + }, + ) + .await + .expect_err("should reject request"); + + assert_eq!(err.code(), ErrorCode::AccountMigratedToEntity as i64); + } + + #[tokio::test] + async fn should_reject_read_account_info_when_non_existent() { + struct ClientMock { + block: Block, + } + + #[async_trait] + impl NodeClient for ClientMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. 
}) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.block.clone_header()), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::Item { + base_key: Key::Account(_), + .. + } + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::new_empty(), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + let rng = &mut TestRng::new(); + let block = Block::V2(TestBlockBuilder::new().build(rng)); + let account_identifier = AccountIdentifier::random(rng); + + let err = GetAccountInfo::do_handle_request( + Arc::new(ClientMock { + block: block.clone(), + }), + GetAccountInfoParams { + block_identifier: None, + account_identifier, + }, + ) + .await + .expect_err("should reject request"); + + assert_eq!(err.code(), ErrorCode::NoSuchAccount as i64); + } + + #[tokio::test] + async fn should_read_dictionary_item() { + let rng = &mut TestRng::new(); + let stored_value = StoredValue::CLValue(CLValue::from_t(rng.gen::()).unwrap()); + + let uref = URef::new(rng.gen(), AccessRights::empty()); + let item_key = rng.random_string(5..10); + let query_result = GlobalStateQueryResult::new(stored_value.clone(), vec![]); + let dict_key = Key::dictionary(uref, item_key.as_bytes()); + + let resp = GetDictionaryItem::do_handle_request( + Arc::new(ValidDictionaryQueryResultMock { + dict_key, + query_result, + }), + GetDictionaryItemParams { + state_root_hash: rng.gen(), + dictionary_identifier: DictionaryIdentifier::URef { + seed_uref: uref.to_formatted_string(), + dictionary_item_key: item_key.clone(), + }, + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + GetDictionaryItemResult { + api_version: CURRENT_API_VERSION, + dictionary_key: dict_key.to_formatted_string(), + stored_value, + merkle_proof: 
String::from("00000000"), + } + ); + } + + #[tokio::test] + async fn should_read_query_global_state_result() { + let rng = &mut TestRng::new(); + let block = Block::V2(TestBlockBuilder::new().build(rng)); + let stored_value = StoredValue::CLValue(CLValue::from_t(rng.gen::()).unwrap()); + let expected = GlobalStateQueryResult::new(stored_value.clone(), vec![]); + + let resp = QueryGlobalState::do_handle_request( + Arc::new(ValidGlobalStateResultWithBlockMock { + block: block.clone(), + result: expected.clone(), + }), + QueryGlobalStateParams { + state_identifier: Some(GlobalStateIdentifier::BlockHash(*block.hash())), + key: rng.gen(), + path: vec![], + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + QueryGlobalStateResult { + api_version: CURRENT_API_VERSION, + block_header: Some(block.take_header()), + stored_value, + merkle_proof: String::from("00000000"), + } + ); + } + + #[tokio::test] + async fn should_read_query_balance_result() { + let rng = &mut TestRng::new(); + let available_balance = rng.gen(); + let total_balance = rng.gen(); + let balance = BalanceResponse { + total_balance, + available_balance, + total_balance_proof: Box::new(TrieMerkleProof::new( + Key::Account(rng.gen()), + StoredValue::CLValue(CLValue::from_t(rng.gen::()).unwrap()), + VecDeque::from_iter([TrieMerkleProofStep::random(rng)]), + )), + balance_holds: BTreeMap::new(), + }; + + let resp = QueryBalance::do_handle_request( + Arc::new(ValidBalanceMock(balance.clone())), + QueryBalanceParams { + state_identifier: Some(GlobalStateIdentifier::random(rng)), + purse_identifier: PurseIdentifier::PurseUref(URef::new( + rng.gen(), + AccessRights::empty(), + )), + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + QueryBalanceResult { + api_version: CURRENT_API_VERSION, + balance: available_balance, + } + ); + } + + #[tokio::test] + async fn should_read_query_balance_details_result() { + let rng = &mut TestRng::new(); + let available_balance = 
rng.gen(); + let total_balance = rng.gen(); + let balance = BalanceResponse { + total_balance, + available_balance, + total_balance_proof: Box::new(TrieMerkleProof::new( + Key::Account(rng.gen()), + StoredValue::CLValue(CLValue::from_t(rng.gen::()).unwrap()), + VecDeque::from_iter([TrieMerkleProofStep::random(rng)]), + )), + balance_holds: BTreeMap::new(), + }; + + let resp = QueryBalanceDetails::do_handle_request( + Arc::new(ValidBalanceMock(balance.clone())), + QueryBalanceDetailsParams { + state_identifier: Some(GlobalStateIdentifier::random(rng)), + purse_identifier: PurseIdentifier::PurseUref(URef::new( + rng.gen(), + AccessRights::empty(), + )), + }, + ) + .await + .expect("should handle request"); + + assert_eq!( + resp, + QueryBalanceDetailsResult { + api_version: CURRENT_API_VERSION, + total_balance, + available_balance, + total_balance_proof: common::encode_proof(&vec![*balance.total_balance_proof]) + .expect("should encode proof"), + holds: vec![], + } + ); + } + + struct ValidDictionaryQueryResultMock { + dict_key: Key, + query_result: GlobalStateQueryResult, + } + + #[async_trait] + impl NodeClient for ValidDictionaryQueryResultMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::DictionaryItem { .. } + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(DictionaryQueryResult::new( + self.dict_key, + self.query_result.clone(), + )), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + struct ValidGlobalStateResultMock(GlobalStateQueryResult); + + #[async_trait] + impl NodeClient for ValidGlobalStateResultMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::Item { .. 
} + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.0.clone()), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + struct ValidGlobalStateResultWithBlockMock { + block: Block, + result: GlobalStateQueryResult, + } + + #[async_trait] + impl NodeClient for ValidGlobalStateResultWithBlockMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::BlockHeader) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.block.clone_header()), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::Item { .. } + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.result.clone()), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + struct ValidLegacyAccountMock { + account: Account, + } + + #[async_trait] + impl NodeClient for ValidLegacyAccountMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::Information { info_type_tag, .. }) + if InformationRequestTag::try_from(info_type_tag) + == Ok(InformationRequestTag::Entity) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(AccountInformation::new( + self.account.clone(), + vec![], + )), + Bytes::from(vec![]), + )) + } + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::Item { + base_key: Key::Account(_), + .. 
+ } + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(GlobalStateQueryResult::new( + StoredValue::Account(self.account.clone()), + vec![], + )), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + struct ValidBalanceMock(BalanceResponse); + + #[async_trait] + impl NodeClient for ValidBalanceMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::Balance { .. } + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::from_value(self.0.clone()), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } + + struct BalancePurseNotFoundMock; + + #[async_trait] + impl NodeClient for BalancePurseNotFoundMock { + async fn send_request( + &self, + req: Command, + ) -> Result { + match req { + Command::Get(GetRequest::State(req)) + if matches!( + req.clone().destructure().1, + GlobalStateEntityQualifier::Balance { .. 
} + ) => + { + Ok(BinaryResponseAndRequest::new( + BinaryResponse::new_error(BinaryErrorCode::PurseNotFound), + Bytes::from(vec![]), + )) + } + req => unimplemented!("unexpected request: {:?}", req), + } + } + } +} diff --git a/rpc_sidecar/src/rpcs/state/auction_state.rs b/rpc_sidecar/src/rpcs/state/auction_state.rs new file mode 100644 index 00000000..a367682d --- /dev/null +++ b/rpc_sidecar/src/rpcs/state/auction_state.rs @@ -0,0 +1,229 @@ +use std::collections::{btree_map::Entry, BTreeMap}; + +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +use casper_types::{ + system::auction::{ + Bid, BidKind, DelegatorBid, DelegatorKind, EraValidators, Staking, ValidatorBid, + }, + Digest, EraId, PublicKey, U512, +}; + +use crate::rpcs::docs::DocExample; + +pub(crate) static ERA_VALIDATORS: Lazy = Lazy::new(|| { + use casper_types::SecretKey; + + let secret_key_1 = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + let public_key_1 = PublicKey::from(&secret_key_1); + + let mut validator_weights = BTreeMap::new(); + validator_weights.insert(public_key_1, U512::from(10)); + + let mut era_validators = BTreeMap::new(); + era_validators.insert(EraId::from(10u64), validator_weights); + + era_validators +}); + +static AUCTION_INFO: Lazy = Lazy::new(|| { + use casper_types::{system::auction::DelegationRate, AccessRights, SecretKey, URef}; + use num_traits::Zero; + + let state_root_hash = Digest::from([11; Digest::LENGTH]); + let validator_secret_key = + SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + let validator_public_key = PublicKey::from(&validator_secret_key); + + let mut bids = vec![]; + let validator_bid = ValidatorBid::unlocked( + validator_public_key.clone(), + URef::new([250; 32], AccessRights::READ_ADD_WRITE), + U512::from(20), + DelegationRate::zero(), + 0, + u64::MAX, + 
0, + ); + bids.push(BidKind::Validator(Box::new(validator_bid))); + + let delegator_secret_key = + SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(); + let delegator_public_key = PublicKey::from(&delegator_secret_key); + let delegator_bid = DelegatorBid::unlocked( + delegator_public_key.into(), + U512::from(10), + URef::new([251; 32], AccessRights::READ_ADD_WRITE), + validator_public_key, + ); + bids.push(BidKind::Delegator(Box::new(delegator_bid))); + + let height: u64 = 10; + let era_validators = ERA_VALIDATORS.clone(); + AuctionState::new(state_root_hash, height, era_validators, bids) +}); + +/// A validator's weight. +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct JsonValidatorWeight { + public_key: PublicKey, + weight: U512, +} + +impl JsonValidatorWeight { + pub fn new(public_key: PublicKey, weight: U512) -> Self { + Self { public_key, weight } + } +} + +/// The validators for the given era. +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct JsonEraValidators { + era_id: EraId, + validator_weights: Vec, +} + +impl JsonEraValidators { + pub fn new(era_id: EraId, validator_weights: Vec) -> Self { + Self { + era_id, + validator_weights, + } + } +} + +/* We should use the AuctionState struct from casper-types once it's ctor is updated */ +/// Data structure summarizing auction contract data. +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, JsonSchema)] +#[serde(deny_unknown_fields)] +pub(crate) struct AuctionState { + /// Global state hash. + pub state_root_hash: Digest, + /// Block height. + pub block_height: u64, + /// Era validators. + pub era_validators: Vec, + /// All bids. 
+ #[serde(with = "BTreeMapToArray::")] + bids: BTreeMap, +} + +impl AuctionState { + /// Create new instance of `AuctionState` + pub fn new( + state_root_hash: Digest, + block_height: u64, + era_validators: EraValidators, + bids: Vec, + ) -> Self { + let mut json_era_validators: Vec = Vec::new(); + for (era_id, validator_weights) in era_validators.iter() { + let mut json_validator_weights: Vec = Vec::new(); + for (public_key, weight) in validator_weights.iter() { + json_validator_weights.push(JsonValidatorWeight { + public_key: public_key.clone(), + weight: *weight, + }); + } + json_era_validators.push(JsonEraValidators { + era_id: *era_id, + validator_weights: json_validator_weights, + }); + } + + let staking = { + let mut staking: Staking = BTreeMap::new(); + for bid_kind in bids.iter().filter(|x| x.is_unified()) { + if let BidKind::Unified(bid) = bid_kind { + let public_key = bid.validator_public_key().clone(); + let validator_bid = ValidatorBid::unlocked( + bid.validator_public_key().clone(), + *bid.bonding_purse(), + *bid.staked_amount(), + *bid.delegation_rate(), + 0, + u64::MAX, + 0, + ); + let mut delegators: BTreeMap = BTreeMap::new(); + for (delegator_public_key, delegator) in bid.delegators() { + delegators.insert( + DelegatorKind::PublicKey(delegator_public_key.clone()), + DelegatorBid::from(delegator.clone()), + ); + } + staking.insert(public_key, (validator_bid, delegators)); + } + } + + for bid_kind in bids.iter().filter(|x| x.is_validator()) { + if let BidKind::Validator(validator_bid) = bid_kind { + let public_key = validator_bid.validator_public_key().clone(); + staking.insert(public_key, (*validator_bid.clone(), BTreeMap::new())); + } + } + + for bid_kind in bids.iter().filter(|x| x.is_delegator()) { + if let BidKind::Delegator(delegator_bid) = bid_kind { + let validator_public_key = delegator_bid.validator_public_key().clone(); + if let Entry::Occupied(mut occupant) = + staking.entry(validator_public_key.clone()) + { + let (_, delegators) = 
occupant.get_mut(); + delegators.insert( + delegator_bid.delegator_kind().clone(), + *delegator_bid.clone(), + ); + } + } + } + staking + }; + + let mut bids: BTreeMap = BTreeMap::new(); + for (public_key, (validator_bid, delegators)) in staking { + let bid = Bid::from_non_unified(validator_bid, delegators); + bids.insert(public_key, bid); + } + + AuctionState { + state_root_hash, + block_height, + era_validators: json_era_validators, + bids, + } + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + pub fn example() -> &'static Self { + &AUCTION_INFO + } +} + +impl DocExample for AuctionState { + fn doc_example() -> &'static Self { + AuctionState::example() + } +} + +struct BidLabels; + +impl KeyValueLabels for BidLabels { + const KEY: &'static str = "public_key"; + const VALUE: &'static str = "bid"; +} + +impl KeyValueJsonSchema for BidLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("PublicKeyAndBid"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = + Some("A bid associated with the given public key."); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some("The public key of the bidder."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The bid details."); +} diff --git a/rpc_sidecar/src/rpcs/state_get_auction_info_v2.rs b/rpc_sidecar/src/rpcs/state_get_auction_info_v2.rs new file mode 100644 index 00000000..b9248176 --- /dev/null +++ b/rpc_sidecar/src/rpcs/state_get_auction_info_v2.rs @@ -0,0 +1,478 @@ +//! RPCs of state_get_auction_info_v2. 
+ +use std::{collections::BTreeMap, str, sync::Arc}; + +use crate::rpcs::state::ERA_VALIDATORS; +use async_trait::async_trait; +use casper_types::system::auction::ValidatorBid; +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::common; +use super::state::{ + era_validators_from_snapshot, fetch_bid_kinds, GetAuctionInfoParams, JsonEraValidators, + JsonValidatorWeight, +}; +use super::{ + docs::{DocExample, DOCS_EXAMPLE_API_VERSION}, + ApiVersion, Error, NodeClient, RpcError, RpcWithOptionalParams, CURRENT_API_VERSION, +}; +use casper_types::{ + addressable_entity::EntityKindTag, + system::{ + auction::{BidKind, DelegatorBid, EraValidators, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY}, + AUCTION, + }, + AddressableEntityHash, Digest, GlobalStateIdentifier, Key, PublicKey, U512, +}; + +static GET_AUCTION_INFO_RESULT: Lazy = Lazy::new(|| GetAuctionInfoResult { + api_version: DOCS_EXAMPLE_API_VERSION, + auction_state: AuctionState::doc_example().clone(), +}); +static AUCTION_INFO: Lazy = Lazy::new(|| { + use casper_types::{system::auction::DelegationRate, AccessRights, SecretKey, URef}; + use num_traits::Zero; + + let state_root_hash = Digest::from([11; Digest::LENGTH]); + let validator_secret_key = + SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + let validator_public_key = PublicKey::from(&validator_secret_key); + + let mut bids = vec![]; + let validator_bid = ValidatorBid::unlocked( + validator_public_key.clone(), + URef::new([250; 32], AccessRights::READ_ADD_WRITE), + U512::from(20), + DelegationRate::zero(), + 0, + u64::MAX, + 0, + ); + bids.push(BidKind::Validator(Box::new(validator_bid))); + + let delegator_secret_key = + SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(); + let delegator_public_key = PublicKey::from(&delegator_secret_key); + let delegator_bid = DelegatorBid::unlocked( + delegator_public_key.into(), + U512::from(10), + URef::new([251; 32], 
AccessRights::READ_ADD_WRITE), + validator_public_key, + ); + bids.push(BidKind::Delegator(Box::new(delegator_bid))); + + let height: u64 = 10; + let era_validators = ERA_VALIDATORS.clone(); + AuctionState::new(state_root_hash, height, era_validators, bids) +}); + +/// Result for "state_get_auction_info" RPC response. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetAuctionInfoResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ApiVersion, + /// The auction state. + pub auction_state: AuctionState, +} + +impl DocExample for GetAuctionInfoResult { + fn doc_example() -> &'static Self { + &GET_AUCTION_INFO_RESULT + } +} + +/// "state_get_auction_info_v2" RPC. +pub struct GetAuctionInfo {} + +#[async_trait] +impl RpcWithOptionalParams for GetAuctionInfo { + const METHOD: &'static str = "state_get_auction_info_v2"; + type OptionalRequestParams = GetAuctionInfoParams; + type ResponseResult = GetAuctionInfoResult; + + async fn do_handle_request( + node_client: Arc, + maybe_params: Option, + ) -> Result { + let block_identifier = maybe_params.map(|params| params.block_identifier); + let block_header = common::get_block_header(&*node_client, block_identifier).await?; + + let state_identifier = block_identifier.map(GlobalStateIdentifier::from); + let state_identifier = + state_identifier.unwrap_or(GlobalStateIdentifier::BlockHeight(block_header.height())); + + let is_1x = block_header.protocol_version().value().major == 1; + let bids = fetch_bid_kinds(node_client.clone(), state_identifier, is_1x).await?; + + // always retrieve the latest system contract registry, old versions of the node + // did not write it to the global state + let (registry_value, _) = node_client + .query_global_state(Some(state_identifier), Key::SystemEntityRegistry, vec![]) + .await + .map_err(|err| Error::NodeRequest("system contract registry", err))? + .ok_or(Error::GlobalStateEntryNotFound)? 
+ .into_inner(); + let registry: BTreeMap = registry_value + .into_cl_value() + .ok_or(Error::InvalidAuctionState)? + .into_t() + .map_err(|_| Error::InvalidAuctionState)?; + let &auction_hash = registry.get(AUCTION).ok_or(Error::InvalidAuctionState)?; + + let (snapshot_value, _) = if let Some(result) = node_client + .query_global_state( + Some(state_identifier), + Key::addressable_entity_key(EntityKindTag::System, auction_hash), + vec![SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_owned()], + ) + .await + .map_err(|err| Error::NodeRequest("auction snapshot", err))? + { + result.into_inner() + } else { + node_client + .query_global_state( + Some(state_identifier), + Key::Hash(auction_hash.value()), + vec![SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_owned()], + ) + .await + .map_err(|err| Error::NodeRequest("auction snapshot", err))? + .ok_or(Error::GlobalStateEntryNotFound)? + .into_inner() + }; + let snapshot = snapshot_value + .into_cl_value() + .ok_or(Error::InvalidAuctionState)?; + + let validators = era_validators_from_snapshot(snapshot, is_1x)?; + let auction_state = AuctionState::new( + *block_header.state_root_hash(), + block_header.height(), + validators, + bids, + ); + + Ok(Self::ResponseResult { + api_version: CURRENT_API_VERSION, + auction_state, + }) + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct BidKindWrapper { + public_key: PublicKey, + bid: BidKind, +} + +impl BidKindWrapper { + /// ctor + pub fn new(public_key: PublicKey, bid: BidKind) -> Self { + Self { public_key, bid } + } +} + +/// Data structure summarizing auction contract data. +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, JsonSchema)] +#[serde(deny_unknown_fields)] +#[schemars(rename = "AuctionStateV2")] +pub(crate) struct AuctionState { + /// Global state hash. + state_root_hash: Digest, + /// Block height. + block_height: u64, + /// Era validators. + era_validators: Vec, + /// All bids. 
+ bids: Vec, +} + +impl AuctionState { + /// Ctor + pub fn new( + state_root_hash: Digest, + block_height: u64, + era_validators: EraValidators, + bids: Vec, + ) -> Self { + let mut json_era_validators: Vec = Vec::new(); + for (era_id, validator_weights) in era_validators.iter() { + let mut json_validator_weights: Vec = Vec::new(); + for (public_key, weight) in validator_weights.iter() { + json_validator_weights.push(JsonValidatorWeight::new(public_key.clone(), *weight)); + } + json_era_validators.push(JsonEraValidators::new(*era_id, json_validator_weights)); + } + + let bids = bids + .into_iter() + .map(|bid_kind| BidKindWrapper::new(bid_kind.validator_public_key(), bid_kind)) + .collect(); + + AuctionState { + state_root_hash, + block_height, + era_validators: json_era_validators, + bids, + } + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + pub fn example() -> &'static Self { + &AUCTION_INFO + } +} + +impl DocExample for AuctionState { + fn doc_example() -> &'static Self { + AuctionState::example() + } +} + +#[cfg(test)] +mod tests { + use crate::{ + rpcs::{ + state_get_auction_info_v2::{AuctionState, GetAuctionInfo, GetAuctionInfoResult}, + test_utils::BinaryPortMock, + RpcWithOptionalParams, CURRENT_API_VERSION, + }, + SUPPORTED_PROTOCOL_VERSION, + }; + use casper_binary_port::InformationRequest; + use casper_types::{ + system::{ + auction::{ + Bid, BidKind, DelegatorKind, SeigniorageRecipientV1, SeigniorageRecipientV2, + SeigniorageRecipientsV1, SeigniorageRecipientsV2, ValidatorBid, + }, + AUCTION, + }, + testing::TestRng, + AddressableEntityHash, CLValue, EraId, PublicKey, StoredValue, TestBlockV1Builder, U512, + }; + use rand::Rng; + use std::{collections::BTreeMap, sync::Arc}; + + #[tokio::test] + async fn should_read_pre_condor_auction_info_with_addressable_entity_off() { + let rng = &mut TestRng::new(); + let mut binary_port_mock = BinaryPortMock::new(); + let auction_hash: AddressableEntityHash = 
AddressableEntityHash::new(rng.gen()); + let block_header = TestBlockV1Builder::new() + .build_versioned(rng) + .clone_header(); + let registry = BTreeMap::from([(AUCTION.to_string(), auction_hash)]); + let public_key_1 = PublicKey::random(rng); + let public_key_2 = PublicKey::random(rng); + let recipient_v1 = SeigniorageRecipientV1::new( + U512::from(125), + 50, + BTreeMap::from([(public_key_2, U512::from(500))]), + ); + let recipients_1: BTreeMap = + BTreeMap::from([(public_key_1.clone(), recipient_v1)]); + let v1_recipients: BTreeMap = + BTreeMap::from([(EraId::new(100), recipients_1)]); + let stored_value = StoredValue::CLValue(CLValue::from_t(v1_recipients.clone()).unwrap()); + let bid_1 = Bid::empty(PublicKey::random(rng), rng.gen()); + let bids = vec![bid_1]; + let state_identifier = Some(casper_types::GlobalStateIdentifier::BlockHeight( + block_header.height(), + )); + binary_port_mock + .add_block_header_req_res(block_header.clone(), InformationRequest::BlockHeader(None)) + .await; + binary_port_mock + .add_bids_fetch_res(bids.clone(), state_identifier) + .await; + binary_port_mock + .add_system_registry(state_identifier, registry) + .await; + binary_port_mock + .add_seigniorage_snapshot_under_addressable_entity(state_identifier, auction_hash, None) + .await; + binary_port_mock + .add_seigniorage_snapshot_under_key_hash( + state_identifier, + auction_hash, + Some(stored_value), + ) + .await; + let resp = GetAuctionInfo::do_handle_request(Arc::new(binary_port_mock), None) + .await + .expect("should handle request"); + let bids = bids + .into_iter() + .map(|b| BidKind::Unified(Box::new(b))) + .collect(); + + let expected_validators = BTreeMap::from([( + EraId::new(100), + BTreeMap::from([(public_key_1, U512::from(625))]), + )]); + + assert_eq!( + resp, + GetAuctionInfoResult { + api_version: CURRENT_API_VERSION, + auction_state: AuctionState::new( + *block_header.state_root_hash(), + block_header.height(), + expected_validators, + bids + ), + } + ); + } + + 
#[tokio::test] + async fn should_read_condor_auction_info_with_addressable_entity_off() { + let rng = &mut TestRng::new(); + let mut binary_port_mock = BinaryPortMock::new(); + let auction_hash: AddressableEntityHash = AddressableEntityHash::new(rng.gen()); + let block_header = TestBlockV1Builder::new() + .protocol_version(SUPPORTED_PROTOCOL_VERSION) + .build_versioned(rng) + .clone_header(); + let registry = BTreeMap::from([(AUCTION.to_string(), auction_hash)]); + let public_key_1 = PublicKey::random(rng); + let public_key_2 = PublicKey::random(rng); + let public_key_3 = PublicKey::random(rng); + let delegator_kind_1 = DelegatorKind::PublicKey(public_key_2); + let delegator_kind_2 = DelegatorKind::PublicKey(public_key_3); + let recipient_v2 = SeigniorageRecipientV2::new( + U512::from(125), + 50, + BTreeMap::from([(delegator_kind_1, U512::from(500))]), + BTreeMap::from([(delegator_kind_2, 75)]), + ); + let recipients_1: BTreeMap = + BTreeMap::from([(public_key_1.clone(), recipient_v2)]); + let v2_recipients: BTreeMap = + BTreeMap::from([(EraId::new(100), recipients_1)]); + let stored_value = StoredValue::CLValue(CLValue::from_t(v2_recipients.clone()).unwrap()); + let state_identifier = Some(casper_types::GlobalStateIdentifier::BlockHeight( + block_header.height(), + )); + let validator_bid = ValidatorBid::empty(PublicKey::random(rng), rng.gen()); + let bid_kind_1 = BidKind::Validator(Box::new(validator_bid)); + let bid_kinds = vec![bid_kind_1]; + binary_port_mock + .add_block_header_req_res(block_header.clone(), InformationRequest::BlockHeader(None)) + .await; + binary_port_mock + .add_bid_kinds_fetch_res(bid_kinds.clone(), state_identifier) + .await; + binary_port_mock + .add_system_registry(state_identifier, registry) + .await; + binary_port_mock + .add_seigniorage_snapshot_under_addressable_entity(state_identifier, auction_hash, None) + .await; + binary_port_mock + .add_seigniorage_snapshot_under_key_hash( + state_identifier, + auction_hash, + 
Some(stored_value), + ) + .await; + let resp = GetAuctionInfo::do_handle_request(Arc::new(binary_port_mock), None) + .await + .expect("should handle request"); + let expected_validators = BTreeMap::from([( + EraId::new(100), + BTreeMap::from([(public_key_1, U512::from(625))]), + )]); + + assert_eq!( + resp, + GetAuctionInfoResult { + api_version: CURRENT_API_VERSION, + auction_state: AuctionState::new( + *block_header.state_root_hash(), + block_header.height(), + expected_validators, + bid_kinds + ), + } + ); + } + + #[tokio::test] + async fn should_read_condor_auction_info_with_addressable_entity_on() { + let rng = &mut TestRng::new(); + let mut binary_port_mock = BinaryPortMock::new(); + let auction_hash: AddressableEntityHash = AddressableEntityHash::new(rng.gen()); + let block_header = TestBlockV1Builder::new() + .protocol_version(SUPPORTED_PROTOCOL_VERSION) + .build_versioned(rng) + .clone_header(); + let registry = BTreeMap::from([(AUCTION.to_string(), auction_hash)]); + let public_key_1 = PublicKey::random(rng); + let public_key_2 = PublicKey::random(rng); + let public_key_3 = PublicKey::random(rng); + let delegator_kind_1 = DelegatorKind::PublicKey(public_key_2); + let delegator_kind_2 = DelegatorKind::PublicKey(public_key_3); + let recipient_v2 = SeigniorageRecipientV2::new( + U512::from(125), + 50, + BTreeMap::from([(delegator_kind_1, U512::from(500))]), + BTreeMap::from([(delegator_kind_2, 75)]), + ); + let recipients_1: BTreeMap = + BTreeMap::from([(public_key_1.clone(), recipient_v2)]); + let v2_recipients: BTreeMap = + BTreeMap::from([(EraId::new(100), recipients_1)]); + let stored_value = StoredValue::CLValue(CLValue::from_t(v2_recipients.clone()).unwrap()); + let state_identifier = Some(casper_types::GlobalStateIdentifier::BlockHeight( + block_header.height(), + )); + let validator_bid = ValidatorBid::empty(PublicKey::random(rng), rng.gen()); + let bid_kind_1 = BidKind::Validator(Box::new(validator_bid)); + let bid_kinds = vec![bid_kind_1]; + 
binary_port_mock + .add_block_header_req_res(block_header.clone(), InformationRequest::BlockHeader(None)) + .await; + binary_port_mock + .add_bid_kinds_fetch_res(bid_kinds.clone(), state_identifier) + .await; + binary_port_mock + .add_system_registry(state_identifier, registry) + .await; + binary_port_mock + .add_seigniorage_snapshot_under_addressable_entity( + state_identifier, + auction_hash, + Some(stored_value), + ) + .await; + let resp = GetAuctionInfo::do_handle_request(Arc::new(binary_port_mock), None) + .await + .expect("should handle request"); + let expected_validators = BTreeMap::from([( + EraId::new(100), + BTreeMap::from([(public_key_1, U512::from(625))]), + )]); + + assert_eq!( + resp, + GetAuctionInfoResult { + api_version: CURRENT_API_VERSION, + auction_state: AuctionState::new( + *block_header.state_root_hash(), + block_header.height(), + expected_validators, + bid_kinds + ), + } + ); + } +} diff --git a/rpc_sidecar/src/rpcs/test_utils.rs b/rpc_sidecar/src/rpcs/test_utils.rs new file mode 100644 index 00000000..5c4c86e5 --- /dev/null +++ b/rpc_sidecar/src/rpcs/test_utils.rs @@ -0,0 +1,177 @@ +use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; + +use async_trait::async_trait; +use casper_binary_port::{ + BinaryResponse, BinaryResponseAndRequest, Command, GetRequest, GlobalStateEntityQualifier, + GlobalStateQueryResult, GlobalStateRequest, InformationRequest, +}; +use casper_types::{ + addressable_entity::EntityKindTag, + bytesrepr::ToBytes, + system::auction::{Bid, BidKind, EraInfo, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY}, + AddressableEntityHash, BlockHeader, CLValue, GlobalStateIdentifier, Key, KeyTag, StoredValue, +}; +use tokio::sync::Mutex; + +use crate::{ClientError, NodeClient}; + +pub(crate) struct BinaryPortMock { + request_responses: Arc>>, +} + +impl BinaryPortMock { + pub fn new() -> Self { + Self { + request_responses: Arc::new(Mutex::new(vec![])), + } + } + + pub async fn add_era_info_req_res( + &mut self, + era_info: EraInfo, 
+ state_identifier: Option, + ) { + let req = GlobalStateRequest::new( + state_identifier, + GlobalStateEntityQualifier::Item { + base_key: Key::EraSummary, + path: vec![], + }, + ); + let req = Command::Get(GetRequest::State(Box::new(req))); + let stored_value = StoredValue::EraInfo(era_info); + let res = BinaryResponse::from_value(GlobalStateQueryResult::new(stored_value, vec![])); + self.when_then(req, res).await; + } + + pub async fn add_block_header_req_res( + &mut self, + block_header: BlockHeader, + information_request: InformationRequest, + ) { + let get_request = information_request + .try_into() + .expect("should create request"); + let req = Command::Get(get_request); + let res = BinaryResponse::from_option(Some(block_header)); + self.when_then(req, res).await; + } + + pub async fn add_bid_kinds_fetch_res( + &mut self, + bid_kinds: Vec, + state_identifier: Option, + ) { + let req = GetRequest::State(Box::new(GlobalStateRequest::new( + state_identifier, + GlobalStateEntityQualifier::AllItems { + key_tag: KeyTag::BidAddr, + }, + ))); + let stored_values: Vec = + bid_kinds.into_iter().map(StoredValue::BidKind).collect(); + let res = BinaryResponse::from_value(stored_values); + self.when_then(Command::Get(req), res).await; + } + + pub async fn add_bids_fetch_res( + &mut self, + bids: Vec, + state_identifier: Option, + ) { + let req = GetRequest::State(Box::new(GlobalStateRequest::new( + state_identifier, + GlobalStateEntityQualifier::AllItems { + key_tag: KeyTag::Bid, + }, + ))); + + let stored_values: Vec = bids + .into_iter() + .map(|b| StoredValue::Bid(Box::new(b))) + .collect(); + let res = BinaryResponse::from_value(stored_values); + self.when_then(Command::Get(req), res).await; + } + + pub async fn add_system_registry( + &mut self, + state_identifier: Option, + registry: BTreeMap, + ) { + let req = GetRequest::State(Box::new(GlobalStateRequest::new( + state_identifier, + GlobalStateEntityQualifier::Item { + base_key: Key::SystemEntityRegistry, + path: 
vec![], + }, + ))); + let cl_value = CLValue::from_t(registry).unwrap(); + let stored_value = StoredValue::CLValue(cl_value); + + let res = BinaryResponse::from_value(GlobalStateQueryResult::new(stored_value, vec![])); + self.when_then(Command::Get(req), res).await; + } + + pub async fn add_seigniorage_snapshot_under_addressable_entity( + &mut self, + state_identifier: Option, + auction_hash: AddressableEntityHash, + maybe_seigniorage_snapshot: Option, + ) { + let base_key = Key::addressable_entity_key(EntityKindTag::System, auction_hash); + let req = GetRequest::State(Box::new(GlobalStateRequest::new( + state_identifier, + GlobalStateEntityQualifier::Item { + base_key, + path: vec![SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_owned()], + }, + ))); + let res = BinaryResponse::from_option( + maybe_seigniorage_snapshot.map(|v| GlobalStateQueryResult::new(v, vec![])), + ); + self.when_then(Command::Get(req), res).await; + } + + pub async fn add_seigniorage_snapshot_under_key_hash( + &mut self, + state_identifier: Option, + auction_hash: AddressableEntityHash, + maybe_seigniorage_snapshot: Option, + ) { + let base_key = Key::Hash(auction_hash.value()); + let req = GetRequest::State(Box::new(GlobalStateRequest::new( + state_identifier, + GlobalStateEntityQualifier::Item { + base_key, + path: vec![SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_owned()], + }, + ))); + let res = BinaryResponse::from_option( + maybe_seigniorage_snapshot.map(|v| GlobalStateQueryResult::new(v, vec![])), + ); + self.when_then(Command::Get(req), res).await; + } + + pub async fn when_then(&self, when: Command, then: BinaryResponse) { + let payload = when.to_bytes().unwrap(); + let response_and_request = BinaryResponseAndRequest::new(then, payload.into()); + let mut guard = self.request_responses.lock().await; + guard.push((when, response_and_request)); + } +} + +#[async_trait] +impl NodeClient for BinaryPortMock { + async fn send_request(&self, req: Command) -> Result { + let mut guard = 
self.request_responses.lock().await; + let (request, response) = guard.remove(0); + if request != req { + panic!( + "Got unexpected request: {:?}. \n\n Expected {:?}", + req, request + ) + } + Ok(response) + } +} diff --git a/rpc_sidecar/src/rpcs/types.rs b/rpc_sidecar/src/rpcs/types.rs new file mode 100644 index 00000000..1f451d10 --- /dev/null +++ b/rpc_sidecar/src/rpcs/types.rs @@ -0,0 +1,60 @@ +use casper_types::{ + contract_messages::Messages, execution::Effects, BlockHash, Digest, EraId, Gas, PublicKey, + Timestamp, Transfer, +}; +use schemars::JsonSchema; + +#[allow(dead_code)] +#[derive(JsonSchema)] +#[schemars(deny_unknown_fields, rename = "MinimalBlockInfo")] +/// Minimal info about a `Block` needed to satisfy the node status request. +pub(crate) struct MinimalBlockInfoSchema { + hash: BlockHash, + timestamp: Timestamp, + era_id: EraId, + height: u64, + state_root_hash: Digest, + creator: PublicKey, +} + +#[allow(dead_code)] +#[derive(JsonSchema)] +#[schemars(rename = "SpeculativeExecutionResult")] +pub(crate) struct SpeculativeExecutionResultSchema { + /// Block hash against which the execution was performed. + block_hash: BlockHash, + /// List of transfers that happened during execution. + transfers: Vec, + /// Gas limit. + limit: Gas, + /// Gas consumed. + consumed: Gas, + /// Execution effects. + effects: Effects, + /// Messages emitted during execution. + messages: Messages, + /// Did the wasm execute successfully? 
+ error: Option, +} + +#[cfg(test)] +mod tests { + use casper_binary_port::SpeculativeExecutionResult; + use casper_types::testing::TestRng; + use schemars::schema_for; + use serde_json::json; + + use crate::rpcs::types::SpeculativeExecutionResultSchema; + + #[test] + pub fn speculative_execution_result_should_validate_against_schema() { + let mut test_rng = TestRng::new(); + let ser = SpeculativeExecutionResult::random(&mut test_rng); + let schema_struct = schema_for!(SpeculativeExecutionResultSchema); + + let schema = json!(schema_struct); + let instance = serde_json::to_value(&ser).expect("should json-serialize result"); + + assert!(jsonschema::is_valid(&schema, &instance)); + } +} diff --git a/rpc_sidecar/src/speculative_exec_config.rs b/rpc_sidecar/src/speculative_exec_config.rs new file mode 100644 index 00000000..a1a96a44 --- /dev/null +++ b/rpc_sidecar/src/speculative_exec_config.rs @@ -0,0 +1,55 @@ +use std::net::{IpAddr, Ipv4Addr}; + +use datasize::DataSize; +use serde::Deserialize; + +/// Default binding address for the speculative execution RPC HTTP server. +/// +/// Uses a fixed port per node, but binds on any interface. +const DEFAULT_IP_ADDRESS: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); +const DEFAULT_PORT: u16 = 1; +/// Default rate limit in qps. +const DEFAULT_QPS_LIMIT: u64 = 1; +/// Default max body bytes (2.5MB). +const DEFAULT_MAX_BODY_BYTES: u32 = 2_621_440; +/// Default CORS origin. +const DEFAULT_CORS_ORIGIN: &str = ""; + +/// JSON-RPC HTTP server configuration. +#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct Config { + /// Setting to enable the HTTP server. + pub enable_server: bool, + /// IP address to bind JSON-RPC speculative execution server to. + pub ip_address: IpAddr, + /// Port to bind JSON-RPC speculative execution server to. 
+ pub port: u16, + /// Maximum rate limit in queries per second. + pub qps_limit: u64, + /// Maximum number of bytes to accept in a single request body. + pub max_body_bytes: u32, + /// CORS origin. + pub cors_origin: String, +} + +impl Config { + /// Creates a default instance for `RpcServer`. + pub fn new() -> Self { + Config { + enable_server: false, + ip_address: DEFAULT_IP_ADDRESS, + port: DEFAULT_PORT, + qps_limit: DEFAULT_QPS_LIMIT, + max_body_bytes: DEFAULT_MAX_BODY_BYTES, + cors_origin: DEFAULT_CORS_ORIGIN.to_string(), + } + } +} + +impl Default for Config { + fn default() -> Self { + Config::new() + } +} diff --git a/rpc_sidecar/src/speculative_exec_server.rs b/rpc_sidecar/src/speculative_exec_server.rs new file mode 100644 index 00000000..10ea3bfe --- /dev/null +++ b/rpc_sidecar/src/speculative_exec_server.rs @@ -0,0 +1,71 @@ +use std::sync::Arc; + +use hyper::server::{conn::AddrIncoming, Builder}; + +use casper_json_rpc::{CorsOrigin, RequestHandlersBuilder}; + +use crate::{ + node_client::NodeClient, + rpcs::{ + speculative_exec::{SpeculativeExec, SpeculativeExecTxn, SpeculativeRpcDiscover}, + RpcWithParams, RpcWithoutParams, + }, +}; + +/// The URL path for all JSON-RPC requests. +pub const SPECULATIVE_EXEC_API_PATH: &str = "rpc"; + +pub const SPECULATIVE_EXEC_SERVER_NAME: &str = "speculative execution"; + +/// Run the speculative execution server. 
+pub async fn run( + node: Arc, + builder: Builder, + qps_limit: u64, + max_body_bytes: u32, + cors_origin: String, +) { + let mut handlers = RequestHandlersBuilder::new(); + SpeculativeExecTxn::register_as_handler(node.clone(), &mut handlers); + SpeculativeExec::register_as_handler(node.clone(), &mut handlers); + SpeculativeRpcDiscover::register_as_handler(node, &mut handlers); + let handlers = handlers.build(); + + match cors_origin.as_str() { + "" => { + super::rpcs::run( + builder, + handlers, + qps_limit, + max_body_bytes, + SPECULATIVE_EXEC_API_PATH, + SPECULATIVE_EXEC_SERVER_NAME, + ) + .await; + } + "*" => { + super::rpcs::run_with_cors( + builder, + handlers, + qps_limit, + max_body_bytes, + SPECULATIVE_EXEC_API_PATH, + SPECULATIVE_EXEC_SERVER_NAME, + CorsOrigin::Any, + ) + .await + } + _ => { + super::rpcs::run_with_cors( + builder, + handlers, + qps_limit, + max_body_bytes, + SPECULATIVE_EXEC_API_PATH, + SPECULATIVE_EXEC_SERVER_NAME, + CorsOrigin::Specified(cors_origin), + ) + .await + } + } +} diff --git a/rpc_sidecar/src/testing/mod.rs b/rpc_sidecar/src/testing/mod.rs new file mode 100644 index 00000000..0dfb9cd9 --- /dev/null +++ b/rpc_sidecar/src/testing/mod.rs @@ -0,0 +1,151 @@ +use std::sync::Arc; +use std::time::Duration; + +use casper_binary_port::{ + BinaryMessage, BinaryMessageCodec, BinaryResponse, BinaryResponseAndRequest, Command, + GetRequest, GlobalStateQueryResult, +}; +use casper_types::bytesrepr::Bytes; +use casper_types::{bytesrepr::ToBytes, CLValue, StoredValue}; +use futures::{SinkExt, StreamExt}; +use tokio::sync::Notify; +use tokio::task::JoinHandle; +use tokio::{ + net::{TcpListener, TcpStream}, + time::sleep, +}; +use tokio_util::codec::{Encoder, Framed}; + +use crate::encode_request; + +const LOCALHOST: &str = "127.0.0.1"; +const MESSAGE_SIZE: u32 = 1024 * 1024 * 10; + +pub struct BinaryPortMock { + port: u16, + response: Vec, + number_of_responses: u8, +} + +impl BinaryPortMock { + pub fn new(port: u16, response: Vec, 
number_of_responses: u8) -> Self { + Self { + port, + response, + number_of_responses, + } + } + + pub async fn start(&self, shutdown: Arc) { + let port = self.port; + let addr = format!("{}:{}", LOCALHOST, port); + let listener = TcpListener::bind(addr.clone()) + .await + .expect("failed to listen"); + loop { + tokio::select! { + _ = shutdown.notified() => { + break; + } + val = listener.accept() => { + match val { + Ok((stream, _addr)) => { + let response_payload = self.response.clone(); + tokio::spawn(handle_client(stream, response_payload, self.number_of_responses)); + } + Err(io_err) => { + println!("acceptance failure: {:?}", io_err); + } + } + } + } + } + } +} + +async fn handle_client(stream: TcpStream, response: Vec, number_of_responses: u8) { + let mut client = Framed::new(stream, BinaryMessageCodec::new(MESSAGE_SIZE)); + + let next_message = client.next().await; + if next_message.is_some() { + tokio::spawn({ + async move { + for _ in 0..number_of_responses { + let _ = client.send(BinaryMessage::new(response.clone())).await; + } + } + }); + } +} + +pub fn get_port() -> u16 { + portpicker::pick_unused_port().unwrap() +} + +pub async fn start_mock_binary_port_responding_with_stored_value( + port: u16, + request_id: Option, + number_of_responses: Option, + shutdown: Arc, +) -> JoinHandle<()> { + let value = StoredValue::CLValue(CLValue::from_t("Foo").unwrap()); + let data = GlobalStateQueryResult::new(value, vec![]); + let val = BinaryResponse::from_value(data); + let request = get_dummy_request_payload(request_id); + let response = BinaryResponseAndRequest::new(val, request); + start_mock_binary_port( + port, + response.to_bytes().unwrap(), + number_of_responses.unwrap_or(1), // Single response by default + shutdown, + ) + .await +} + +pub async fn start_mock_binary_port_responding_with_given_response( + port: u16, + request_id: Option, + number_of_responses: Option, + shutdown: Arc, + binary_response: BinaryResponse, +) -> JoinHandle<()> { + let request = 
get_dummy_request_payload(request_id); + let response = BinaryResponseAndRequest::new(binary_response, request); + start_mock_binary_port( + port, + response.to_bytes().unwrap(), + number_of_responses.unwrap_or(1), // Single response by default + shutdown, + ) + .await +} + +pub async fn start_mock_binary_port( + port: u16, + data: Vec, + number_of_responses: u8, + shutdown: Arc, +) -> JoinHandle<()> { + let handler = tokio::spawn(async move { + let binary_port = BinaryPortMock::new(port, data, number_of_responses); + binary_port.start(shutdown).await; + }); + sleep(Duration::from_secs(3)).await; // This should be handled differently, preferably the mock binary port should inform that it already bound to the port + handler +} + +pub(crate) fn get_dummy_request() -> Command { + Command::Get(GetRequest::Information { + info_type_tag: 0, + key: vec![], + }) +} + +pub(crate) fn get_dummy_request_payload(request_id: Option) -> Bytes { + let dummy_request = get_dummy_request(); + let bytes = encode_request(&dummy_request, request_id.unwrap_or_default()).unwrap(); + let mut codec = BinaryMessageCodec::new(u32::MAX); + let mut buf = bytes::BytesMut::new(); + codec.encode(BinaryMessage::new(bytes), &mut buf).unwrap(); + Bytes::from(buf.freeze().to_vec()) +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml index bbd3374c..f02e0fc6 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.74.0" +channel = "1.84.0" components = [ "rustfmt", "clippy" ] targets = [ "wasm32-unknown-unknown" ] -profile = "minimal" \ No newline at end of file +profile = "minimal" diff --git a/sidecar/Cargo.toml b/sidecar/Cargo.toml index ebc56509..93ea2e93 100644 --- a/sidecar/Cargo.toml +++ b/sidecar/Cargo.toml @@ -1,94 +1,65 @@ [package] -name = "casper-event-sidecar" -authors = ["George Williamson ", "Jakub Zajkowski "] -version = "1.0.0" -edition = "2018" +name = "casper-sidecar" +version = "1.0.1" +authors = ["Jakub Zajkowski "] +edition = 
"2021" +description = "Base module that spins up casper sidecar" readme = "README.md" -description = "App for storing and republishing sse events of a casper node" -license-file = "../LICENSE" -documentation = "README.md" -homepage = "https://github.com/CasperLabs/event-sidecar" -repository = "https://github.com/CasperLabs/event-sidecar" - -[features] -additional-metrics = ["casper-event-types/additional-metrics"] +homepage = "https://casperlabs.io" +repository = "https://github.com/casper-network/casper-sidecar/tree/dev" +license = "Apache-2.0" [dependencies] -anyhow = { version = "1.0.44", default-features = false } -async-trait = "0.1.56" -bytes = "1.2.0" -casper-event-listener = { path = "../listener", version = "1.0.0" } -casper-event-types = { path = "../types", version = "1.0.0" } -casper-types = { version = "3.0.0", features = ["std", "json-schema"] } +anyhow = { workspace = true } +backtrace = "0.3.69" +casper-event-sidecar = { workspace = true } +casper-event-types = { workspace = true } +casper-rpc-sidecar = { workspace = true } clap = { version = "4.0.32", features = ["derive"] } -derive-new = "0.5.9" -eventsource-stream = "0.2.3" -futures = "0.3.17" -hex = "0.4.3" -hex_fmt = "0.3.0" -http = "0.2.1" -hyper = "0.14.4" -indexmap = "2.0.0" -itertools = "0.10.3" -jsonschema = "0.17.1" -rand = "0.8.3" -regex = "1.6.0" -reqwest = "0.11.11" -schemars = "0.8.5" -sea-query = "0.30" -serde = { version = "1.0", features = ["derive", "rc"] } -serde_json = "1.0" -sqlx = { version = "0.7", features = ["runtime-tokio-native-tls", "any", "sqlite", "postgres"] } -thiserror = "1" -tokio = { version = "1.23.1", features = ["full"] } -tokio-stream = { version = "0.1.4", features = ["sync"] } -toml = "0.5.8" -tower = { version = "0.4.13", features = ["buffer", "limit", "make", "timeout"] } -tracing = "0.1" -tracing-subscriber = "0.3" -utoipa = { version = "3.4.4", features = ["rc_schema"]} -utoipa-swagger-ui = { version = "3.1.5" } -warp = { version = "0.3.6", features = 
["compression"] } -wheelbuf = "0.2.0" -once_cell = { workspace = true } +datasize = { workspace = true, features = ["detailed", "fake_clock-types"] } +futures = { workspace = true } +num_cpus = "1" +serde = { workspace = true, default-features = false, features = ["alloc", "derive"] } +tokio = { workspace = true, features = ["full"] } +toml = { workspace = true } +tracing = { workspace = true, default-features = true } +tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "json"] } +thiserror = { workspace = true } +derive-new = "0.6.0" +async-trait = { workspace = true } + +[dev-dependencies] +casper-event-sidecar = { workspace = true, features = ["testing"] } +casper-rpc-sidecar = { workspace = true, features = ["testing"] } [target.'cfg(not(target_env = "msvc"))'.dependencies] tikv-jemallocator = "0.5" -[dev-dependencies] -async-stream = { workspace = true } -casper-event-types = { path = "../types", version = "1.0.0", features = ["sse-data-testing"] } -casper-types = { version = "3.0.0", features = ["std", "testing"] } -colored = "2.0.0" -futures-util = { workspace = true } -portpicker = "0.1.1" -pretty_assertions = "1.3.0" -reqwest = { version = "0.11.3", features = ["stream"] } -tabled = { version = "0.10.0", features = ["derive", "color"] } -tempfile = "3" -tokio-util = "0.7.8" -pg-embed = { git = "https://github.com/faokunega/pg-embed", tag = "v0.8.0" } [package.metadata.deb] revision = "0" assets = [ - ["../target/release/casper-event-sidecar", "/usr/bin/casper-event-sidecar", "755"], - ["../resources/ETC_README.md", "/etc/casper-event-sidecar/README.md", "644"], - ["../resources/default_config.toml", "/etc/casper-event-sidecar/config.toml", "644"] + ["../target/release/casper-sidecar", "/usr/bin/casper-sidecar", "755"], + ["../resources/ETC_README.md", "/etc/casper-sidecar/README.md", "644"], + ["../resources/example_configs/default_debian_config.toml", "/etc/casper-sidecar/config.toml", "644"] ] maintainer-scripts = 
"../resources/maintainer_scripts/debian" extended-description = """ -Package for Casper Event Sidecar +Package for Casper Sidecar """ [package.metadata.deb.systemd-units] -unit-scripts = "../resources/maintainer_scripts/casper_event_sidecar" +unit-scripts = "../resources/maintainer_scripts/casper_sidecar" restart-after-upgrade = true [package.metadata.deb.variants.bionic] -name = "casper-event-sidecar" +name = "casper-sidecar" revision = "0+bionic" [package.metadata.deb.variants.focal] -name = "casper-event-sidecar" +name = "casper-sidecar" revision = "0+focal" + +[package.metadata.deb.variants.jammy] +name = "casper-sidecar" +revision = "0+jammy" diff --git a/sidecar/src/component.rs b/sidecar/src/component.rs new file mode 100644 index 00000000..17abdc9d --- /dev/null +++ b/sidecar/src/component.rs @@ -0,0 +1,455 @@ +use anyhow::Error; +use async_trait::async_trait; +use casper_event_sidecar::{ + run as run_sse_sidecar, run_admin_server, run_rest_server, LazyDatabaseWrapper, +}; +use casper_rpc_sidecar::build_rpc_server; +use derive_new::new; +use futures::{future::BoxFuture, FutureExt}; +use std::{ + fmt::{Display, Formatter, Result as FmtResult}, + process::ExitCode, +}; +use tracing::info; + +use crate::config::SidecarConfig; + +#[derive(Debug)] +pub enum ComponentError { + Initialization { + component_name: String, + internal_error: Error, + }, + Runtime { + component_name: String, + internal_error: Error, + }, +} + +impl ComponentError { + pub fn runtime_error(component_name: String, error: Error) -> Self { + ComponentError::Runtime { + component_name, + internal_error: error, + } + } + + pub fn initialization_error(component_name: String, error: Error) -> Self { + ComponentError::Initialization { + component_name, + internal_error: error, + } + } +} + +impl Display for ComponentError { + fn fmt(&self, f: &mut Formatter) -> FmtResult { + match self { + ComponentError::Initialization { + component_name, + internal_error, + } => write!( + f, + "Error 
initializing component '{}': {}", + component_name, internal_error + ), + ComponentError::Runtime { + component_name, + internal_error, + } => write!( + f, + "Error running component '{}': {}", + component_name, internal_error + ), + } + } +} + +/// Abstraction for an individual component of sidecar. The assumption is that this should be +/// a long running task that is spawned into the tokio runtime. +#[async_trait] +pub trait Component { + fn name(&self) -> String; + /// Returns a future that represents the task of the running component. + /// If the return value is Ok(None) it means that the component is disabled (or not configured at all) and should not run. + async fn prepare_component_task( + &self, + config: &SidecarConfig, + ) -> Result>>, ComponentError>; +} + +#[derive(new)] +pub struct SseServerComponent { + maybe_database: Option, +} + +#[async_trait] +impl Component for SseServerComponent { + async fn prepare_component_task( + &self, + config: &SidecarConfig, + ) -> Result>>, ComponentError> { + if let (maybe_database, Some(sse_server_config), Some(storage_config)) = + (&self.maybe_database, &config.sse_server, &config.storage) + { + if sse_server_config.enable_server { + let maybe_database = if let Some(lazy_database_wrapper) = maybe_database { + let database = + lazy_database_wrapper + .acquire() + .await + .clone() + .map_err(|db_err| { + ComponentError::runtime_error(self.name(), (&db_err).into()) + })?; + Some(database) + } else { + None + }; + + // If sse server is configured, both storage config and database must be "Some" here. This should be ensured by prior validation. + let future = run_sse_sidecar( + sse_server_config.clone(), + storage_config.storage_folder.clone(), + maybe_database, + config.network_name.clone(), + ) + .map(|res| res.map_err(|e| ComponentError::runtime_error(self.name(), e))); + Ok(Some(Box::pin(future))) + } else { + info!("SSE server is disabled. 
Skipping..."); + Ok(None) + } + } else { + info!("SSE server not configured. Skipping"); + Ok(None) + } + } + + fn name(&self) -> String { + "sse_event_server".to_string() + } +} + +#[derive(new)] +pub struct RestApiComponent { + maybe_database: Option, +} + +#[async_trait] +impl Component for RestApiComponent { + async fn prepare_component_task( + &self, + config: &SidecarConfig, + ) -> Result>>, ComponentError> { + if let (Some(config), Some(database)) = (&config.rest_api_server, &self.maybe_database) { + if config.enable_server { + let database = + database.acquire().await.as_ref().map_err(|db_err| { + ComponentError::runtime_error(self.name(), db_err.into()) + })?; + let future = run_rest_server(config.clone(), database.clone()) + .map(|res| res.map_err(|e| ComponentError::runtime_error(self.name(), e))); + Ok(Some(Box::pin(future))) + } else { + info!("REST API server is disabled. Skipping..."); + Ok(None) + } + } else { + info!("REST API server not configured. Skipping"); + Ok(None) + } + } + + fn name(&self) -> String { + "rest_api_server".to_string() + } +} + +#[derive(new)] +pub struct AdminApiComponent; + +#[async_trait] +impl Component for AdminApiComponent { + async fn prepare_component_task( + &self, + config: &SidecarConfig, + ) -> Result>>, ComponentError> { + if let Some(config) = &config.admin_api_server { + if config.enable_server { + let future = run_admin_server(config.clone()) + .map(|res| res.map_err(|e| ComponentError::runtime_error(self.name(), e))); + Ok(Some(Box::pin(future))) + } else { + info!("Admin API server is disabled. Skipping."); + Ok(None) + } + } else { + info!("Admin API server not configured. 
Skipping."); + Ok(None) + } + } + + fn name(&self) -> String { + "admin_api_server".to_string() + } +} + +#[derive(new)] +pub struct RpcApiComponent; + +#[async_trait] +impl Component for RpcApiComponent { + async fn prepare_component_task( + &self, + config: &SidecarConfig, + ) -> Result>>, ComponentError> { + if let Some(rpc_server_config) = config.rpc_server.as_ref() { + let is_main_exec_defined = rpc_server_config.main_server.enable_server; + let is_speculative_exec_defined = rpc_server_config + .speculative_exec_server + .as_ref() + .map(|x| x.enable_server) + .unwrap_or(false); + if !is_main_exec_defined && !is_speculative_exec_defined { + //There was no main rpc server of speculative exec server configured, we shouldn't bother with proceeding + info!("RPC API server is disabled. Skipping..."); + return Ok(None); + } + if !is_main_exec_defined { + info!("Main RPC API server is disabled. Only speculative server will be running."); + } + if !is_speculative_exec_defined { + info!("Speculative RPC API server is disabled. Only main RPC API will be running."); + } + let res = + build_rpc_server(rpc_server_config.clone(), config.network_name.clone()).await; + match res { + Ok(None) => Ok(None), + Ok(Some(fut)) => { + let future = fut + .map(|res| res.map_err(|e| ComponentError::runtime_error(self.name(), e))); + Ok(Some(Box::pin(future))) + } + Err(err) => Err(ComponentError::initialization_error(self.name(), err)), + } + } else { + info!("RPC API server not configured. 
Skipping."); + Ok(None) + } + } + + fn name(&self) -> String { + "rpc_api_server".to_string() + } +} + +#[cfg(test)] +mod tests { + use std::{ + net::{IpAddr, Ipv4Addr}, + sync::Arc, + }; + + use super::*; + use crate::config::SidecarConfig; + use casper_rpc_sidecar::{ + testing::{get_port, start_mock_binary_port_responding_with_stored_value}, + NodeClientConfig, RpcServerConfig, SpeculativeExecConfig, + }; + + #[tokio::test] + async fn given_sse_server_component_when_no_db_but_config_defined_should_return_some() { + let component = SseServerComponent::new(None); + let config = all_components_all_enabled(); + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_some()); + } + + #[tokio::test] + async fn given_sse_server_component_when_db_but_no_config_should_return_none() { + let component = SseServerComponent::new(Some(LazyDatabaseWrapper::for_tests())); + let mut config = all_components_all_disabled(); + config.sse_server = None; + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_none()); + } + + #[tokio::test] + async fn given_sse_server_component_when_config_disabled_should_return_none() { + let component = SseServerComponent::new(Some(LazyDatabaseWrapper::for_tests())); + let config = all_components_all_disabled(); + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_none()); + } + + #[tokio::test] + async fn given_sse_server_component_when_db_and_config_should_return_some() { + let component = SseServerComponent::new(Some(LazyDatabaseWrapper::for_tests())); + let config = all_components_all_enabled(); + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_some()); + } + + #[tokio::test] + async fn given_rest_api_server_component_when_no_db_should_return_none() { + let component = RestApiComponent::new(None); + let config = 
all_components_all_enabled(); + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_none()); + } + + #[tokio::test] + async fn given_rest_api_server_component_when_db_but_no_config_should_return_none() { + let component = RestApiComponent::new(Some(LazyDatabaseWrapper::for_tests())); + let mut config = all_components_all_disabled(); + config.rest_api_server = None; + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_none()); + } + + #[tokio::test] + async fn given_rest_api_server_component_when_config_disabled_should_return_none() { + let component = RestApiComponent::new(Some(LazyDatabaseWrapper::for_tests())); + let config = all_components_all_disabled(); + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_none()); + } + + #[tokio::test] + async fn given_rest_api_server_component_when_db_and_config_should_return_some() { + let component = RestApiComponent::new(Some(LazyDatabaseWrapper::for_tests())); + let config = all_components_all_enabled(); + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_some()); + } + + #[tokio::test] + async fn given_admin_api_server_component_when_no_config_should_return_none() { + let component = AdminApiComponent::new(); + let mut config = all_components_all_disabled(); + config.admin_api_server = None; + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_none()); + } + + #[tokio::test] + async fn given_admin_api_server_component_when_config_disabled_should_return_none() { + let component = AdminApiComponent::new(); + let config = all_components_all_disabled(); + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_none()); + } + + #[tokio::test] + async fn 
given_admin_api_server_component_when_config_should_return_some() { + let component = AdminApiComponent::new(); + let config = all_components_all_enabled(); + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_some()); + } + + #[tokio::test] + async fn given_rpc_api_server_component_when_config_disabled_should_return_none() { + let component = RpcApiComponent::new(); + let config = all_components_all_disabled(); + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_none()); + } + + #[tokio::test] + async fn given_rpc_api_server_component_when_config_should_return_some() { + let port = get_port(); + let shutdown = Arc::new(tokio::sync::Notify::new()); + let _mock_server_handle = start_mock_binary_port_responding_with_stored_value( + port, + None, + None, + Arc::clone(&shutdown), + ) + .await; + let component = RpcApiComponent::new(); + let mut config = all_components_all_enabled(); + config.rpc_server.as_mut().unwrap().node_client = + NodeClientConfig::new_with_port_and_retries(port, 1); + + let main_server_config = &mut config.rpc_server.as_mut().unwrap().main_server; + main_server_config.ip_address = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); + main_server_config.port = port; + + let speculative_exec_server_config = config + .rpc_server + .as_mut() + .unwrap() + .speculative_exec_server + .as_mut() + .unwrap(); + speculative_exec_server_config.ip_address = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); + speculative_exec_server_config.port = port; + + let res = component.prepare_component_task(&config).await; + assert!(res.is_ok()); + assert!(res.unwrap().is_some()); + } + + #[tokio::test] + async fn given_rpc_api_server_component_when_no_config_should_return_none() { + let component = RpcApiComponent::new(); + let mut config = all_components_all_disabled(); + config.rpc_server = None; + let res = component.prepare_component_task(&config).await; + 
assert!(res.is_ok()); + assert!(res.unwrap().is_none()); + } + + fn all_components_all_enabled() -> SidecarConfig { + let mut rpc_server = RpcServerConfig::default(); + let speculative_config = SpeculativeExecConfig { + enable_server: true, + ..Default::default() + }; + rpc_server.speculative_exec_server = Some(speculative_config); + SidecarConfig { + storage: Some(Default::default()), + admin_api_server: Some(Default::default()), + rest_api_server: Some(Default::default()), + sse_server: Some(Default::default()), + rpc_server: Some(rpc_server), + ..Default::default() + } + } + + fn all_components_all_disabled() -> SidecarConfig { + let mut config = all_components_all_enabled(); + config.admin_api_server.as_mut().unwrap().enable_server = false; + config.rest_api_server.as_mut().unwrap().enable_server = false; + config + .rpc_server + .as_mut() + .unwrap() + .main_server + .enable_server = false; + config + .rpc_server + .as_mut() + .unwrap() + .speculative_exec_server + .as_mut() + .unwrap() + .enable_server = false; + config.sse_server.as_mut().unwrap().enable_server = false; + config + } +} diff --git a/sidecar/src/config.rs b/sidecar/src/config.rs new file mode 100644 index 00000000..2e76dcfd --- /dev/null +++ b/sidecar/src/config.rs @@ -0,0 +1,262 @@ +use anyhow::bail; +use casper_event_sidecar::{ + AdminApiServerConfig, DatabaseConfigError, RestApiServerConfig, SseEventServerConfig, + StorageConfig, StorageConfigSerdeTarget, +}; +use casper_rpc_sidecar::{FieldParseError, RpcServerConfig}; +use serde::Deserialize; +use thiserror::Error; + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] +pub struct SidecarConfigTarget { + max_thread_count: Option, + max_blocking_thread_count: Option, + network_name: Option, + storage: Option, + rest_api_server: Option, + admin_api_server: Option, + sse_server: Option, + rpc_server: Option, +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] +#[cfg_attr(test, derive(Default))] +pub struct SidecarConfig { + pub 
max_thread_count: Option, + pub max_blocking_thread_count: Option, + pub network_name: Option, + pub sse_server: Option, + pub rpc_server: Option, + pub storage: Option, + pub rest_api_server: Option, + pub admin_api_server: Option, +} + +impl SidecarConfig { + pub fn validate(&self) -> Result<(), anyhow::Error> { + if !self.is_rpc_server_enabled() && !self.is_sse_server_enabled() { + bail!("At least one of RPC server or SSE server must be configured") + } + let is_storage_enabled = self.is_storage_enabled(); + let is_rest_api_server_enabled = self.is_rest_api_server_enabled(); + let is_sse_storing_events = self.is_sse_storing_events(); + if self.is_sse_server_enabled() && self.storage.is_none() { + bail!("Can't run SSE if no `[storage.storage_folder]` is defined") + } + if !is_storage_enabled && is_sse_storing_events { + bail!("Can't run SSE with events persistence enabled without storage defined") + } + //Check if both storages are defined and enabled + if !is_storage_enabled && is_rest_api_server_enabled { + bail!("Can't run Rest api server without storage defined") + } + if !is_sse_storing_events && is_rest_api_server_enabled { + bail!("Can't run Rest api server with SSE events persistence disabled") + } + let is_postgres_enabled = self.is_postgres_enabled(); + let is_sqlite_enabled = self.is_sqlite_enabled(); + if is_storage_enabled && is_postgres_enabled && is_sqlite_enabled { + bail!("Can't run with both postgres and sqlite enabled") + } + Ok(()) + } + + fn is_storage_enabled(&self) -> bool { + self.storage + .as_ref() + .map(|x| x.is_enabled()) + .unwrap_or(false) + } + + fn is_rpc_server_enabled(&self) -> bool { + self.rpc_server.is_some() && self.rpc_server.as_ref().unwrap().main_server.enable_server + } + + fn is_sse_server_enabled(&self) -> bool { + self.sse_server.is_some() && self.sse_server.as_ref().unwrap().enable_server + } + + fn is_sse_storing_events(&self) -> bool { + self.is_sse_server_enabled() && 
!self.sse_server.as_ref().unwrap().disable_event_persistence + } + + fn is_postgres_enabled(&self) -> bool { + self.storage + .as_ref() + .map(|x| x.is_postgres_enabled()) + .unwrap_or(false) + } + + fn is_sqlite_enabled(&self) -> bool { + self.storage + .as_ref() + .map(|x| x.is_sqlite_enabled()) + .unwrap_or(false) + } + + fn is_rest_api_server_enabled(&self) -> bool { + self.rest_api_server.is_some() && self.rest_api_server.as_ref().unwrap().enable_server + } +} + +impl TryFrom for SidecarConfig { + type Error = ConfigReadError; + + fn try_from(value: SidecarConfigTarget) -> Result { + let sse_server_config = value.sse_server; + let storage_config_res: Result, DatabaseConfigError> = value + .storage + .map(|target| target.try_into().map(Some)) + .unwrap_or(Ok(None)); + let storage_config = storage_config_res?; + Ok(SidecarConfig { + max_thread_count: value.max_thread_count, + max_blocking_thread_count: value.max_blocking_thread_count, + network_name: value.network_name, + sse_server: sse_server_config, + rpc_server: value.rpc_server, + storage: storage_config, + rest_api_server: value.rest_api_server, + admin_api_server: value.admin_api_server, + }) + } +} + +#[derive(Error, Debug)] +pub enum ConfigReadError { + #[error("failed to read sidecar configuration. 
Underlying reason: {}", .error)] + GeneralError { error: String }, +} + +impl From for ConfigReadError { + fn from(value: FieldParseError) -> Self { + ConfigReadError::GeneralError { + error: value.to_string(), + } + } +} + +impl From for ConfigReadError { + fn from(value: DatabaseConfigError) -> Self { + ConfigReadError::GeneralError { + error: value.to_string(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn sidecar_config_should_fail_validation_when_sse_server_and_no_storage() { + let config = SidecarConfig { + sse_server: Some(SseEventServerConfig::default_no_persistence()), + storage: None, + ..Default::default() + }; + + let res = config.validate(); + + assert!(res.is_err()); + let error_message = res.err().unwrap().to_string(); + assert!(error_message.contains("Can't run SSE if no `[storage.storage_folder]` is defined")); + } + + #[test] + fn sidecar_config_should_fail_validation_when_sse_server_and_no_defined_dbs() { + let config = SidecarConfig { + sse_server: Some(SseEventServerConfig::default()), + storage: Some(StorageConfig::no_dbs()), + ..Default::default() + }; + + let res = config.validate(); + + assert!(res.is_err()); + let error_message = res.err().unwrap().to_string(); + assert!(error_message + .contains("Can't run SSE with events persistence enabled without storage defined")); + } + + #[test] + fn sidecar_config_should_fail_validation_when_sse_server_and_no_enabled_dbs() { + let config = SidecarConfig { + sse_server: Some(SseEventServerConfig::default()), + storage: Some(StorageConfig::no_enabled_dbs()), + ..Default::default() + }; + + let res = config.validate(); + + assert!(res.is_err()); + let error_message = res.err().unwrap().to_string(); + assert!(error_message + .contains("Can't run SSE with events persistence enabled without storage defined")); + } + + #[test] + fn sidecar_config_should_fail_validation_when_rest_api_server_and_no_storage() { + let config = SidecarConfig { + rpc_server: 
Some(RpcServerConfig::default()), + sse_server: None, + rest_api_server: Some(RestApiServerConfig::default()), + storage: Some(StorageConfig::default()), + ..Default::default() + }; + + let res = config.validate(); + + assert!(res.is_err()); + let error_message = res.err().unwrap().to_string(); + assert!(error_message + .contains("Can't run Rest api server with SSE events persistence disabled")); + } + + #[test] + fn sidecar_config_should_fail_validation_when_two_db_connections_are_defined() { + let config = SidecarConfig { + rpc_server: Some(RpcServerConfig::default()), + sse_server: None, + storage: Some(StorageConfig::two_dbs()), + ..Default::default() + }; + + let res = config.validate(); + + assert!(res.is_err()); + let error_message = res.err().unwrap().to_string(); + assert!(error_message.contains("Can't run with both postgres and sqlite enabled")); + } + + #[test] + fn sidecar_config_should_fail_validation_when_rest_api_and_sse_has_no_persistence() { + let sse_server = SseEventServerConfig::default_no_persistence(); + let config = SidecarConfig { + rpc_server: Some(RpcServerConfig::default()), + sse_server: Some(sse_server), + rest_api_server: Some(RestApiServerConfig::default()), + storage: Some(StorageConfig::default()), + ..Default::default() + }; + + let res = config.validate(); + + assert!(res.is_err()); + let error_message = res.err().unwrap().to_string(); + assert!(error_message + .contains("Can't run Rest api server with SSE events persistence disabled")); + } + + #[test] + fn sidecar_config_should_be_ok_if_rpc_is_defined_and_nothing_else() { + let config = SidecarConfig { + rpc_server: Some(RpcServerConfig::default()), + ..Default::default() + }; + + let res = config.validate(); + + assert!(res.is_ok()); + } +} diff --git a/sidecar/src/config/speculative_exec_config.rs b/sidecar/src/config/speculative_exec_config.rs new file mode 100644 index 00000000..61cc9839 --- /dev/null +++ b/sidecar/src/config/speculative_exec_config.rs @@ -0,0 +1,49 @@ +use 
datasize::DataSize; +use serde::Deserialize; + +/// Default binding address for the speculative execution RPC HTTP server. +/// +/// Uses a fixed port per node, but binds on any interface. +const DEFAULT_ADDRESS: &str = "0.0.0.0:1"; +/// Default rate limit in qps. +const DEFAULT_QPS_LIMIT: u64 = 1; +/// Default max body bytes (2.5MB). +const DEFAULT_MAX_BODY_BYTES: u32 = 2_621_440; +/// Default CORS origin. +const DEFAULT_CORS_ORIGIN: &str = ""; + +/// JSON-RPC HTTP server configuration. +#[derive(Clone, DataSize, Debug, Deserialize)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct SpeculativeExecConfig { + /// Setting to enable the HTTP server. + pub enable_server: bool, + /// Address to bind JSON-RPC speculative execution server to. + pub address: String, + /// Maximum rate limit in queries per second. + pub qps_limit: u64, + /// Maximum number of bytes to accept in a single request body. + pub max_body_bytes: u32, + /// CORS origin. + pub cors_origin: String, +} + +impl SpeculativeExecConfig { + /// Creates a default instance for `RpcServer`. 
+ pub fn new() -> Self { + SpeculativeExecConfig { + enable_server: false, + address: DEFAULT_ADDRESS.to_string(), + qps_limit: DEFAULT_QPS_LIMIT, + max_body_bytes: DEFAULT_MAX_BODY_BYTES, + cors_origin: DEFAULT_CORS_ORIGIN.to_string(), + } + } +} + +impl Default for SpeculativeExecConfig { + fn default() -> Self { + SpeculativeExecConfig::new() + } +} diff --git a/sidecar/src/database/sqlite_database/tests.rs b/sidecar/src/database/sqlite_database/tests.rs deleted file mode 100644 index bacf8a88..00000000 --- a/sidecar/src/database/sqlite_database/tests.rs +++ /dev/null @@ -1,388 +0,0 @@ -use sea_query::{Asterisk, Expr, Query, SqliteQueryBuilder}; -use sqlx::Row; - -use casper_types::testing::TestRng; - -use super::SqliteDatabase; -use crate::{ - sql::tables::{self, event_type::EventTypeId}, - types::{database::DatabaseWriter, sse_events::*}, -}; - -const MAX_CONNECTIONS: u32 = 100; - -#[cfg(test)] -async fn build_database() -> SqliteDatabase { - SqliteDatabase::new_in_memory(MAX_CONNECTIONS) - .await - .expect("Error opening database in memory") -} - -#[tokio::test] -async fn should_save_and_retrieve_a_u32max_id() { - let sqlite_db = build_database().await; - let sql = tables::event_log::create_insert_stmt(1, "source", u32::MAX, "event key") - .expect("Error creating event_log insert SQL") - .to_string(SqliteQueryBuilder); - - let _ = sqlite_db.fetch_one(&sql).await; - - let sql = Query::select() - .column(tables::event_log::EventLog::EventId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_id_u32max = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_id (=u32::MAX) from row"); - - assert_eq!(event_id_u32max, u32::MAX); -} - -#[tokio::test] -async fn should_save_and_retrieve_block_added() { - let sqlite_db = build_database().await; - crate::database::tests::should_save_and_retrieve_block_added(sqlite_db).await; -} - -#[tokio::test] -async fn 
should_save_and_retrieve_deploy_accepted() { - let sqlite_db = build_database().await; - crate::database::tests::should_save_and_retrieve_deploy_accepted(sqlite_db).await; -} - -#[tokio::test] -async fn should_save_and_retrieve_deploy_processed() { - let sqlite_db = build_database().await; - crate::database::tests::should_save_and_retrieve_deploy_processed(sqlite_db).await; -} - -#[tokio::test] -async fn should_save_and_retrieve_deploy_expired() { - let sqlite_db = build_database().await; - crate::database::tests::should_save_and_retrieve_deploy_expired(sqlite_db).await; -} - -#[tokio::test] -async fn should_retrieve_deploy_aggregate_of_accepted() { - let sqlite_db = build_database().await; - crate::database::tests::should_retrieve_deploy_aggregate_of_accepted(sqlite_db).await; -} - -#[tokio::test] -async fn should_retrieve_deploy_aggregate_of_processed() { - let sqlite_db = build_database().await; - crate::database::tests::should_retrieve_deploy_aggregate_of_processed(sqlite_db).await; -} - -#[tokio::test] -async fn should_retrieve_deploy_aggregate_of_expired() { - let sqlite_db = build_database().await; - crate::database::tests::should_retrieve_deploy_aggregate_of_expired(sqlite_db).await; -} - -#[tokio::test] -async fn should_save_and_retrieve_fault() { - let sqlite_db = build_database().await; - crate::database::tests::should_save_and_retrieve_fault(sqlite_db).await; -} - -#[tokio::test] -async fn should_save_and_retrieve_fault_with_a_u64max() { - let sqlite_db = build_database().await; - crate::database::tests::should_save_and_retrieve_fault_with_a_u64max(sqlite_db).await; -} - -#[tokio::test] -async fn should_save_and_retrieve_finality_signature() { - let sqlite_db = build_database().await; - crate::database::tests::should_save_and_retrieve_finality_signature(sqlite_db).await; -} - -#[tokio::test] -async fn should_save_and_retrieve_step() { - let sqlite_db = build_database().await; - crate::database::tests::should_save_and_retrieve_step(sqlite_db).await; -} - 
-#[tokio::test] -async fn should_save_and_retrieve_a_step_with_u64_max_era() { - let sqlite_db = build_database().await; - crate::database::tests::should_save_and_retrieve_a_step_with_u64_max_era(sqlite_db).await; -} - -#[tokio::test] -async fn should_disallow_duplicate_event_id_from_source() { - let sqlite_db = build_database().await; - crate::database::tests::should_disallow_duplicate_event_id_from_source(sqlite_db).await; -} - -#[tokio::test] -async fn should_disallow_insert_of_existing_block_added() { - let sqlite_db = build_database().await; - crate::database::tests::should_disallow_insert_of_existing_block_added(sqlite_db).await; -} - -#[tokio::test] -async fn should_disallow_insert_of_existing_deploy_accepted() { - let sqlite_db = build_database().await; - crate::database::tests::should_disallow_insert_of_existing_deploy_accepted(sqlite_db).await; -} - -#[tokio::test] -async fn should_disallow_insert_of_existing_deploy_expired() { - let sqlite_db = build_database().await; - crate::database::tests::should_disallow_insert_of_existing_deploy_expired(sqlite_db).await; -} - -#[tokio::test] -async fn should_disallow_insert_of_existing_deploy_processed() { - let sqlite_db = build_database().await; - crate::database::tests::should_disallow_insert_of_existing_deploy_processed(sqlite_db).await; -} - -#[tokio::test] -async fn should_disallow_insert_of_existing_fault() { - let sqlite_db = build_database().await; - crate::database::tests::should_disallow_insert_of_existing_fault(sqlite_db).await; -} - -#[tokio::test] -async fn should_disallow_insert_of_existing_finality_signature() { - let sqlite_db = build_database().await; - crate::database::tests::should_disallow_insert_of_existing_finality_signature(sqlite_db).await; -} - -#[tokio::test] -async fn should_disallow_insert_of_existing_step() { - let sqlite_db = build_database().await; - crate::database::tests::should_disallow_insert_of_existing_step(sqlite_db).await; -} - -#[tokio::test] -async fn 
should_save_block_added_with_correct_event_type_id() { - let mut test_rng = TestRng::new(); - - let sqlite_db = build_database().await; - - let block_added = BlockAdded::random(&mut test_rng); - - assert!(sqlite_db - .save_block_added(block_added, 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::BlockAdded as i16) -} - -#[tokio::test] -async fn should_save_deploy_accepted_with_correct_event_type_id() { - let mut test_rng = TestRng::new(); - - let sqlite_db = build_database().await; - - let deploy_accepted = DeployAccepted::random(&mut test_rng); - - assert!(sqlite_db - .save_deploy_accepted(deploy_accepted, 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::DeployAccepted as i16) -} - -#[tokio::test] -async fn should_save_deploy_processed_with_correct_event_type_id() { - let mut test_rng = TestRng::new(); - - let sqlite_db = build_database().await; - - let deploy_processed = DeployProcessed::random(&mut test_rng, None); - - assert!(sqlite_db - .save_deploy_processed(deploy_processed, 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - 
.expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::DeployProcessed as i16) -} - -#[tokio::test] -async fn should_save_deploy_expired_with_correct_event_type_id() { - let mut test_rng = TestRng::new(); - - let sqlite_db = build_database().await; - - let deploy_expired = DeployExpired::random(&mut test_rng, None); - - assert!(sqlite_db - .save_deploy_expired(deploy_expired, 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::DeployExpired as i16) -} - -#[tokio::test] -async fn should_save_fault_with_correct_event_type_id() { - let mut test_rng = TestRng::new(); - - let sqlite_db = build_database().await; - - let fault = Fault::random(&mut test_rng); - - assert!(sqlite_db - .save_fault(fault, 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::Fault as i16) -} - -#[tokio::test] -async fn should_save_finality_signature_with_correct_event_type_id() { - let mut test_rng = TestRng::new(); - - let sqlite_db = build_database().await; - - let finality_signature = FinalitySignature::random(&mut test_rng); - - assert!(sqlite_db - .save_finality_signature(finality_signature, 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - 
.to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::FinalitySignature as i16) -} - -#[tokio::test] -async fn should_save_step_with_correct_event_type_id() { - let mut test_rng = TestRng::new(); - - let sqlite_db = build_database().await; - - let step = Step::random(&mut test_rng); - - assert!(sqlite_db - .save_step(step, 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let sql = Query::select() - .column(tables::event_log::EventLog::EventTypeId) - .from(tables::event_log::EventLog::Table) - .limit(1) - .to_string(SqliteQueryBuilder); - - let event_type_id = sqlite_db - .fetch_one(&sql) - .await - .try_get::(0) - .expect("Error getting event_type_id from row"); - - assert_eq!(event_type_id, EventTypeId::Step as i16) -} - -#[tokio::test] -async fn should_save_and_retrieve_a_shutdown() { - let sqlite_db = build_database().await; - assert!(sqlite_db.save_shutdown(15, "xyz".to_string()).await.is_ok()); - - let sql = Query::select() - .expr(Expr::col(Asterisk)) - .from(tables::shutdown::Shutdown::Table) - .to_string(SqliteQueryBuilder); - let row = sqlite_db.fetch_one(&sql).await; - - assert_eq!( - row.get::("event_source_address"), - "xyz".to_string() - ); -} - -#[tokio::test] -async fn get_number_of_events_should_return_0() { - let sqlite_db = build_database().await; - crate::database::tests::get_number_of_events_should_return_0(sqlite_db).await; -} - -#[tokio::test] -async fn get_number_of_events_should_return_1_when_event_stored() { - let sqlite_db = build_database().await; - crate::database::tests::get_number_of_events_should_return_1_when_event_stored(sqlite_db).await; -} diff --git a/sidecar/src/database/tests.rs b/sidecar/src/database/tests.rs deleted file mode 100644 index e357ce52..00000000 --- a/sidecar/src/database/tests.rs +++ /dev/null @@ -1,430 +0,0 @@ -use crate::types::{ - database::{DatabaseReader, 
DatabaseWriteError, DatabaseWriter}, - sse_events::*, -}; -use casper_types::{testing::TestRng, AsymmetricType, EraId}; -use rand::Rng; - -pub async fn should_save_and_retrieve_block_added(db: DB) { - let mut test_rng = TestRng::new(); - let block_added = BlockAdded::random(&mut test_rng); - - db.save_block_added(block_added.clone(), 1, "127.0.0.1".to_string()) - .await - .expect("Error saving block_added"); - - db.get_latest_block() - .await - .expect("Error getting latest block_added"); - - db.get_block_by_hash(&block_added.hex_encoded_hash()) - .await - .expect("Error getting block_added by hash"); - - db.get_block_by_height(block_added.get_height()) - .await - .expect("Error getting block_added by height"); -} - -pub async fn should_save_and_retrieve_deploy_accepted(db: DB) { - let mut test_rng = TestRng::new(); - - let deploy_accepted = DeployAccepted::random(&mut test_rng); - - db.save_deploy_accepted(deploy_accepted.clone(), 1, "127.0.0.1".to_string()) - .await - .expect("Error saving deploy_accepted"); - - db.get_deploy_accepted_by_hash(&deploy_accepted.hex_encoded_hash()) - .await - .expect("Error getting deploy_accepted by hash"); -} - -pub async fn should_save_and_retrieve_deploy_processed( - db: DB, -) { - let mut test_rng = TestRng::new(); - let deploy_processed = DeployProcessed::random(&mut test_rng, None); - - db.save_deploy_processed(deploy_processed.clone(), 1, "127.0.0.1".to_string()) - .await - .expect("Error saving deploy_processed"); - - db.get_deploy_processed_by_hash(&deploy_processed.hex_encoded_hash()) - .await - .expect("Error getting deploy_processed by hash"); -} - -pub async fn should_save_and_retrieve_deploy_expired(db: DB) { - let mut test_rng = TestRng::new(); - let deploy_expired = DeployExpired::random(&mut test_rng, None); - - db.save_deploy_expired(deploy_expired.clone(), 1, "127.0.0.1".to_string()) - .await - .expect("Error saving deploy_expired"); - - db.get_deploy_expired_by_hash(&deploy_expired.hex_encoded_hash()) - .await - 
.expect("Error getting deploy_expired by hash"); -} - -pub async fn should_retrieve_deploy_aggregate_of_accepted( - db: DB, -) { - let mut test_rng = TestRng::new(); - - let deploy_accepted = DeployAccepted::random(&mut test_rng); - - db.save_deploy_accepted(deploy_accepted.clone(), 1, "127.0.0.1".to_string()) - .await - .expect("Error saving deploy_accepted"); - - db.get_deploy_aggregate_by_hash(&deploy_accepted.hex_encoded_hash()) - .await - .expect("Error getting deploy aggregate by hash"); -} - -pub async fn should_retrieve_deploy_aggregate_of_processed( - db: DB, -) { - let mut test_rng = TestRng::new(); - let deploy_accepted = DeployAccepted::random(&mut test_rng); - let deploy_processed = - DeployProcessed::random(&mut test_rng, Some(deploy_accepted.deploy_hash())); - - db.save_deploy_accepted(deploy_accepted.clone(), 1, "127.0.0.1".to_string()) - .await - .expect("Error saving deploy_accepted"); - - db.save_deploy_processed(deploy_processed, 2, "127.0.0.1".to_string()) - .await - .expect("Error saving deploy_processed"); - - db.get_deploy_aggregate_by_hash(&deploy_accepted.hex_encoded_hash()) - .await - .expect("Error getting deploy aggregate by hash"); -} - -pub async fn should_retrieve_deploy_aggregate_of_expired( - db: DB, -) { - let mut test_rng = TestRng::new(); - let deploy_accepted = DeployAccepted::random(&mut test_rng); - let deploy_expired = DeployExpired::random(&mut test_rng, Some(deploy_accepted.deploy_hash())); - - db.save_deploy_accepted(deploy_accepted.clone(), 1, "127.0.0.1".to_string()) - .await - .expect("Error saving deploy_accepted"); - - db.save_deploy_expired(deploy_expired, 2, "127.0.0.1".to_string()) - .await - .expect("Error saving deploy_expired"); - - db.get_deploy_aggregate_by_hash(&deploy_accepted.hex_encoded_hash()) - .await - .expect("Error getting deploy aggregate by hash"); -} - -pub async fn should_save_and_retrieve_fault(db: DB) { - let mut test_rng = TestRng::new(); - let fault = Fault::random(&mut test_rng); - - 
db.save_fault(fault.clone(), 1, "127.0.0.1".to_string()) - .await - .expect("Error saving fault"); - - db.get_faults_by_era(fault.era_id.value()) - .await - .expect("Error getting faults by era"); - - db.get_faults_by_public_key(&fault.public_key.to_hex()) - .await - .expect("Error getting faults by public key"); -} - -pub async fn should_save_and_retrieve_fault_with_a_u64max( - db: DB, -) { - let mut test_rng = TestRng::new(); - let mut fault = Fault::random(&mut test_rng); - fault.era_id = EraId::new(u64::MAX); - - db.save_fault(fault.clone(), 1, "127.0.0.1".to_string()) - .await - .expect("Error saving fault with a u64::MAX era id"); - - let faults = db - .get_faults_by_era(u64::MAX) - .await - .expect("Error getting faults by era with u64::MAX era id"); - - assert_eq!(faults[0].era_id.value(), u64::MAX); - - let faults = db - .get_faults_by_public_key(&fault.public_key.to_hex()) - .await - .expect("Error getting faults by public key with u64::MAX era id"); - - assert_eq!(faults[0].era_id.value(), u64::MAX); -} - -pub async fn should_save_and_retrieve_finality_signature( - db: DB, -) { - let mut test_rng = TestRng::new(); - let finality_signature = FinalitySignature::random(&mut test_rng); - - db.save_finality_signature(finality_signature.clone(), 1, "127.0.0.1".to_string()) - .await - .expect("Error saving finality_signature"); - - db.get_finality_signatures_by_block(&finality_signature.hex_encoded_block_hash()) - .await - .expect("Error getting finality signatures by block_hash"); -} - -pub async fn should_save_and_retrieve_step(db: DB) { - let mut test_rng = TestRng::new(); - let step = Step::random(&mut test_rng); - - db.save_step(step.clone(), 1, "127.0.0.1".to_string()) - .await - .expect("Error saving step"); - - db.get_step_by_era(step.era_id.value()) - .await - .expect("Error getting step by era"); -} - -pub async fn should_save_and_retrieve_a_step_with_u64_max_era< - DB: DatabaseReader + DatabaseWriter, ->( - db: DB, -) { - let mut test_rng = 
TestRng::new(); - let mut step = Step::random(&mut test_rng); - step.era_id = EraId::new(u64::MAX); - - db.save_step(step.clone(), 1, "127.0.0.1".to_string()) - .await - .expect("Error saving Step with u64::MAX era id"); - - let retrieved_step = db - .get_step_by_era(u64::MAX) - .await - .expect("Error retrieving Step with u64::MAX era id"); - - assert_eq!(retrieved_step.era_id.value(), u64::MAX) -} - -pub async fn should_disallow_duplicate_event_id_from_source( - db: DB, -) { - let mut test_rng = TestRng::new(); - let event_id = test_rng.gen::(); - let block_added = BlockAdded::random(&mut test_rng); - - assert!(db - .save_block_added(block_added.clone(), event_id, "127.0.0.1".to_string()) - .await - .is_ok()); - let res = db - .save_block_added(block_added, event_id, "127.0.0.1".to_string()) - .await; - assert!(matches!(res, Err(DatabaseWriteError::UniqueConstraint(_)))); - // This check is to ensure that the UNIQUE constraint being broken is from the event_log table rather than from the raw event table. 
- if let Err(DatabaseWriteError::UniqueConstraint(uc_err)) = res { - assert_eq!(uc_err.table, "event_log") - } -} - -pub async fn should_disallow_insert_of_existing_block_added( - db: DB, -) { - let mut test_rng = TestRng::new(); - let block_added = BlockAdded::random(&mut test_rng); - - assert!(db - .save_block_added(block_added.clone(), 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let db_err = db - .save_block_added(block_added, 2, "127.0.0.1".to_string()) - .await - .unwrap_err(); - - assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); - - // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log - if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { - assert_eq!(uc_err.table, "BlockAdded") - } -} - -pub async fn should_disallow_insert_of_existing_deploy_accepted< - DB: DatabaseReader + DatabaseWriter, ->( - db: DB, -) { - let mut test_rng = TestRng::new(); - let deploy_accepted = DeployAccepted::random(&mut test_rng); - - assert!(db - .save_deploy_accepted(deploy_accepted.clone(), 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let db_err = db - .save_deploy_accepted(deploy_accepted, 2, "127.0.0.1".to_string()) - .await - .unwrap_err(); - - assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); - - // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log - if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { - assert_eq!(uc_err.table, "DeployAccepted") - } -} - -pub async fn should_disallow_insert_of_existing_deploy_expired< - DB: DatabaseReader + DatabaseWriter, ->( - db: DB, -) { - let mut test_rng = TestRng::new(); - let deploy_expired = DeployExpired::random(&mut test_rng, None); - - assert!(db - .save_deploy_expired(deploy_expired.clone(), 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let db_err = db - .save_deploy_expired(deploy_expired, 2, 
"127.0.0.1".to_string()) - .await - .unwrap_err(); - - assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); - - // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log - if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { - assert_eq!(uc_err.table, "DeployExpired") - } -} - -pub async fn should_disallow_insert_of_existing_deploy_processed< - DB: DatabaseReader + DatabaseWriter, ->( - db: DB, -) { - let mut test_rng = TestRng::new(); - let deploy_processed = DeployProcessed::random(&mut test_rng, None); - - assert!(db - .save_deploy_processed(deploy_processed.clone(), 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let db_err = db - .save_deploy_processed(deploy_processed, 2, "127.0.0.1".to_string()) - .await - .unwrap_err(); - - assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); - - // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log - if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { - assert_eq!(uc_err.table, "DeployProcessed") - } -} - -pub async fn should_disallow_insert_of_existing_fault(db: DB) { - let mut test_rng = TestRng::new(); - let fault = Fault::random(&mut test_rng); - - assert!(db - .save_fault(fault.clone(), 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let db_err = db - .save_fault(fault, 2, "127.0.0.1".to_string()) - .await - .unwrap_err(); - - assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); - - // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log - if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { - assert_eq!(uc_err.table, "Fault") - } -} - -pub async fn should_disallow_insert_of_existing_finality_signature< - DB: DatabaseReader + DatabaseWriter, ->( - db: DB, -) { - let mut test_rng = TestRng::new(); - let finality_signature = 
FinalitySignature::random(&mut test_rng); - - assert!(db - .save_finality_signature(finality_signature.clone(), 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let db_err = db - .save_finality_signature(finality_signature, 2, "127.0.0.1".to_string()) - .await - .unwrap_err(); - - assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); - - // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log - if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { - assert_eq!(uc_err.table, "FinalitySignature") - } -} - -pub async fn should_disallow_insert_of_existing_step(db: DB) { - let mut test_rng = TestRng::new(); - let step = Step::random(&mut test_rng); - - assert!(db - .save_step(step.clone(), 1, "127.0.0.1".to_string()) - .await - .is_ok()); - - let db_err = db - .save_step(step, 2, "127.0.0.1".to_string()) - .await - .unwrap_err(); - - assert!(matches!(db_err, DatabaseWriteError::UniqueConstraint(_))); - - // This check is to ensure that the UNIQUE constraint error is originating from the raw event table rather than the event_log - if let DatabaseWriteError::UniqueConstraint(uc_err) = db_err { - assert_eq!(uc_err.table, "Step") - } -} - -pub async fn get_number_of_events_should_return_0(db: DB) { - assert_eq!(db.get_number_of_events().await.unwrap(), 0); -} - -pub async fn get_number_of_events_should_return_1_when_event_stored< - DB: DatabaseReader + DatabaseWriter, ->( - db: DB, -) { - let mut test_rng = TestRng::new(); - let fault = Fault::random(&mut test_rng); - - assert!(db - .save_fault(fault, 1, "127.0.0.1".to_string()) - .await - .is_ok()); - assert_eq!(db.get_number_of_events().await.unwrap(), 1); -} diff --git a/sidecar/src/database/types.rs b/sidecar/src/database/types.rs deleted file mode 100644 index 6308d14a..00000000 --- a/sidecar/src/database/types.rs +++ /dev/null @@ -1,8 +0,0 @@ -/// This struct holds flags that steer DDL generation for specific databases. 
-pub struct DDLConfiguration { - /// For postgresql we need to use bigserial as autogenerated id which. Bigserial is generated by sea query when a combination of `big_integer`, `primary_key` and `autoincrement` is used. - /// For sqlite we can't use `big_integer` becasue the ddl creation fails -> `big_integer` can't be combined with `autoincrement` in sqlite. Hence we need a way to know that for some databases we want to use `big_integer` and for some `integer` for autoincremented id definition. - pub is_big_integer_id: bool, - /// Postgresql doesn't support unsigned integers, so for some fields we need to be mindful of the fact that in postgres we might need to use a bigger type to accomodate scope of field - pub db_supports_unsigned: bool, -} diff --git a/sidecar/src/event_stream_server/endpoint.rs b/sidecar/src/event_stream_server/endpoint.rs deleted file mode 100644 index e3d72cc0..00000000 --- a/sidecar/src/event_stream_server/endpoint.rs +++ /dev/null @@ -1,71 +0,0 @@ -use casper_event_types::Filter; -#[cfg(test)] -use std::fmt::{Display, Formatter}; - -/// Enum representing all possible endpoints sidecar can have. -/// Be advised that extending variants in this enum requires -/// an update in `is_corresponding_to` function. -#[derive(Hash, Eq, PartialEq, Debug, Clone)] -pub enum Endpoint { - Events, - Main, - Deploys, - Sigs, - Sidecar, -} - -impl Endpoint { - pub fn is_corresponding_to(&self, filter: &Filter) -> bool { - matches!( - (self, filter.clone()), - (Endpoint::Events, Filter::Events) - | (Endpoint::Main, Filter::Main) - | (Endpoint::Deploys, Filter::Deploys) - | (Endpoint::Sigs, Filter::Sigs) - ) - } -} - -#[cfg(test)] -impl Display for Endpoint { - /// This implementation is for test only and created to mimick how Display is implemented for Filter. - /// We use this trick to easily test `is_corresponding_to` with all possible inputs. 
- fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - Endpoint::Events => write!(f, "events"), - Endpoint::Main => write!(f, "events/main"), - Endpoint::Deploys => write!(f, "events/deploys"), - Endpoint::Sigs => write!(f, "events/sigs"), - Endpoint::Sidecar => write!(f, "events/sidecar"), - } - } -} - -#[cfg(test)] -mod tests { - use super::Endpoint; - use casper_event_types::Filter; - - #[test] - fn try_resolve_version_should_interpret_correct_build_version() { - let all_filters = vec![Filter::Events, Filter::Main, Filter::Deploys, Filter::Sigs]; - let all_endpoints = vec![ - Endpoint::Events, - Endpoint::Main, - Endpoint::Deploys, - Endpoint::Sigs, - Endpoint::Sidecar, - ]; - for endpoint in all_endpoints.iter() { - for filter in all_filters.iter() { - let endpoint_str = endpoint.to_string(); - let filter_str = filter.to_string(); - let should_be_correspodning = endpoint_str == filter_str; - assert_eq!( - should_be_correspodning, - endpoint.is_corresponding_to(filter) - ); - } - } - } -} diff --git a/sidecar/src/main.rs b/sidecar/src/main.rs index ca78bbdc..3cb0b003 100644 --- a/sidecar/src/main.rs +++ b/sidecar/src/main.rs @@ -1,66 +1,26 @@ -#![deny(clippy::complexity)] -#![deny(clippy::cognitive_complexity)] -#![deny(clippy::too_many_lines)] +pub mod component; +mod config; +mod run; -extern crate core; -mod admin_server; -mod api_version_manager; -mod database; -mod event_stream_server; -pub mod rest_server; -mod sql; -#[cfg(test)] -pub(crate) mod testing; -#[cfg(test)] -pub(crate) mod tests; -mod types; -mod utils; - -use std::collections::HashMap; -use std::convert::TryInto; -use std::{ - net::IpAddr, - path::{Path, PathBuf}, - str::FromStr, - time::Duration, -}; - -use crate::{ - admin_server::run_server as start_admin_server, - database::sqlite_database::SqliteDatabase, - event_stream_server::{Config as SseConfig, EventStreamServer}, - rest_server::run_server as start_rest_server, - types::{ - config::{read_config, Config}, - 
database::{DatabaseWriteError, DatabaseWriter}, - sse_events::*, - }, -}; use anyhow::{Context, Error}; -use api_version_manager::{ApiVersionManager, GuardedApiVersionManager}; -use casper_event_listener::{ - EventListener, EventListenerBuilder, NodeConnectionInterface, SseEvent, -}; -use casper_event_types::{metrics, sse_data::SseData, Filter}; +use backtrace::Backtrace; use clap::Parser; -use database::postgresql_database::PostgreSqlDatabase; -use futures::future::join_all; -use hex_fmt::HexFmt; +use config::{SidecarConfig, SidecarConfigTarget}; +use run::run; +use std::{ + env, fmt, io, + panic::{self, PanicHookInfo}, + process::{self, ExitCode}, +}; #[cfg(not(target_env = "msvc"))] use tikv_jemallocator::Jemalloc; -use tokio::{ - sync::mpsc::{channel as mpsc_channel, Receiver, Sender}, - task::JoinHandle, - time::sleep, -}; -use tracing::{debug, error, info, trace, warn}; -use types::config::Connection; -use types::{ - config::StorageConfig, - database::{Database, DatabaseReader}, +use tracing::{field::Field, info}; +use tracing_subscriber::{ + fmt::{format, format::Writer}, + EnvFilter, }; -#[cfg(feature = "additional-metrics")] -use utils::start_metrics_thread; + +const MAX_THREAD_COUNT: usize = 512; #[cfg(not(target_env = "msvc"))] #[global_allocator] @@ -74,772 +34,90 @@ struct CmdLineArgs { path_to_config: String, } -const DEFAULT_CHANNEL_SIZE: usize = 1000; - -#[tokio::main] -async fn main() -> Result<(), Error> { +fn main() -> Result { // Install global collector for tracing - tracing_subscriber::fmt::init(); + init_logging()?; let args = CmdLineArgs::parse(); let path_to_config = args.path_to_config; let config_serde = read_config(&path_to_config).context("Error constructing config")?; - let config = config_serde.try_into()?; - + let config: SidecarConfig = config_serde.try_into()?; + config.validate()?; info!("Configuration loaded"); - run(config).await -} - -async fn run(config: Config) -> Result<(), Error> { - validate_config(&config)?; - let 
(event_listeners, sse_data_receivers) = build_event_listeners(&config)?; - let admin_server_handle = build_and_start_admin_server(&config); - // This channel allows SseData to be sent from multiple connected nodes to the single EventStreamServer. - let (outbound_sse_data_sender, outbound_sse_data_receiver) = - mpsc_channel(config.outbound_channel_size.unwrap_or(DEFAULT_CHANNEL_SIZE)); - let connection_configs = config.connections.clone(); - let storage_config = config.storage.clone(); - let database = build_database(&storage_config).await?; - let rest_server_handle = build_and_start_rest_server(&config, database.clone()); - - // Task to manage incoming events from all three filters - let listening_task_handle = start_sse_processors( - connection_configs, - event_listeners, - sse_data_receivers, - database.clone(), - outbound_sse_data_sender.clone(), - ); - - let event_broadcasting_handle = - start_event_broadcasting(&config, &storage_config, outbound_sse_data_receiver); - - tokio::try_join!( - flatten_handle(event_broadcasting_handle), - flatten_handle(rest_server_handle), - flatten_handle(listening_task_handle), - flatten_handle(admin_server_handle), - ) - .map(|_| Ok(()))? 
-} - -fn start_event_broadcasting( - config: &Config, - storage_config: &StorageConfig, - mut outbound_sse_data_receiver: Receiver<(SseData, Option, Option)>, -) -> JoinHandle> { - let storage_path = storage_config.get_storage_path(); - let event_stream_server_port = config.event_stream_server.port; - let buffer_length = config.event_stream_server.event_stream_buffer_length; - let max_concurrent_subscribers = config.event_stream_server.max_concurrent_subscribers; - tokio::spawn(async move { - // Create new instance for the Sidecar's Event Stream Server - let mut event_stream_server = EventStreamServer::new( - SseConfig::new( - event_stream_server_port, - Some(buffer_length), - Some(max_concurrent_subscribers), - ), - PathBuf::from(storage_path), - ) - .context("Error starting EventStreamServer")?; - while let Some((sse_data, inbound_filter, maybe_json_data)) = - outbound_sse_data_receiver.recv().await - { - event_stream_server.broadcast(sse_data, inbound_filter, maybe_json_data); - } - Err::<(), Error>(Error::msg("Event broadcasting finished")) - }) -} - -fn start_sse_processors( - connection_configs: Vec, - event_listeners: Vec, - sse_data_receivers: Vec>, - database: Database, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, -) -> JoinHandle> { - tokio::spawn(async move { - let mut join_handles = Vec::with_capacity(event_listeners.len()); - let api_version_manager = ApiVersionManager::new(); - - for ((mut event_listener, connection_config), sse_data_receiver) in event_listeners - .into_iter() - .zip(connection_configs) - .zip(sse_data_receivers) - { - tokio::spawn(async move { - let res = event_listener.stream_aggregated_events().await; - if let Err(e) = res { - let addr = event_listener.get_node_interface().ip_address.to_string(); - error!("Disconnected from {}. 
Reason: {}", addr, e.to_string()); - } - }); - let join_handle = spawn_sse_processor( - &database, - sse_data_receiver, - &outbound_sse_data_sender, - connection_config, - &api_version_manager, - ); - join_handles.push(join_handle); - } - - let _ = join_all(join_handles).await; - //Send Shutdown to the sidecar sse endpoint - let _ = outbound_sse_data_sender - .send((SseData::Shutdown, None, None)) - .await; - // Below sleep is a workaround to allow the above Shutdown to propagate. - // If we don't do this there is a race condition between handling of the message and dropping of the outbound server - // which happens when we leave this function and the `tokio::try_join!` exits due to this. This race condition causes 9 of 10 - // tries to not propagate the Shutdown (ususally drop happens faster than message propagation to outbound). - // Fixing this race condition would require rewriting a lot of code. AFAICT the only drawback to this workaround is that the - // rest server and the sse server will exit 200ms later than it would without it. 
- sleep(Duration::from_millis(200)).await; - Err::<(), Error>(Error::msg("Connected node(s) are unavailable")) - }) -} - -fn spawn_sse_processor( - database: &Database, - sse_data_receiver: Receiver, - outbound_sse_data_sender: &Sender<(SseData, Option, Option)>, - connection_config: Connection, - api_version_manager: &std::sync::Arc>, -) -> JoinHandle> { - match database.clone() { - Database::SqliteDatabaseWrapper(db) => tokio::spawn(sse_processor( - sse_data_receiver, - outbound_sse_data_sender.clone(), - db.clone(), - false, - connection_config.enable_logging, - api_version_manager.clone(), - )), - Database::PostgreSqlDatabaseWrapper(db) => tokio::spawn(sse_processor( - sse_data_receiver, - outbound_sse_data_sender.clone(), - db.clone(), - true, - connection_config.enable_logging, - api_version_manager.clone(), - )), - } -} - -fn build_and_start_rest_server( - config: &Config, - database: Database, -) -> JoinHandle> { - let rest_server_config = config.rest_server.clone(); - tokio::spawn(async move { - match database { - Database::SqliteDatabaseWrapper(db) => { - start_rest_server(rest_server_config, db.clone()).await - } - Database::PostgreSqlDatabaseWrapper(db) => { - start_rest_server(rest_server_config, db.clone()).await - } - } - }) -} - -fn build_and_start_admin_server(config: &Config) -> JoinHandle> { - let admin_server_config = config.admin_server.clone(); - tokio::spawn(async move { - if let Some(config) = admin_server_config { - start_admin_server(config).await - } else { - Ok(()) - } - }) -} - -async fn build_database(config: &StorageConfig) -> Result { - match config { - StorageConfig::SqliteDbConfig { - storage_path, - sqlite_config, - } => { - let path_to_database_dir = Path::new(storage_path); - let sqlite_database = SqliteDatabase::new(path_to_database_dir, sqlite_config.clone()) - .await - .context("Error instantiating sqlite database")?; - Ok(Database::SqliteDatabaseWrapper(sqlite_database)) - } - StorageConfig::PostgreSqlDbConfig { - 
postgresql_config, .. - } => { - let postgres_database = PostgreSqlDatabase::new(postgresql_config.clone()) - .await - .context("Error instantiating postgres database")?; - Ok(Database::PostgreSqlDatabaseWrapper(postgres_database)) - } - } -} - -fn build_event_listeners( - config: &Config, -) -> Result<(Vec, Vec>), Error> { - let mut event_listeners = Vec::with_capacity(config.connections.len()); - let mut sse_data_receivers = Vec::new(); - for connection in &config.connections { - let (inbound_sse_data_sender, inbound_sse_data_receiver) = - mpsc_channel(config.inbound_channel_size.unwrap_or(DEFAULT_CHANNEL_SIZE)); - sse_data_receivers.push(inbound_sse_data_receiver); - let event_listener = builder(connection, inbound_sse_data_sender)?.build(); - event_listeners.push(event_listener?); - } - Ok((event_listeners, sse_data_receivers)) -} - -fn builder( - connection: &Connection, - inbound_sse_data_sender: Sender, -) -> Result { - let node_interface = NodeConnectionInterface { - ip_address: IpAddr::from_str(&connection.ip_address)?, - sse_port: connection.sse_port, - rest_port: connection.rest_port, - }; - let event_listener_builder = EventListenerBuilder { - node: node_interface, - max_connection_attempts: connection.max_attempts, - delay_between_attempts: Duration::from_secs( - connection.delay_between_retries_in_seconds as u64, - ), - allow_partial_connection: connection.allow_partial_connection, - sse_event_sender: inbound_sse_data_sender, - connection_timeout: Duration::from_secs( - connection.connection_timeout_in_seconds.unwrap_or(5) as u64, - ), - sleep_between_keep_alive_checks: Duration::from_secs( - connection - .sleep_between_keep_alive_checks_in_seconds - .unwrap_or(60) as u64, - ), - no_message_timeout: Duration::from_secs( - connection.no_message_timeout_in_seconds.unwrap_or(120) as u64, - ), - }; - Ok(event_listener_builder) -} -fn validate_config(config: &Config) -> Result<(), Error> { - if config - .connections - .iter() - .any(|connection| 
connection.max_attempts < 1) - { - return Err(Error::msg( - "Unable to run: max_attempts setting must be above 0 for the sidecar to attempt connection" - )); - } - Ok(()) -} + let max_worker_threads = config.max_thread_count.unwrap_or_else(num_cpus::get); + let max_blocking_threads = config + .max_thread_count + .unwrap_or(MAX_THREAD_COUNT - max_worker_threads); + panic::set_hook(Box::new(panic_hook)); -async fn flatten_handle(handle: JoinHandle>) -> Result { - match handle.await { - Ok(Ok(result)) => Ok(result), - Ok(Err(err)) => Err(err), - Err(join_err) => Err(Error::from(join_err)), - } + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .worker_threads(max_worker_threads) + .max_blocking_threads(max_blocking_threads) + .build() + .expect("Failed building sidecar runtime") + .block_on(run(config)) } -async fn handle_database_save_result( - entity_name: &str, - entity_identifier: &str, - res: Result, - outbound_sse_data_sender: &Sender<(SseData, Option, Option)>, - inbound_filter: Filter, - json_data: Option, - build_sse_data: F, -) where - F: FnOnce() -> SseData, -{ - match res { - Ok(_) => { - count_internal_event("main_inbound_sse_data", "db_save_end"); - count_internal_event("main_inbound_sse_data", "outbound_sse_data_send_start"); - if let Err(error) = outbound_sse_data_sender - .send((build_sse_data(), Some(inbound_filter), json_data)) - .await - { - count_internal_event("main_inbound_sse_data", "outbound_sse_data_send_end"); - debug!( - "Error when sending to outbound_sse_data_sender. 
Error: {}", - error - ); - } else { - count_internal_event("main_inbound_sse_data", "outbound_sse_data_send_end"); - } - } - Err(DatabaseWriteError::UniqueConstraint(uc_err)) => { - count_internal_event("main_inbound_sse_data", "db_save_end"); - debug!( - "Already received {} ({}), logged in event_log", - entity_name, entity_identifier, - ); - trace!(?uc_err); - } - Err(other_err) => { - count_internal_event("main_inbound_sse_data", "db_save_end"); - count_error(format!("db_save_error_{}", entity_name).as_str()); - warn!(?other_err, "Unexpected error saving {}", entity_identifier); - } - } - count_internal_event("main_inbound_sse_data", "event_received_end"); +pub fn read_config(config_path: &str) -> Result { + let toml_content = + std::fs::read_to_string(config_path).context("Error reading config file contents")?; + toml::from_str(&toml_content).context("Error parsing config into TOML format") } -/// Function to handle single event in the sse_processor. -/// Returns false if the handling indicated that no other messages should be processed. -/// Returns true otherwise. 
-#[allow(clippy::too_many_lines)] -async fn handle_single_event( - sse_event: SseEvent, - database: Db, - enable_event_logging: bool, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, - api_version_manager: GuardedApiVersionManager, -) { - match sse_event.data { - SseData::ApiVersion(_) | SseData::Shutdown => { - //don't do debug counting for ApiVersion since we don't store it - } - _ => { - count_internal_event("main_inbound_sse_data", "event_received_start"); - } - } - match sse_event.data { - SseData::SidecarVersion(_) => { - //Do nothing -> the inbound shouldn't produce this endpoint, it can be only produced by sidecar to the outbound - } - SseData::ApiVersion(version) => { - handle_api_version( - api_version_manager, - version, - &outbound_sse_data_sender, - sse_event.inbound_filter, - enable_event_logging, - ) - .await; - } - SseData::BlockAdded { block, block_hash } => { - if enable_event_logging { - let hex_block_hash = HexFmt(block_hash.inner()); - info!("Block Added: {:18}", hex_block_hash); - debug!("Block Added: {}", hex_block_hash); - } - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_block_added( - BlockAdded::new(block_hash, block.clone()), - sse_event.id, - sse_event.source.to_string(), - ) - .await; - handle_database_save_result( - "BlockAdded", - HexFmt(block_hash.inner()).to_string().as_str(), - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::BlockAdded { block, block_hash }, - ) - .await; - } - SseData::DeployAccepted { deploy } => { - if enable_event_logging { - let hex_deploy_hash = HexFmt(deploy.hash().inner()); - info!("Deploy Accepted: {:18}", hex_deploy_hash); - debug!("Deploy Accepted: {}", hex_deploy_hash); - } - let deploy_accepted = DeployAccepted::new(deploy.clone()); - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_deploy_accepted(deploy_accepted, sse_event.id, 
sse_event.source.to_string()) - .await; - handle_database_save_result( - "DeployAccepted", - HexFmt(deploy.hash().inner()).to_string().as_str(), - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::DeployAccepted { deploy }, - ) - .await; - } - SseData::DeployExpired { deploy_hash } => { - if enable_event_logging { - let hex_deploy_hash = HexFmt(deploy_hash.inner()); - info!("Deploy Expired: {:18}", hex_deploy_hash); - debug!("Deploy Expired: {}", hex_deploy_hash); - } - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_deploy_expired( - DeployExpired::new(deploy_hash), - sse_event.id, - sse_event.source.to_string(), - ) - .await; - handle_database_save_result( - "DeployExpired", - HexFmt(deploy_hash.inner()).to_string().as_str(), - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::DeployExpired { deploy_hash }, - ) - .await; - } - SseData::DeployProcessed { - deploy_hash, - account, - timestamp, - ttl, - dependencies, - block_hash, - execution_result, - } => { - if enable_event_logging { - let hex_deploy_hash = HexFmt(deploy_hash.inner()); - info!("Deploy Processed: {:18}", hex_deploy_hash); - debug!("Deploy Processed: {}", hex_deploy_hash); - } - let deploy_processed = DeployProcessed::new( - deploy_hash.clone(), - account.clone(), - timestamp, - ttl, - dependencies.clone(), - block_hash.clone(), - execution_result.clone(), - ); - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_deploy_processed( - deploy_processed.clone(), - sse_event.id, - sse_event.source.to_string(), - ) - .await; +fn panic_hook(info: &PanicHookInfo) { + let backtrace = Backtrace::new(); - handle_database_save_result( - "DeployProcessed", - HexFmt(deploy_hash.inner()).to_string().as_str(), - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::DeployProcessed { - 
deploy_hash, - account, - timestamp, - ttl, - dependencies, - block_hash, - execution_result, - }, - ) - .await; - } - SseData::Fault { - era_id, - timestamp, - public_key, - } => { - let fault = Fault::new(era_id, public_key.clone(), timestamp); - warn!(%fault, "Fault reported"); - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_fault(fault.clone(), sse_event.id, sse_event.source.to_string()) - .await; + eprintln!("{:?}", backtrace); - handle_database_save_result( - "Fault", - format!("{:#?}", fault).as_str(), - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::Fault { - era_id, - timestamp, - public_key, - }, - ) - .await; - } - SseData::FinalitySignature(fs) => { - if enable_event_logging { - debug!( - "Finality Signature: {} for {}", - fs.signature(), - fs.block_hash() - ); - } - let finality_signature = FinalitySignature::new(fs.clone()); - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_finality_signature( - finality_signature.clone(), - sse_event.id, - sse_event.source.to_string(), - ) - .await; - handle_database_save_result( - "FinalitySignature", - "", - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::FinalitySignature(fs), - ) - .await; - } - SseData::Step { - era_id, - execution_effect, - } => { - let step = Step::new(era_id, execution_effect.clone()); - if enable_event_logging { - info!("Step at era: {}", era_id.value()); - } - count_internal_event("main_inbound_sse_data", "db_save_start"); - let res = database - .save_step(step, sse_event.id, sse_event.source.to_string()) - .await; - handle_database_save_result( - "Step", - format!("{}", era_id.value()).as_str(), - res, - &outbound_sse_data_sender, - sse_event.inbound_filter, - sse_event.json_data, - || SseData::Step { - era_id, - execution_effect, - }, - ) - .await; - } - SseData::Shutdown => 
handle_shutdown(sse_event, database, outbound_sse_data_sender).await, - } -} - -async fn handle_shutdown( - sse_event: SseEvent, - sqlite_database: Db, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, -) { - warn!("Node ({}) is unavailable", sse_event.source.to_string()); - let res = sqlite_database - .save_shutdown(sse_event.id, sse_event.source.to_string()) - .await; - match res { - Ok(_) | Err(DatabaseWriteError::UniqueConstraint(_)) => { - // We push to outbound on UniqueConstraint error because in sse_server we match shutdowns to outbounds based on the filter they came from to prevent duplicates. - // But that also means that we need to pass through all the Shutdown events so the sse_server can determine to which outbound filters they need to be pushed (we - // don't store in DB the information from which filter did shutdown came). - if let Err(error) = outbound_sse_data_sender - .send(( - SseData::Shutdown, - Some(sse_event.inbound_filter), - sse_event.json_data, - )) - .await - { - debug!( - "Error when sending to outbound_sse_data_sender. Error: {}", - error - ); - } - } - Err(other_err) => { - count_error("db_save_error_shutdown"); - warn!(?other_err, "Unexpected error saving Shutdown") - } - } -} - -async fn handle_api_version( - api_version_manager: std::sync::Arc>, - version: casper_types::ProtocolVersion, - outbound_sse_data_sender: &Sender<(SseData, Option, Option)>, - filter: Filter, - enable_event_logging: bool, -) { - let mut manager_guard = api_version_manager.lock().await; - let changed_newest_version = manager_guard.store_version(version); - if changed_newest_version { - if let Err(error) = outbound_sse_data_sender - .send((SseData::ApiVersion(version), Some(filter), None)) - .await - { - debug!( - "Error when sending to outbound_sse_data_sender. 
Error: {}", - error - ); - } - } - drop(manager_guard); - if enable_event_logging { - info!(%version, "API Version"); - } -} - -async fn sse_processor( - inbound_sse_data_receiver: Receiver, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, - database: Db, - database_supports_multithreaded_processing: bool, - enable_event_logging: bool, - api_version_manager: GuardedApiVersionManager, -) -> Result<(), Error> { - #[cfg(feature = "additional-metrics")] - let metrics_tx = start_metrics_thread("sse_save".to_string()); - // This task starts the listener pushing events to the sse_data_receiver - if database_supports_multithreaded_processing { - start_multi_threaded_events_consumer( - inbound_sse_data_receiver, - outbound_sse_data_sender, - database, - enable_event_logging, - api_version_manager, - #[cfg(feature = "additional-metrics")] - metrics_tx, - ) - .await; + // Print panic info + if let Some(s) = info.payload().downcast_ref::<&str>() { + eprintln!("sidecar panicked: {}", s); } else { - start_single_threaded_events_consumer( - inbound_sse_data_receiver, - outbound_sse_data_sender, - database, - enable_event_logging, - api_version_manager, - #[cfg(feature = "additional-metrics")] - metrics_tx, - ) - .await; + eprintln!("{}", info); } - Ok(()) + process::abort() } -fn handle_events_in_thread( - mut queue_rx: Receiver, - database: Db, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, - api_version_manager: GuardedApiVersionManager, - enable_event_logging: bool, - #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, -) { - tokio::spawn(async move { - while let Some(sse_event) = queue_rx.recv().await { - handle_single_event( - sse_event, - database.clone(), - enable_event_logging, - outbound_sse_data_sender.clone(), - api_version_manager.clone(), - ) - .await; - #[cfg(feature = "additional-metrics")] - let _ = metrics_sender.send(()).await; - } - }); -} +fn init_logging() -> anyhow::Result<()> { + const LOG_CONFIGURATION_ENVVAR: 
&str = "RUST_LOG"; -fn build_queues(cache_size: usize) -> HashMap, Receiver)> { - let mut map = HashMap::new(); - map.insert(Filter::Deploys, mpsc_channel(cache_size)); - map.insert(Filter::Events, mpsc_channel(cache_size)); - map.insert(Filter::Main, mpsc_channel(cache_size)); - map.insert(Filter::Sigs, mpsc_channel(cache_size)); - map -} + const LOG_FIELD_MESSAGE: &str = "message"; + const LOG_FIELD_TARGET: &str = "log.target"; + const LOG_FIELD_MODULE: &str = "log.module_path"; + const LOG_FIELD_FILE: &str = "log.file"; + const LOG_FIELD_LINE: &str = "log.line"; -async fn start_multi_threaded_events_consumer< - Db: DatabaseReader + DatabaseWriter + Clone + Send + Sync + 'static, ->( - mut inbound_sse_data_receiver: Receiver, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, - database: Db, - enable_event_logging: bool, - api_version_manager: GuardedApiVersionManager, - #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, -) { - let mut senders_and_receivers_map = build_queues(DEFAULT_CHANNEL_SIZE); - let mut senders_map = HashMap::new(); - for (filter, (tx, rx)) in senders_and_receivers_map.drain() { - handle_events_in_thread( - rx, - database.clone(), - outbound_sse_data_sender.clone(), - api_version_manager.clone(), - enable_event_logging, - #[cfg(feature = "additional-metrics")] - metrics_sender.clone(), - ); - senders_map.insert(filter, tx); - } + type FormatDebugFn = fn(&mut Writer, &Field, &dyn std::fmt::Debug) -> fmt::Result; - while let Some(sse_event) = inbound_sse_data_receiver.recv().await { - if let Some(tx) = senders_map.get(&sse_event.inbound_filter) { - tx.send(sse_event).await.unwrap() - } else { - error!( - "Failed to find an sse handler queue for inbound filter {}", - sse_event.inbound_filter - ); - break; + fn format_into_debug_writer( + writer: &mut Writer, + field: &Field, + value: &dyn fmt::Debug, + ) -> fmt::Result { + match field.name() { + LOG_FIELD_MESSAGE => write!(writer, "{:?}", value), + LOG_FIELD_TARGET | 
LOG_FIELD_MODULE | LOG_FIELD_FILE | LOG_FIELD_LINE => Ok(()), + _ => write!(writer, "; {}={:?}", field, value), } } -} -async fn start_single_threaded_events_consumer< - Db: DatabaseReader + DatabaseWriter + Clone + Send + Sync, ->( - mut inbound_sse_data_receiver: Receiver, - outbound_sse_data_sender: Sender<(SseData, Option, Option)>, - database: Db, - enable_event_logging: bool, - api_version_manager: GuardedApiVersionManager, - #[cfg(feature = "additional-metrics")] metrics_sender: Sender<()>, -) { - while let Some(sse_event) = inbound_sse_data_receiver.recv().await { - handle_single_event( - sse_event, - database.clone(), - enable_event_logging, - outbound_sse_data_sender.clone(), - api_version_manager.clone(), - ) - .await; - #[cfg(feature = "additional-metrics")] - let _ = metrics_sender.send(()).await; - } -} + let formatter = format::debug_fn(format_into_debug_writer as FormatDebugFn); -fn count_error(reason: &str) { - metrics::ERROR_COUNTS - .with_label_values(&["main", reason]) - .inc(); -} + let filter = EnvFilter::new( + env::var(LOG_CONFIGURATION_ENVVAR) + .as_deref() + .unwrap_or("warn,casper_rpc_sidecar=info"), + ); -/// This metric is used for debugging of possible issues -/// with sidecar to determine at which step of processing there was a hang. -/// If we determine that this issue was fixed completely this can be removed -/// (the corresponding metric also). 
-fn count_internal_event(category: &str, reason: &str) { - metrics::INTERNAL_EVENTS - .with_label_values(&[category, reason]) - .inc(); + let builder = tracing_subscriber::fmt() + .with_writer(io::stdout as fn() -> io::Stdout) + .with_env_filter(filter) + .fmt_fields(formatter) + .with_filter_reloading(); + builder.try_init().map_err(|error| anyhow::anyhow!(error))?; + Ok(()) } diff --git a/sidecar/src/rest_server.rs b/sidecar/src/rest_server.rs deleted file mode 100644 index 8e8507b3..00000000 --- a/sidecar/src/rest_server.rs +++ /dev/null @@ -1,47 +0,0 @@ -mod errors; -pub mod filters; -mod handlers; -mod openapi; -#[cfg(test)] -mod tests; - -use std::net::TcpListener; -use std::time::Duration; - -use anyhow::Error; -use hyper::Server; -use tower::{buffer::Buffer, make::Shared, ServiceBuilder}; -use warp::Filter; - -use crate::{ - types::{config::RestServerConfig, database::DatabaseReader}, - utils::resolve_address, -}; - -const BIND_ALL_INTERFACES: &str = "0.0.0.0"; - -pub async fn run_server( - config: RestServerConfig, - database: Db, -) -> Result<(), Error> { - let api = filters::combined_filters(database); - let address = format!("{}:{}", BIND_ALL_INTERFACES, config.port); - let socket_address = resolve_address(&address)?; - - let listener = TcpListener::bind(socket_address)?; - - let warp_service = warp::service(api.with(warp::cors().allow_any_origin())); - let tower_service = ServiceBuilder::new() - .concurrency_limit(config.max_concurrent_requests as usize) - .rate_limit( - config.max_requests_per_second as u64, - Duration::from_secs(1), - ) - .service(warp_service); - - Server::from_tcp(listener)? 
- .serve(Shared::new(Buffer::new(tower_service, 50))) - .await?; - - Err(Error::msg("REST server shutting down")) -} diff --git a/sidecar/src/run.rs b/sidecar/src/run.rs new file mode 100644 index 00000000..036c1ae5 --- /dev/null +++ b/sidecar/src/run.rs @@ -0,0 +1,85 @@ +use crate::component::*; +use crate::config::SidecarConfig; +use anyhow::{anyhow, Context, Error}; +use casper_event_sidecar::LazyDatabaseWrapper; +use std::{process::ExitCode, time::Duration}; +use tokio::{ + signal::unix::{signal, SignalKind}, + time::timeout, +}; +use tracing::{error, info}; + +const MAX_COMPONENT_STARTUP_TIMEOUT_SECS: u64 = 30; + +pub async fn run(config: SidecarConfig) -> Result { + let maybe_database = config + .storage + .as_ref() + .filter(|storage_config| storage_config.is_enabled()) + .map(|storage_config| LazyDatabaseWrapper::new(storage_config.clone())); + let mut components: Vec> = Vec::new(); + let admin_api_component = AdminApiComponent::new(); + components.push(Box::new(admin_api_component)); + let rest_api_component = RestApiComponent::new(maybe_database.clone()); + components.push(Box::new(rest_api_component)); + let sse_server_component = SseServerComponent::new(maybe_database); + components.push(Box::new(sse_server_component)); + let rpc_api_component = RpcApiComponent::new(); + components.push(Box::new(rpc_api_component)); + + // setup signal handler futures to wait for interrupts + let mut sigterm = + signal(SignalKind::terminate()).context("Failed to initialize SIGTERM handler")?; + let mut sigint = + signal(SignalKind::interrupt()).context("Failed to initialize SIGINT handler")?; + + // run signal handlers and main task + tokio::select! { + _ = sigterm.recv() => { + info!("Received SIGTERM signal. Shutting down..."); + Ok(ExitCode::SUCCESS) + }, + _ = sigint.recv() => { + info!("Received SIGINT signal. 
Shutting down..."); + Ok(ExitCode::SUCCESS) + }, + res = do_run(config, components) => res.map_err(|component_error| { + error!("The server has exited with an error: {component_error}"); + anyhow!(component_error.to_string()) + }), + } +} + +async fn do_run( + config: SidecarConfig, + components: Vec>, +) -> Result { + let mut component_futures = Vec::new(); + let max_startup_duration = Duration::from_secs(MAX_COMPONENT_STARTUP_TIMEOUT_SECS); + for component in components.iter() { + let component_name = component.name(); + let component_startup_res = timeout( + max_startup_duration, + component.prepare_component_task(&config), + ) + .await; + if component_startup_res.is_err() { + return Err(ComponentError::Initialization { + component_name: component_name.clone(), + internal_error: anyhow!( + "Failed to start component {component_name} in {MAX_COMPONENT_STARTUP_TIMEOUT_SECS} [s]" + ), + }); + } + + let maybe_future = component_startup_res.unwrap()?; + if let Some(future) = maybe_future { + component_futures.push(future); + } + } + if component_futures.is_empty() { + info!("No runnable sidecar components are defined/enabled. 
Exiting"); + return Ok(ExitCode::SUCCESS); + } + futures::future::select_all(component_futures).await.0 +} diff --git a/sidecar/src/sql/tables/deploy_accepted.rs b/sidecar/src/sql/tables/deploy_accepted.rs deleted file mode 100644 index 47f62bd9..00000000 --- a/sidecar/src/sql/tables/deploy_accepted.rs +++ /dev/null @@ -1,71 +0,0 @@ -use sea_query::{ - error::Result as SqResult, ColumnDef, Expr, ForeignKey, ForeignKeyAction, Iden, Index, - InsertStatement, Query, SelectStatement, Table, TableCreateStatement, -}; - -use super::event_log::EventLog; - -#[derive(Iden)] -pub(super) enum DeployAccepted { - #[iden = "DeployAccepted"] - Table, - DeployHash, - Raw, - EventLogId, -} - -pub fn create_table_stmt() -> TableCreateStatement { - Table::create() - .table(DeployAccepted::Table) - .if_not_exists() - .col( - ColumnDef::new(DeployAccepted::DeployHash) - .string() - .not_null(), - ) - .col(ColumnDef::new(DeployAccepted::Raw).text().not_null()) - .col( - ColumnDef::new(DeployAccepted::EventLogId) - .big_unsigned() - .not_null(), - ) - .index( - Index::create() - .primary() - .name("PDX_DeployAccepted") - .col(DeployAccepted::DeployHash), - ) - .foreign_key( - ForeignKey::create() - .name("FK_event_log_id") - .from(DeployAccepted::Table, DeployAccepted::EventLogId) - .to(EventLog::Table, EventLog::EventLogId) - .on_delete(ForeignKeyAction::Restrict) - .on_update(ForeignKeyAction::Restrict), - ) - .to_owned() -} - -pub fn create_insert_stmt( - deploy_hash: String, - raw: String, - event_log_id: u64, -) -> SqResult { - Query::insert() - .into_table(DeployAccepted::Table) - .columns([ - DeployAccepted::DeployHash, - DeployAccepted::Raw, - DeployAccepted::EventLogId, - ]) - .values(vec![deploy_hash.into(), raw.into(), event_log_id.into()]) - .map(|stmt| stmt.to_owned()) -} - -pub fn create_get_by_hash_stmt(deploy_hash: String) -> SelectStatement { - Query::select() - .column(DeployAccepted::Raw) - .from(DeployAccepted::Table) - 
.and_where(Expr::col(DeployAccepted::DeployHash).eq(deploy_hash)) - .to_owned() -} diff --git a/sidecar/src/sql/tables/deploy_event.rs b/sidecar/src/sql/tables/deploy_event.rs deleted file mode 100644 index 84f87b79..00000000 --- a/sidecar/src/sql/tables/deploy_event.rs +++ /dev/null @@ -1,51 +0,0 @@ -use sea_query::{ - error::Result as SqResult, ColumnDef, ForeignKey, ForeignKeyAction, Iden, Index, - InsertStatement, Query, Table, TableCreateStatement, -}; - -use super::event_log::EventLog; - -#[derive(Iden)] -pub(super) enum DeployEvent { - Table, - EventLogId, - DeployHash, -} - -pub fn create_table_stmt() -> TableCreateStatement { - Table::create() - .table(DeployEvent::Table) - .if_not_exists() - .col( - ColumnDef::new(DeployEvent::EventLogId) - .big_unsigned() - .not_null(), - ) - .col(ColumnDef::new(DeployEvent::DeployHash).string().not_null()) - .index( - Index::create() - .primary() - .name("PDX_DeployEvent") - .col(DeployEvent::DeployHash) - .col(DeployEvent::EventLogId), - ) - .foreign_key( - ForeignKey::create() - .name("FK_event_log_id") - .from(DeployEvent::Table, DeployEvent::EventLogId) - .to(EventLog::Table, EventLog::EventLogId) - .on_delete(ForeignKeyAction::Restrict) - .on_update(ForeignKeyAction::Restrict), - ) - .to_owned() -} - -pub fn create_insert_stmt(event_log_id: u64, deploy_hash: String) -> SqResult { - let insert_stmt = Query::insert() - .into_table(DeployEvent::Table) - .columns([DeployEvent::EventLogId, DeployEvent::DeployHash]) - .values(vec![event_log_id.into(), deploy_hash.into()])? 
- .to_owned(); - - Ok(insert_stmt) -} diff --git a/sidecar/src/sql/tables/deploy_expired.rs b/sidecar/src/sql/tables/deploy_expired.rs deleted file mode 100644 index dd5ca8f5..00000000 --- a/sidecar/src/sql/tables/deploy_expired.rs +++ /dev/null @@ -1,71 +0,0 @@ -use sea_query::{ - error::Result as SqResult, ColumnDef, Expr, ForeignKey, ForeignKeyAction, Iden, Index, - InsertStatement, Query, SelectStatement, Table, TableCreateStatement, -}; - -use super::event_log::EventLog; - -#[derive(Iden)] -pub(super) enum DeployExpired { - #[iden = "DeployExpired"] - Table, - DeployHash, - Raw, - EventLogId, -} - -pub fn create_table_stmt() -> TableCreateStatement { - Table::create() - .table(DeployExpired::Table) - .if_not_exists() - .col( - ColumnDef::new(DeployExpired::DeployHash) - .string() - .not_null(), - ) - .col(ColumnDef::new(DeployExpired::Raw).text().not_null()) - .col( - ColumnDef::new(DeployExpired::EventLogId) - .big_unsigned() - .not_null(), - ) - .index( - Index::create() - .primary() - .name("PDX_DeployExpired") - .col(DeployExpired::DeployHash), - ) - .foreign_key( - ForeignKey::create() - .name("FK_event_log_id") - .from(DeployExpired::Table, DeployExpired::EventLogId) - .to(EventLog::Table, EventLog::EventLogId) - .on_delete(ForeignKeyAction::Restrict) - .on_update(ForeignKeyAction::Restrict), - ) - .to_owned() -} - -pub fn create_insert_stmt( - deploy_hash: String, - event_log_id: u64, - raw: String, -) -> SqResult { - Query::insert() - .into_table(DeployExpired::Table) - .columns([ - DeployExpired::DeployHash, - DeployExpired::EventLogId, - DeployExpired::Raw, - ]) - .values(vec![deploy_hash.into(), event_log_id.into(), raw.into()]) - .map(|stmt| stmt.to_owned()) -} - -pub fn create_get_by_hash_stmt(deploy_hash: String) -> SelectStatement { - Query::select() - .column(DeployExpired::Raw) - .from(DeployExpired::Table) - .and_where(Expr::col(DeployExpired::DeployHash).eq(deploy_hash)) - .to_owned() -} diff --git 
a/sidecar/src/sql/tables/deploy_processed.rs b/sidecar/src/sql/tables/deploy_processed.rs deleted file mode 100644 index 198b0cd9..00000000 --- a/sidecar/src/sql/tables/deploy_processed.rs +++ /dev/null @@ -1,71 +0,0 @@ -use sea_query::{ - error::Result as SqResult, ColumnDef, Expr, ForeignKey, ForeignKeyAction, Iden, Index, - InsertStatement, Query, SelectStatement, Table, TableCreateStatement, -}; - -use super::event_log::EventLog; - -#[derive(Iden)] -pub enum DeployProcessed { - #[iden = "DeployProcessed"] - Table, - DeployHash, - Raw, - EventLogId, -} - -pub fn create_table_stmt() -> TableCreateStatement { - Table::create() - .table(DeployProcessed::Table) - .if_not_exists() - .col( - ColumnDef::new(DeployProcessed::DeployHash) - .string() - .not_null(), - ) - .col(ColumnDef::new(DeployProcessed::Raw).text().not_null()) - .col( - ColumnDef::new(DeployProcessed::EventLogId) - .big_unsigned() - .not_null(), - ) - .index( - Index::create() - .primary() - .name("PDX_DeployProcessed") - .col(DeployProcessed::DeployHash), - ) - .foreign_key( - ForeignKey::create() - .name("FK_event_log_id") - .from(DeployProcessed::Table, DeployProcessed::EventLogId) - .to(EventLog::Table, EventLog::EventLogId) - .on_delete(ForeignKeyAction::Restrict) - .on_update(ForeignKeyAction::Restrict), - ) - .to_owned() -} - -pub fn create_insert_stmt( - deploy_hash: String, - raw: String, - event_log_id: u64, -) -> SqResult { - Query::insert() - .into_table(DeployProcessed::Table) - .columns([ - DeployProcessed::DeployHash, - DeployProcessed::Raw, - DeployProcessed::EventLogId, - ]) - .values(vec![deploy_hash.into(), raw.into(), event_log_id.into()]) - .map(|stmt| stmt.to_owned()) -} - -pub fn create_get_by_hash_stmt(deploy_hash: String) -> SelectStatement { - Query::select() - .column(DeployProcessed::Raw) - .from(DeployProcessed::Table) - .and_where(Expr::col(DeployProcessed::DeployHash).eq(deploy_hash)) - .to_owned() -} diff --git a/sidecar/src/testing/fake_database.rs 
b/sidecar/src/testing/fake_database.rs deleted file mode 100644 index 5aee9dcc..00000000 --- a/sidecar/src/testing/fake_database.rs +++ /dev/null @@ -1,442 +0,0 @@ -use std::collections::HashMap; -use std::sync::{Arc, Mutex}; -use std::time::{SystemTime, UNIX_EPOCH}; - -use async_trait::async_trait; -use casper_types::testing::TestRng; -use casper_types::AsymmetricType; -use rand::Rng; - -use casper_event_types::FinalitySignature as FinSig; - -use crate::types::{ - database::{ - DatabaseReadError, DatabaseReader, DatabaseWriteError, DatabaseWriter, DeployAggregate, - Migration, - }, - sse_events::*, -}; - -#[derive(Clone)] -pub struct FakeDatabase { - data: Arc>>, -} - -impl FakeDatabase { - pub(crate) fn new() -> Self { - Self { - data: Arc::new(Mutex::new(HashMap::new())), - } - } - - /// Creates random SSE event data and saves them, returning the identifiers for each record. - pub(crate) async fn populate_with_events( - &self, - ) -> Result { - let mut rng = TestRng::new(); - - let block_added = BlockAdded::random(&mut rng); - let deploy_accepted = DeployAccepted::random(&mut rng); - let deploy_processed = DeployProcessed::random(&mut rng, None); - let deploy_expired = DeployExpired::random(&mut rng, None); - let fault = Fault::random(&mut rng); - let finality_signature = FinalitySignature::random(&mut rng); - let step = Step::random(&mut rng); - - let test_stored_keys = IdentifiersForStoredEvents { - block_added_hash: block_added.hex_encoded_hash(), - block_added_height: block_added.get_height(), - deploy_accepted_hash: deploy_accepted.hex_encoded_hash(), - deploy_processed_hash: deploy_processed.hex_encoded_hash(), - deploy_expired_hash: deploy_expired.hex_encoded_hash(), - fault_era_id: fault.era_id.value(), - fault_public_key: fault.public_key.to_hex(), - finality_signatures_block_hash: finality_signature.hex_encoded_block_hash(), - step_era_id: step.era_id.value(), - }; - - self.save_block_added(block_added, rng.gen(), "127.0.0.1".to_string()) - .await?; - 
self.save_deploy_accepted(deploy_accepted, rng.gen(), "127.0.0.1".to_string()) - .await?; - self.save_deploy_processed(deploy_processed, rng.gen(), "127.0.0.1".to_string()) - .await?; - self.save_deploy_expired(deploy_expired, rng.gen(), "127.0.0.1".to_string()) - .await?; - self.save_fault(fault, rng.gen(), "127.0.0.1".to_string()) - .await?; - self.save_finality_signature(finality_signature, rng.gen(), "127.0.0.1".to_string()) - .await?; - self.save_step(step, rng.gen(), "127.0.0.1".to_string()) - .await?; - - Ok(test_stored_keys) - } -} - -#[async_trait] -impl DatabaseWriter for FakeDatabase { - #[allow(unused)] - async fn save_block_added( - &self, - block_added: BlockAdded, - event_id: u32, - event_source_address: String, - ) -> Result { - let mut data = self.data.lock().expect("Error acquiring lock on data"); - - let identifier_hash = block_added.hex_encoded_hash(); - let identifier_height = block_added.get_height().to_string(); - let stringified_event = - serde_json::to_string(&block_added).expect("Error serialising event data"); - - // For the sake of keeping the test fixture simple, I'm saving the event twice, one record for each identifier. - - data.insert(identifier_hash, stringified_event.clone()); - - data.insert(identifier_height, stringified_event); - - Ok(0) - } - - #[allow(unused)] - async fn save_deploy_accepted( - &self, - deploy_accepted: DeployAccepted, - event_id: u32, - event_source_address: String, - ) -> Result { - let mut data = self.data.lock().expect("Error acquiring lock on data"); - - let hash = deploy_accepted.hex_encoded_hash(); - // This is suffixed to allow storage of each deploy state event without overwriting. 
- let identifier = format!("{}-accepted", hash); - let stringified_event = - serde_json::to_string(&deploy_accepted).expect("Error serialising event data"); - - data.insert(identifier, stringified_event); - - Ok(0) - } - - #[allow(unused)] - async fn save_deploy_processed( - &self, - deploy_processed: DeployProcessed, - event_id: u32, - event_source_address: String, - ) -> Result { - let mut data = self.data.lock().expect("Error acquiring lock on data"); - - let hash = deploy_processed.hex_encoded_hash(); - // This is suffixed to allow storage of each deploy state event without overwriting. - let identifier = format!("{}-processed", hash); - let stringified_event = - serde_json::to_string(&deploy_processed).expect("Error serialising event data"); - - data.insert(identifier, stringified_event); - - Ok(0) - } - - #[allow(unused)] - async fn save_deploy_expired( - &self, - deploy_expired: DeployExpired, - event_id: u32, - event_source_address: String, - ) -> Result { - let mut data = self.data.lock().expect("Error acquiring lock on data"); - - let hash = deploy_expired.hex_encoded_hash(); - // This is suffixed to allow storage of each deploy state event without overwriting. - let identifier = format!("{}-expired", hash); - let stringified_event = - serde_json::to_string(&deploy_expired).expect("Error serialising event data"); - - data.insert(identifier, stringified_event); - - Ok(0) - } - - #[allow(unused)] - async fn save_fault( - &self, - fault: Fault, - event_id: u32, - event_source_address: String, - ) -> Result { - let mut data = self.data.lock().expect("Error acquiring lock on data"); - - let identifier_era = fault.era_id.value().to_string(); - let identifier_public_key = fault.public_key.to_hex(); - - let stringified_event = - serde_json::to_string(&fault).expect("Error serialising event data"); - - // For the sake of keeping the test fixture simple, I'm saving the event twice, one record for each identifier. 
- - data.insert(identifier_era, stringified_event.clone()); - - data.insert(identifier_public_key, stringified_event); - - Ok(0) - } - - #[allow(unused)] - async fn save_finality_signature( - &self, - finality_signature: FinalitySignature, - event_id: u32, - event_source_address: String, - ) -> Result { - let mut data = self.data.lock().expect("Error acquiring lock on data"); - - let identifier = finality_signature.hex_encoded_block_hash(); - let stringified_event = - serde_json::to_string(&finality_signature).expect("Error serialising event data"); - - data.insert(identifier, stringified_event); - - Ok(0) - } - - #[allow(unused)] - async fn save_step( - &self, - step: Step, - event_id: u32, - event_source_address: String, - ) -> Result { - let mut data = self.data.lock().expect("Error acquiring lock on data"); - - let identifier = step.era_id.value().to_string(); - let stringified_event = serde_json::to_string(&step).expect("Error serialising event data"); - - data.insert(identifier, stringified_event); - - Ok(0) - } - - #[allow(unused)] - async fn save_shutdown( - &self, - event_id: u32, - event_source_address: String, - ) -> Result { - let mut data = self.data.lock().expect("Error acquiring lock on data"); - let unix_timestamp = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Time went backwards") - .as_secs(); - let event_key = format!("{}-{}", event_source_address, unix_timestamp); - let stringified_event = serde_json::to_string("{}").expect("Error serialising event data"); - - data.insert(event_key, stringified_event); - Ok(0) - } - - async fn execute_migration(&self, _migration: Migration) -> Result<(), DatabaseWriteError> { - //Nothing to do here - Ok(()) - } -} - -#[async_trait] -impl DatabaseReader for FakeDatabase { - async fn get_latest_block(&self) -> Result { - let mut test_rng = TestRng::new(); - - let block_added = BlockAdded::random(&mut test_rng); - - Ok(block_added) - } - - async fn get_block_by_height(&self, height: u64) -> Result { - 
let data = self.data.lock().expect("Error acquiring lock on data"); - - return if let Some(event) = data.get(&height.to_string()) { - serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation) - } else { - Err(DatabaseReadError::NotFound) - }; - } - - async fn get_block_by_hash(&self, hash: &str) -> Result { - let data = self.data.lock().expect("Error acquiring lock on data"); - - return if let Some(event) = data.get(hash) { - serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation) - } else { - Err(DatabaseReadError::NotFound) - }; - } - - async fn get_deploy_aggregate_by_hash( - &self, - hash: &str, - ) -> Result { - let data = self.data.lock().expect("Error acquiring lock on data"); - - let accepted_key = format!("{}-accepted", hash); - let processed_key = format!("{}-processed", hash); - let expired_key = format!("{}-expired", hash); - - return if let Some(accepted) = data.get(&accepted_key) { - let deploy_accepted = serde_json::from_str::(accepted) - .map_err(DatabaseReadError::Serialisation)?; - - if let Some(processed) = data.get(&processed_key) { - let deploy_processed = serde_json::from_str::(processed) - .map_err(DatabaseReadError::Serialisation)?; - - Ok(DeployAggregate { - deploy_hash: hash.to_string(), - deploy_accepted: Some(deploy_accepted), - deploy_processed: Some(deploy_processed), - deploy_expired: false, - }) - } else if data.get(&expired_key).is_some() { - let deploy_expired = match data.get(&expired_key) { - None => None, - Some(raw) => Some( - serde_json::from_str::(raw) - .map_err(DatabaseReadError::Serialisation)?, - ), - }; - Ok(DeployAggregate { - deploy_hash: hash.to_string(), - deploy_accepted: Some(deploy_accepted), - deploy_processed: None, - deploy_expired: deploy_expired.is_some(), - }) - } else { - Ok(DeployAggregate { - deploy_hash: hash.to_string(), - deploy_accepted: Some(deploy_accepted), - deploy_processed: None, - deploy_expired: false, - }) - } - } else { - Err(DatabaseReadError::NotFound) - }; - 
} - - async fn get_deploy_accepted_by_hash( - &self, - hash: &str, - ) -> Result { - let identifier = format!("{}-accepted", hash); - - let data = self.data.lock().expect("Error acquiring lock on data"); - - return if let Some(event) = data.get(&identifier) { - serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation) - } else { - Err(DatabaseReadError::NotFound) - }; - } - - async fn get_deploy_processed_by_hash( - &self, - hash: &str, - ) -> Result { - let identifier = format!("{}-processed", hash); - - let data = self.data.lock().expect("Error acquiring lock on data"); - - return if let Some(event) = data.get(&identifier) { - serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation) - } else { - Err(DatabaseReadError::NotFound) - }; - } - - async fn get_deploy_expired_by_hash( - &self, - hash: &str, - ) -> Result { - let identifier = format!("{}-expired", hash); - - let data = self.data.lock().expect("Error acquiring lock on data"); - - return if let Some(event) = data.get(&identifier) { - serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation) - } else { - Err(DatabaseReadError::NotFound) - }; - } - - async fn get_faults_by_public_key( - &self, - public_key: &str, - ) -> Result, DatabaseReadError> { - let data = self.data.lock().expect("Error acquiring lock on data"); - - return if let Some(event) = data.get(public_key) { - let fault = - serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation)?; - Ok(vec![fault]) - } else { - Err(DatabaseReadError::NotFound) - }; - } - - async fn get_faults_by_era(&self, era: u64) -> Result, DatabaseReadError> { - let data = self.data.lock().expect("Error acquiring lock on data"); - - return if let Some(event) = data.get(&era.to_string()) { - let fault = - serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation)?; - Ok(vec![fault]) - } else { - Err(DatabaseReadError::NotFound) - }; - } - - async fn get_finality_signatures_by_block( - &self, - block_hash: 
&str, - ) -> Result, DatabaseReadError> { - let data = self.data.lock().expect("Error acquiring lock on data"); - - return if let Some(event) = data.get(block_hash) { - let finality_signature = - serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation)?; - Ok(vec![finality_signature]) - } else { - Err(DatabaseReadError::NotFound) - }; - } - - async fn get_step_by_era(&self, era: u64) -> Result { - let data = self.data.lock().expect("Error acquiring lock on data"); - - return if let Some(event) = data.get(&era.to_string()) { - serde_json::from_str::(event).map_err(DatabaseReadError::Serialisation) - } else { - Err(DatabaseReadError::NotFound) - }; - } - - async fn get_number_of_events(&self) -> Result { - Ok(0) - } - - async fn get_newest_migration_version(&self) -> Result, DatabaseReadError> { - Ok(None) - } -} - -pub struct IdentifiersForStoredEvents { - pub block_added_hash: String, - pub block_added_height: u64, - pub deploy_accepted_hash: String, - pub deploy_processed_hash: String, - pub deploy_expired_hash: String, - pub fault_public_key: String, - pub fault_era_id: u64, - pub finality_signatures_block_hash: String, - pub step_era_id: u64, -} diff --git a/sidecar/src/tests.rs b/sidecar/src/tests.rs deleted file mode 100644 index 436dfdd7..00000000 --- a/sidecar/src/tests.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod integration_tests; -pub mod integration_tests_version_switch; -pub mod performance_tests; diff --git a/sidecar/src/types/sse_events.rs b/sidecar/src/types/sse_events.rs deleted file mode 100644 index 5e63a769..00000000 --- a/sidecar/src/types/sse_events.rs +++ /dev/null @@ -1,213 +0,0 @@ -#[cfg(test)] -use casper_event_types::Digest; -use casper_event_types::{BlockHash, Deploy, DeployHash, FinalitySignature as FinSig, JsonBlock}; -#[cfg(test)] -use casper_types::testing::TestRng; -use casper_types::{ - AsymmetricType, EraId, ExecutionResult, ProtocolVersion, PublicKey, TimeDiff, Timestamp, -}; -use derive_new::new; -#[cfg(test)] -use 
rand::Rng; -use serde::{Deserialize, Serialize}; -use serde_json::value::RawValue; -use std::{ - fmt::{Display, Formatter}, - sync::Arc, -}; -use utoipa::ToSchema; - -/// The version of this node's API server. This event will always be the first sent to a new -/// client, and will have no associated event ID provided. -#[derive(Clone, Debug, Serialize, Deserialize, new)] -pub struct ApiVersion(ProtocolVersion); - -/// The given block has been added to the linear chain and stored locally. -#[derive(Clone, Debug, Serialize, Deserialize, new, ToSchema)] -pub struct BlockAdded { - block_hash: BlockHash, - block: Box, -} - -#[cfg(test)] -impl BlockAdded { - pub fn random(rng: &mut TestRng) -> Self { - let block = JsonBlock::random(rng); - Self { - block_hash: block.hash, - block: Box::new(block), - } - } -} - -impl BlockAdded { - pub fn hex_encoded_hash(&self) -> String { - hex::encode(self.block_hash.inner()) - } - - pub fn get_height(&self) -> u64 { - self.block.header.height - } -} - -/// The given deploy has been newly-accepted by this node. -#[derive(Clone, Debug, Serialize, Deserialize, new, ToSchema)] -pub struct DeployAccepted { - // It's an Arc to not create multiple copies of the same deploy for multiple subscribers. - deploy: Arc, -} - -impl DeployAccepted { - #[cfg(test)] - pub fn random(rng: &mut TestRng) -> Self { - Self { - deploy: Arc::new(Deploy::random(rng)), - } - } - - #[cfg(test)] - pub fn deploy_hash(&self) -> DeployHash { - self.deploy.hash().to_owned() - } - - pub fn hex_encoded_hash(&self) -> String { - hex::encode(self.deploy.hash().inner()) - } -} - -/// The given deploy has been executed, committed and forms part of the given block. 
-#[derive(Clone, Debug, Serialize, Deserialize, new, ToSchema)] -pub struct DeployProcessed { - deploy_hash: Box, - #[schema(value_type = String)] - account: Box, - #[schema(value_type = String)] - timestamp: Timestamp, - #[schema(value_type = String)] - ttl: TimeDiff, - dependencies: Vec, - block_hash: Box, - execution_result: Box, -} - -impl DeployProcessed { - #[cfg(test)] - pub fn random(rng: &mut TestRng, with_deploy_hash: Option) -> Self { - let deploy = Deploy::random(rng); - Self { - deploy_hash: Box::new(with_deploy_hash.unwrap_or(*deploy.hash())), - account: Box::new(deploy.header().account().clone()), - timestamp: deploy.header().timestamp(), - ttl: deploy.header().ttl(), - dependencies: deploy.header().dependencies().clone(), - block_hash: Box::new(BlockHash::random(rng)), - execution_result: Box::new(rng.gen()), - } - } - - pub fn hex_encoded_hash(&self) -> String { - hex::encode(self.deploy_hash.inner()) - } -} - -/// The given deploy has expired. -#[derive(Clone, Debug, Serialize, Deserialize, new, ToSchema)] -pub struct DeployExpired { - deploy_hash: DeployHash, -} - -impl DeployExpired { - #[cfg(test)] - pub fn random(rng: &mut TestRng, with_deploy_hash: Option) -> Self { - Self { - deploy_hash: with_deploy_hash.unwrap_or_else(|| DeployHash::new(Digest::random(rng))), - } - } - - pub fn hex_encoded_hash(&self) -> String { - hex::encode(self.deploy_hash.inner()) - } -} - -/// Generic representation of validator's fault in an era. -#[derive(Clone, Debug, Serialize, Deserialize, new, ToSchema)] -pub struct Fault { - #[schema(value_type = u64)] - pub era_id: EraId, - /// "Hex-encoded cryptographic public key, including the algorithm tag prefix." 
- #[schema(value_type = String)] - pub public_key: PublicKey, - #[schema(value_type = String)] - pub timestamp: Timestamp, -} - -impl Fault { - #[cfg(test)] - pub fn random(rng: &mut TestRng) -> Self { - Self { - era_id: EraId::new(rng.gen()), - public_key: PublicKey::random(rng), - timestamp: Timestamp::random(rng), - } - } -} - -impl Display for Fault { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{:#?}", self) - } -} - -/// New finality signature received. -#[derive(Clone, Debug, Serialize, Deserialize, new)] -pub struct FinalitySignature(Box); - -impl FinalitySignature { - #[cfg(test)] - pub fn random(rng: &mut TestRng) -> Self { - Self(Box::new(FinSig::random_for_block( - BlockHash::random(rng), - rng.gen(), - rng, - ))) - } - - pub fn inner(&self) -> FinSig { - *self.0.clone() - } - - pub fn hex_encoded_block_hash(&self) -> String { - hex::encode(self.0.block_hash().inner()) - } - - pub fn hex_encoded_public_key(&self) -> String { - self.0.public_key().to_hex() - } -} - -/// The execution effects produced by a `StepRequest`. -#[derive(Clone, Debug, Serialize, Deserialize, new, ToSchema)] -pub struct Step { - #[schema(value_type = u64)] - pub era_id: EraId, - #[schema(value_type = ExecutionEffect)] - //This technically is not amorphic, but this field is potentially > 30MB of size. By not parsing it we make the process of intaking these messages much quicker and less memory consuming - execution_effect: Box, -} - -impl Step { - #[cfg(test)] - pub fn random(rng: &mut TestRng) -> Self { - use serde_json::value::to_raw_value; - - let execution_effect = match rng.gen::() { - ExecutionResult::Success { effect, .. } | ExecutionResult::Failure { effect, .. 
} => { - effect - } - }; - Self { - era_id: EraId::new(rng.gen()), - execution_effect: to_raw_value(&execution_effect).unwrap(), - } - } -} diff --git a/types/Cargo.toml b/types/Cargo.toml index 6e396279..8d5bf75f 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -5,23 +5,29 @@ edition = "2021" description = "Types for casper-event-listener library" license-file = "../LICENSE" documentation = "README.md" -homepage = "https://github.com/CasperLabs/event-sidecar" -repository = "https://github.com/CasperLabs/event-sidecar" +homepage = "https://github.com/casper-network/casper-sidecar/" +repository = "https://github.com/casper-network/casper-sidecar/" [dependencies] base16 = "0.2.1" blake2 = { version = "0.9.0", optional = true } -casper-types = { version = "3.0.0", features = ["std"] } +casper-types = { workspace = true, features = ["std"] } hex-buffer-serde = "0.3.0" hex_fmt = "0.3.0" -once_cell = {workspace = true} -prometheus = { version = "0.13.3", features = ["process"]} +itertools = { workspace = true } +mockall = "0.12.1" +once_cell = { workspace = true } +pretty_assertions = "1.4.0" rand = { version = "0.8.5", optional = true } -serde = { version = "1", features = ["derive", "rc"] } +serde = { workspace = true, default-features = true, features = ["derive", "rc"] } serde_json = { version = "1.0", default-features = false, features = ["alloc", "raw_value"] } -thiserror = "1.0.39" -utoipa = { version = "3.4.4", features = ["rc_schema"]} +thiserror = { workspace = true } +utoipa = { version = "4", features = ["rc_schema"] } [features] sse-data-testing = ["blake2", "casper-types/testing", "rand"] additional-metrics = [] + +[dev-dependencies] +casper-types = { workspace = true, features = ["std", "testing"] } +rand = "0.8.3" diff --git a/types/src/block.rs b/types/src/block.rs deleted file mode 100644 index 12441360..00000000 --- a/types/src/block.rs +++ /dev/null @@ -1,656 +0,0 @@ -use casper_types::{ - bytesrepr, EraId, ProtocolVersion, PublicKey, 
SecretKey, Signature, Timestamp, U512, -}; -#[cfg(feature = "sse-data-testing")] -use casper_types::{bytesrepr::ToBytes, crypto, testing::TestRng}; -#[cfg(feature = "sse-data-testing")] -use rand::Rng; -use serde::{Deserialize, Serialize}; -#[cfg(feature = "sse-data-testing")] -use std::iter; -use std::{ - collections::BTreeMap, - fmt::{self, Display, Formatter}, - hash::Hash, -}; -use utoipa::ToSchema; - -use crate::{DeployHash, Digest}; - -/// A cryptographic hash identifying a [`Block`]. -#[derive( - Copy, - Clone, - Default, - Ord, - PartialOrd, - Eq, - PartialEq, - Hash, - Serialize, - Deserialize, - Debug, - ToSchema, -)] -#[serde(deny_unknown_fields)] -pub struct BlockHash(Digest); - -impl BlockHash { - /// Returns the wrapped inner hash. - pub fn inner(&self) -> &Digest { - &self.0 - } -} - -impl Display for BlockHash { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!(formatter, "block-hash({})", self.0,) - } -} - -#[cfg(feature = "sse-data-testing")] -impl BlockHash { - /// Creates a random block hash. 
- pub fn random(rng: &mut TestRng) -> Self { - let hash = Digest::from(rng.gen::<[u8; Digest::LENGTH]>()); - BlockHash(hash) - } -} - -#[cfg(feature = "sse-data-testing")] -impl ToBytes for BlockHash { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, ToSchema)] -pub struct EraReport { - #[schema(value_type = Vec)] - equivocators: Vec, - #[schema(value_type = Map)] - rewards: BTreeMap, - #[schema(value_type = Vec)] - inactive_validators: Vec, -} - -#[cfg(feature = "sse-data-testing")] -impl ToBytes for EraReport { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.equivocators.to_bytes()?); - buffer.extend(self.rewards.to_bytes()?); - buffer.extend(self.inactive_validators.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.equivocators.serialized_length() - + self.rewards.serialized_length() - + self.inactive_validators.serialized_length() - } -} - -#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] -/// A struct to contain information related to the end of an era and validator weights for the -/// following era. -pub struct EraEnd { - /// The era end information. - era_report: EraReport, - /// The validator weights for the next era. 
- next_era_validator_weights: BTreeMap, -} - -#[cfg(feature = "sse-data-testing")] -impl ToBytes for EraEnd { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.era_report.to_bytes()?); - buffer.extend(self.next_era_validator_weights.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.era_report.serialized_length() + self.next_era_validator_weights.serialized_length() - } -} - -/// The header portion of a [`Block`]. -#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, ToSchema)] -pub struct BlockHeader { - parent_hash: BlockHash, - state_root_hash: Digest, - body_hash: Digest, - random_bit: bool, - accumulated_seed: Digest, - era_end: Option, - #[schema(value_type = String)] - timestamp: Timestamp, - #[schema(value_type = u64)] - era_id: EraId, - height: u64, - /// The protocol version. - #[schema(value_type = String)] - protocol_version: ProtocolVersion, -} - -#[cfg(feature = "sse-data-testing")] -impl ToBytes for BlockHeader { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.parent_hash.to_bytes()?); - buffer.extend(self.state_root_hash.to_bytes()?); - buffer.extend(self.body_hash.to_bytes()?); - buffer.extend(self.random_bit.to_bytes()?); - buffer.extend(self.accumulated_seed.to_bytes()?); - buffer.extend(self.era_end.to_bytes()?); - buffer.extend(self.timestamp.to_bytes()?); - buffer.extend(self.era_id.to_bytes()?); - buffer.extend(self.height.to_bytes()?); - buffer.extend(self.protocol_version.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.parent_hash.serialized_length() - + self.state_root_hash.serialized_length() - + self.body_hash.serialized_length() - + self.random_bit.serialized_length() - + self.accumulated_seed.serialized_length() - + self.era_end.serialized_length() - + self.timestamp.serialized_length() - 
+ self.era_id.serialized_length() - + self.height.serialized_length() - + self.protocol_version.serialized_length() - } -} - -#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, ToSchema)] -pub struct BlockBody { - /// "Hex-encoded cryptographic public key, including the algorithm tag prefix." - #[schema(value_type = String)] - proposer: PublicKey, - deploy_hashes: Vec, - transfer_hashes: Vec, -} - -#[cfg(feature = "sse-data-testing")] -impl ToBytes for BlockBody { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.proposer.to_bytes()?); - buffer.extend(self.deploy_hashes.to_bytes()?); - buffer.extend(self.transfer_hashes.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.proposer.serialized_length() - + self.deploy_hashes.serialized_length() - + self.transfer_hashes.serialized_length() - } -} - -#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct Block { - hash: BlockHash, - header: BlockHeader, - body: BlockBody, -} - -#[cfg(feature = "sse-data-testing")] -impl Block { - /// The hash of this block's header. - pub fn hash(&self) -> &BlockHash { - &self.hash - } - - pub fn random(rng: &mut TestRng) -> Self { - // Create the block body. - let proposer = PublicKey::random(rng); - let deploy_count = rng.gen_range(0..11); - let deploy_hashes = iter::repeat_with(|| DeployHash::new(Digest::random(rng))) - .take(deploy_count) - .collect(); - let transfer_count = rng.gen_range(0..11); - let transfer_hashes = iter::repeat_with(|| DeployHash::new(Digest::random(rng))) - .take(transfer_count) - .collect(); - let body = BlockBody { - proposer, - deploy_hashes, - transfer_hashes, - }; - // Create the block header. - let header = random_block_header(rng, &body); - - // Create the block hash. 
- let serialized_header = header - .to_bytes() - .unwrap_or_else(|error| panic!("should serialize block header: {}", error)); - let hash = BlockHash(Digest::hash(serialized_header)); - - Block { hash, header, body } - } -} - -#[cfg(feature = "sse-data-testing")] -fn random_block_header(rng: &mut TestRng, body: &BlockBody) -> BlockHeader { - let parent_hash = BlockHash(Digest::random(rng)); - let state_root_hash = Digest::random(rng); - let serialized_body = body - .to_bytes() - .unwrap_or_else(|error| panic!("should serialize block body: {}", error)); - let body_hash = Digest::hash(serialized_body); - let random_bit = rng.gen(); - let accumulated_seed = Digest::random(rng); - let is_switch = rng.gen_bool(0.1); - let era_end = if is_switch { - Some(random_era_end(rng)) - } else { - None - }; - let timestamp = Timestamp::now(); - let era = rng.gen_range(1..6); - let height = era * 10 + rng.gen_range(0..10); - BlockHeader { - parent_hash, - state_root_hash, - body_hash, - random_bit, - accumulated_seed, - era_end, - timestamp, - era_id: EraId::new(era), - height, - protocol_version: ProtocolVersion::V1_0_0, - } -} - -#[cfg(feature = "sse-data-testing")] -fn random_era_end(rng: &mut TestRng) -> EraEnd { - const BLOCK_REWARD: u64 = 1_000_000_000_000; - let equivocators_count = rng.gen_range(0..5); - let rewards_count = rng.gen_range(0..5); - let inactive_count = rng.gen_range(0..5); - let era_report = EraReport { - equivocators: iter::repeat_with(|| PublicKey::random(rng)) - .take(equivocators_count) - .collect(), - rewards: iter::repeat_with(|| { - let public_key = PublicKey::random(rng); - let reward = rng.gen_range(1..(BLOCK_REWARD + 1)); - (public_key, reward) - }) - .take(rewards_count) - .collect(), - inactive_validators: iter::repeat_with(|| PublicKey::random(rng)) - .take(inactive_count) - .collect(), - }; - let validator_count = rng.gen_range(0..11); - let next_era_validator_weights = iter::repeat_with(|| (PublicKey::random(rng), rng.gen())) - 
.take(validator_count) - .collect(); - EraEnd { - era_report, - next_era_validator_weights, - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, ToSchema)] -pub struct FinalitySignature { - block_hash: BlockHash, - #[schema(value_type = u64)] - era_id: EraId, - #[schema(value_type = String)] - signature: Signature, - /// "Hex-encoded cryptographic public key, including the algorithm tag prefix." - #[schema(value_type = String)] - public_key: PublicKey, -} - -impl FinalitySignature { - /// Hash of a block this signature is for. - pub fn block_hash(&self) -> &BlockHash { - &self.block_hash - } - - /// Era in which the block was created in. - pub fn era_id(&self) -> EraId { - self.era_id - } - - /// Signature over the block hash. - pub fn signature(&self) -> &Signature { - &self.signature - } - - /// Public key of the signing validator. - pub fn public_key(&self) -> &PublicKey { - &self.public_key - } -} - -#[cfg(feature = "sse-data-testing")] -impl FinalitySignature { - pub fn random_for_block(block_hash: BlockHash, era_id: u64, rng: &mut TestRng) -> Self { - let mut bytes = block_hash.inner().into_vec(); - bytes.extend_from_slice(&era_id.to_le_bytes()); - let secret_key = SecretKey::random(rng); - let public_key = PublicKey::from(&secret_key); - let signature = crypto::sign(bytes, &secret_key, &public_key); - - FinalitySignature { - block_hash, - era_id: EraId::new(era_id), - signature, - public_key, - } - } -} - -pub mod json_compatibility { - use super::*; - use casper_types::PublicKey; - use utoipa::ToSchema; - - #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, ToSchema)] - #[serde(deny_unknown_fields)] - pub struct Reward { - /// "Hex-encoded cryptographic public key, including the algorithm tag prefix." 
- #[schema(value_type = String)] - validator: PublicKey, - #[schema(value_type = String)] - amount: u64, - } - - #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, ToSchema)] - #[serde(deny_unknown_fields)] - pub struct ValidatorWeight { - /// "Hex-encoded cryptographic public key, including the algorithm tag prefix." - #[schema(value_type = String)] - validator: PublicKey, - #[schema(value_type = String)] - weight: U512, - } - - /// Equivocation and reward information to be included in the terminal block. - #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, ToSchema)] - #[serde(deny_unknown_fields)] - pub struct JsonEraReport { - #[schema(value_type = Vec)] - equivocators: Vec, - rewards: Vec, - #[schema(value_type = Vec)] - inactive_validators: Vec, - } - - impl From for JsonEraReport { - fn from(era_report: EraReport) -> Self { - JsonEraReport { - equivocators: era_report.equivocators, - rewards: era_report - .rewards - .into_iter() - .map(|(validator, amount)| Reward { validator, amount }) - .collect(), - inactive_validators: era_report.inactive_validators, - } - } - } - - impl From for EraReport { - fn from(era_report: JsonEraReport) -> Self { - let equivocators = era_report.equivocators; - let rewards = era_report - .rewards - .into_iter() - .map(|reward| (reward.validator, reward.amount)) - .collect(); - let inactive_validators = era_report.inactive_validators; - EraReport { - equivocators, - rewards, - inactive_validators, - } - } - } - - #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, ToSchema)] - #[serde(deny_unknown_fields)] - pub struct JsonEraEnd { - era_report: JsonEraReport, - next_era_validator_weights: Vec, - } - - impl From for JsonEraEnd { - fn from(data: EraEnd) -> Self { - let json_era_end = JsonEraReport::from(data.era_report); - let json_validator_weights = data - .next_era_validator_weights - .iter() - .map(|(validator, weight)| ValidatorWeight { - validator: validator.clone(), - weight: *weight, - }) - 
.collect(); - JsonEraEnd { - era_report: json_era_end, - next_era_validator_weights: json_validator_weights, - } - } - } - - impl From for EraEnd { - fn from(json_data: JsonEraEnd) -> Self { - let era_report = EraReport::from(json_data.era_report); - let next_era_validator_weights = json_data - .next_era_validator_weights - .iter() - .map(|validator_weight| { - (validator_weight.validator.clone(), validator_weight.weight) - }) - .collect(); - EraEnd { - era_report, - next_era_validator_weights, - } - } - } - - /// JSON representation of a block header. - #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, ToSchema)] - #[serde(deny_unknown_fields)] - pub struct JsonBlockHeader { - /// The parent hash. - pub parent_hash: BlockHash, - /// The state root hash. - pub state_root_hash: Digest, - /// The body hash. - pub body_hash: Digest, - /// Randomness bit. - pub random_bit: bool, - /// Accumulated seed. - pub accumulated_seed: Digest, - /// The era end. - pub era_end: Option, - /// The block timestamp. - #[schema(value_type = String)] - pub timestamp: Timestamp, - /// The block era id. - #[schema(value_type = u64)] - pub era_id: EraId, - /// The block height. - pub height: u64, - /// The protocol version. 
- #[schema(value_type = String)] - pub protocol_version: ProtocolVersion, - } - - impl From for JsonBlockHeader { - fn from(block_header: BlockHeader) -> Self { - JsonBlockHeader { - parent_hash: block_header.parent_hash, - state_root_hash: block_header.state_root_hash, - body_hash: block_header.body_hash, - random_bit: block_header.random_bit, - accumulated_seed: block_header.accumulated_seed, - era_end: block_header.era_end.map(JsonEraEnd::from), - timestamp: block_header.timestamp, - era_id: block_header.era_id, - height: block_header.height, - protocol_version: block_header.protocol_version, - } - } - } - - impl From for BlockHeader { - fn from(block_header: JsonBlockHeader) -> Self { - BlockHeader { - parent_hash: block_header.parent_hash, - state_root_hash: block_header.state_root_hash, - body_hash: block_header.body_hash, - random_bit: block_header.random_bit, - accumulated_seed: block_header.accumulated_seed, - era_end: block_header.era_end.map(EraEnd::from), - timestamp: block_header.timestamp, - era_id: block_header.era_id, - height: block_header.height, - protocol_version: block_header.protocol_version, - } - } - } - - /// A JSON-friendly representation of `Body` - #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, ToSchema)] - #[serde(deny_unknown_fields)] - pub struct JsonBlockBody { - /// "Hex-encoded cryptographic public key, including the algorithm tag prefix." 
- #[schema(value_type = String)] - proposer: PublicKey, - deploy_hashes: Vec, - transfer_hashes: Vec, - } - - impl From for JsonBlockBody { - fn from(body: BlockBody) -> Self { - JsonBlockBody { - proposer: body.proposer.clone(), - deploy_hashes: body.deploy_hashes.clone(), - transfer_hashes: body.transfer_hashes, - } - } - } - - impl From for BlockBody { - fn from(json_body: JsonBlockBody) -> Self { - BlockBody { - proposer: json_body.proposer, - deploy_hashes: json_body.deploy_hashes, - transfer_hashes: json_body.transfer_hashes, - } - } - } - - /// A JSON-friendly representation of `Block`. - #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, ToSchema)] - #[serde(deny_unknown_fields)] - pub struct JsonBlock { - /// `BlockHash` - pub hash: BlockHash, - /// JSON-friendly block header. - pub header: JsonBlockHeader, - /// JSON-friendly block body. - pub body: JsonBlockBody, - /// JSON-friendly list of proofs for this block. - pub proofs: Vec, - } - - impl JsonBlock { - /// Creates a new JSON block with no proofs from a linear chain block. - pub fn new_unsigned(block: Block) -> Self { - JsonBlock { - hash: block.hash, - header: JsonBlockHeader::from(block.header.clone()), - body: JsonBlockBody::from(block.body), - proofs: Vec::new(), - } - } - - /// Returns the hashes of the `Deploy`s included in the `Block`. - pub fn deploy_hashes(&self) -> &Vec { - &self.body.deploy_hashes - } - - /// Returns the hashes of the transfer `Deploy`s included in the `Block`. 
- pub fn transfer_hashes(&self) -> &Vec { - &self.body.transfer_hashes - } - - #[cfg(feature = "sse-data-testing")] - pub fn random(rng: &mut TestRng) -> Self { - let block = Block::random(rng); - let proofs_count = rng.gen_range(0..11); - let proofs = iter::repeat_with(|| { - let finality_signature = FinalitySignature::random_for_block( - block.hash, - block.header.era_id.value(), - rng, - ); - JsonProof { - public_key: finality_signature.public_key, - signature: finality_signature.signature, - } - }) - .take(proofs_count) - .collect(); - JsonBlock { - hash: block.hash, - header: JsonBlockHeader::from(block.header.clone()), - body: JsonBlockBody::from(block.body), - proofs, - } - } - } - - impl From for Block { - fn from(block: JsonBlock) -> Self { - Block { - hash: block.hash, - header: BlockHeader::from(block.header), - body: BlockBody::from(block.body), - } - } - } - - /// A JSON-friendly representation of a proof, i.e. a block's finality signature. - #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema)] - #[serde(deny_unknown_fields)] - pub struct JsonProof { - /// "Hex-encoded cryptographic public key, including the algorithm tag prefix." 
- #[schema(value_type = String)] - public_key: PublicKey, - #[schema(value_type = String)] - signature: Signature, - } - - impl From<(PublicKey, Signature)> for JsonProof { - fn from((public_key, signature): (PublicKey, Signature)) -> JsonProof { - JsonProof { - public_key, - signature, - } - } - } - - impl From for (PublicKey, Signature) { - fn from(proof: JsonProof) -> (PublicKey, Signature) { - (proof.public_key, proof.signature) - } - } -} diff --git a/types/src/deploy.rs b/types/src/deploy.rs deleted file mode 100644 index 148fbeb1..00000000 --- a/types/src/deploy.rs +++ /dev/null @@ -1,311 +0,0 @@ -#[cfg(feature = "sse-data-testing")] -use std::iter; -use std::{ - collections::BTreeSet, - fmt::{self, Display, Formatter}, -}; - -#[cfg(feature = "sse-data-testing")] -use rand::Rng; -use serde::{Deserialize, Serialize}; - -#[cfg(feature = "sse-data-testing")] -use casper_types::{bytesrepr::ToBytes, testing::TestRng}; -use casper_types::{ - bytesrepr::{self}, - runtime_args, PublicKey, RuntimeArgs, SecretKey, Signature, TimeDiff, Timestamp, U512, -}; -use utoipa::ToSchema; - -use crate::{Digest, ExecutableDeployItem}; - -/// A cryptographic hash uniquely identifying a [`Deploy`]. -#[derive( - Copy, - Clone, - Default, - Ord, - PartialOrd, - Eq, - PartialEq, - Hash, - Serialize, - Deserialize, - Debug, - ToSchema, -)] -#[serde(deny_unknown_fields)] -pub struct DeployHash(Digest); - -impl DeployHash { - /// Returns a new `DeployHash`. - pub fn new(digest: Digest) -> Self { - DeployHash(digest) - } - - /// Returns a copy of the wrapped `Digest`. 
- pub fn inner(&self) -> &Digest { - &self.0 - } -} - -impl Display for DeployHash { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!(formatter, "{}", self.0) - } -} - -#[cfg(feature = "sse-data-testing")] -impl ToBytes for DeployHash { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -/// The header portion of a [`Deploy`]. -#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, ToSchema)] -#[serde(deny_unknown_fields)] -pub struct DeployHeader { - /// "Hex-encoded cryptographic public key, including the algorithm tag prefix." - #[schema(value_type = String)] - account: PublicKey, - #[schema(value_type = String)] - timestamp: Timestamp, - #[schema(value_type = String)] - ttl: TimeDiff, - gas_price: u64, - body_hash: Digest, - dependencies: Vec, - chain_name: String, -} - -impl DeployHeader { - /// Returns the account within which the deploy will be run. - pub fn account(&self) -> &PublicKey { - &self.account - } - - /// Returns the deploy creation timestamp. - pub fn timestamp(&self) -> Timestamp { - self.timestamp - } - - /// Returns the duration for which the deploy will stay valid. - pub fn ttl(&self) -> TimeDiff { - self.ttl - } - - /// Returns the price per gas unit for this deploy. - pub fn gas_price(&self) -> u64 { - self.gas_price - } - - /// Returns the hash of the body of this deploy. - pub fn body_hash(&self) -> &Digest { - &self.body_hash - } - - /// Other deploys that have to be run before this one. - pub fn dependencies(&self) -> &Vec { - &self.dependencies - } - - /// Returns the chain name of the network the deploy is supposed to be run on. 
- pub fn chain_name(&self) -> &str { - &self.chain_name - } -} - -impl Display for DeployHeader { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!( - formatter, - "deploy header {{ account {}, timestamp {}, ttl {}, body hash {}, chain name {} }}", - self.account, self.timestamp, self.ttl, self.body_hash, self.chain_name, - ) - } -} - -#[cfg(feature = "sse-data-testing")] -impl ToBytes for DeployHeader { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.account.to_bytes()?); - buffer.extend(self.timestamp.to_bytes()?); - buffer.extend(self.ttl.to_bytes()?); - buffer.extend(self.gas_price.to_bytes()?); - buffer.extend(self.body_hash.to_bytes()?); - buffer.extend(self.dependencies.to_bytes()?); - buffer.extend(self.chain_name.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.account.serialized_length() - + self.timestamp.serialized_length() - + self.ttl.serialized_length() - + self.gas_price.serialized_length() - + self.body_hash.serialized_length() - + self.dependencies.serialized_length() - + self.chain_name.serialized_length() - } -} - -/// The signature of a deploy and the public key of the signer. -#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, ToSchema)] -#[serde(deny_unknown_fields)] -pub struct Approval { - /// "Hex-encoded cryptographic public key, including the algorithm tag prefix." - #[schema(value_type = String)] - signer: PublicKey, - #[schema(value_type = String)] - signature: Signature, -} - -#[cfg(feature = "sse-data-testing")] -impl Approval { - pub fn create(hash: &DeployHash, secret_key: &SecretKey) -> Self { - let signer = PublicKey::from(secret_key); - let signature = casper_types::sign(hash.0, secret_key, &signer); - Self { signer, signature } - } -} - -/// A signed item sent to the network used to request execution of Wasm. 
-#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, ToSchema)] -#[serde(deny_unknown_fields)] -pub struct Deploy { - hash: DeployHash, - header: DeployHeader, - payment: ExecutableDeployItem, - session: ExecutableDeployItem, - #[schema(value_type = Vec)] - approvals: BTreeSet, -} - -impl Deploy { - /// Returns the hash uniquely identifying this deploy. - pub fn hash(&self) -> &DeployHash { - &self.hash - } - - /// Returns the header portion of the deploy. - pub fn header(&self) -> &DeployHeader { - &self.header - } - - /// Returns the payment code of the deploy. - pub fn payment(&self) -> &ExecutableDeployItem { - &self.payment - } - - /// Returns the session code of the deploy. - pub fn session(&self) -> &ExecutableDeployItem { - &self.session - } - - /// Returns the `Approval`s of the deploy. - pub fn approvals(&self) -> &BTreeSet { - &self.approvals - } -} - -impl Display for Deploy { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!( - formatter, - "deploy {{ {}, account {}, timestamp {}, ttl {}, body hash {}, chain name {} }}", - self.hash, - self.header.account, - self.header.timestamp, - self.header.ttl, - self.header.body_hash, - self.header.chain_name - ) - } -} - -#[cfg(feature = "sse-data-testing")] -impl Deploy { - pub fn random(rng: &mut TestRng) -> Self { - let timestamp = Timestamp::random(rng); - let ttl = TimeDiff::from_millis(rng.gen_range(60_000..3_600_000)); - Deploy::random_with_timestamp_and_ttl(rng, timestamp, ttl) - } - - /// Generates a random instance but using the specified `timestamp` and `ttl`. - pub fn random_with_timestamp_and_ttl( - rng: &mut TestRng, - timestamp: Timestamp, - ttl: TimeDiff, - ) -> Self { - // Create the deploy "body", i.e. the payment and session items. - // - // We need "amount" in order to be able to get correct info via `deploy_info()`. - let payment_args = runtime_args! 
{ - "amount" => U512::from(10), - }; - let payment = ExecutableDeployItem::StoredContractByName { - name: String::from("casper-example"), - entry_point: String::from("example-entry-point"), - args: payment_args, - }; - let session = rng.gen(); - - // Create the deploy header. - let secret_key = SecretKey::random(rng); - let account = PublicKey::from(&secret_key); - let gas_price = rng.gen_range(1..100); - let body_hash = Digest::hash(serialize_body(&payment, &session)); - let dependencies_count = rng.gen_range(0..4); - let dependencies = iter::repeat_with(|| DeployHash::new(Digest::random(rng))) - .take(dependencies_count) - .collect(); - let chain_name = String::from("casper-example"); - let header = DeployHeader { - account, - timestamp, - ttl, - gas_price, - body_hash, - dependencies, - chain_name, - }; - - // Create the deploy hash and approval. - let hash = DeployHash::new(Digest::hash(serialize_header(&header))); - let approvals = iter::once(Approval::create(&hash, &secret_key)).collect(); - - Deploy { - hash, - header, - payment, - session, - approvals, - } - } -} - -#[cfg(feature = "sse-data-testing")] -fn serialize_header(header: &DeployHeader) -> Vec { - header - .to_bytes() - .unwrap_or_else(|error| panic!("should serialize deploy header: {}", error)) -} - -#[cfg(feature = "sse-data-testing")] -fn serialize_body(payment: &ExecutableDeployItem, session: &ExecutableDeployItem) -> Vec { - let mut buffer = payment - .to_bytes() - .unwrap_or_else(|error| panic!("should serialize payment code: {}", error)); - buffer.extend( - session - .to_bytes() - .unwrap_or_else(|error| panic!("should serialize session code: {}", error)), - ); - buffer -} diff --git a/types/src/digest.rs b/types/src/digest.rs deleted file mode 100644 index c76675bd..00000000 --- a/types/src/digest.rs +++ /dev/null @@ -1,121 +0,0 @@ -use std::{ - array::TryFromSliceError, - fmt::{self, Debug, Display, Formatter}, -}; - -#[cfg(feature = "sse-data-testing")] -use blake2::{ - digest::{Update, 
VariableOutput}, - VarBlake2b, -}; -use hex_fmt::HexFmt; -#[cfg(feature = "sse-data-testing")] -use rand::Rng; -use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; - -#[cfg(feature = "sse-data-testing")] -use casper_types::bytesrepr::{self, ToBytes}; -use casper_types::{checksummed_hex, testing::TestRng}; -use utoipa::ToSchema; - -/// The output of the hash function. -#[derive(Copy, Clone, Default, Ord, PartialOrd, Eq, PartialEq, Hash, ToSchema)] -pub struct Digest([u8; Digest::LENGTH]); - -impl Digest { - /// The number of bytes in a `Digest`. - pub const LENGTH: usize = 32; -} - -impl<'a> TryFrom<&'a [u8]> for Digest { - type Error = TryFromSliceError; - - fn try_from(slice: &[u8]) -> Result { - <[u8; Digest::LENGTH]>::try_from(slice).map(Digest) - } -} - -impl Serialize for Digest { - fn serialize(&self, serializer: S) -> Result { - if serializer.is_human_readable() { - HexFmt(&self.0).to_string().serialize(serializer) - } else { - // This is to keep backwards compatibility with how HexForm encodes byte arrays. - // HexForm treats this like a slice. 
- self.0[..].serialize(serializer) - } - } -} - -impl<'de> Deserialize<'de> for Digest { - fn deserialize>(deserializer: D) -> Result { - if deserializer.is_human_readable() { - let hex_string = String::deserialize(deserializer)?; - let bytes = - checksummed_hex::decode(hex_string.as_bytes()).map_err(SerdeError::custom)?; - let data = - <[u8; Digest::LENGTH]>::try_from(bytes.as_ref()).map_err(SerdeError::custom)?; - Ok(Digest(data)) - } else { - let data = >::deserialize(deserializer)?; - Digest::try_from(data.as_slice()).map_err(D::Error::custom) - } - } -} - -impl Debug for Digest { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!(f, "{}", HexFmt(&self.0)) - } -} - -impl Display for Digest { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!(f, "{:10}", HexFmt(&self.0)) - } -} - -#[cfg(feature = "sse-data-testing")] -impl Digest { - pub fn hash>(data: T) -> Digest { - let mut ret = [0u8; Digest::LENGTH]; - // NOTE: Safe to unwrap here because our digest length is constant and valid - let mut hasher = VarBlake2b::new(Digest::LENGTH).unwrap(); - hasher.update(data); - hasher.finalize_variable(|hash| ret.clone_from_slice(hash)); - Digest(ret) - } - - pub fn random(rng: &mut TestRng) -> Digest { - Digest(rng.gen()) - } - - pub fn into_vec(self) -> Vec { - self.0.to_vec() - } -} - -#[cfg(feature = "sse-data-testing")] -impl AsRef<[u8]> for Digest { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -#[cfg(feature = "sse-data-testing")] -impl From<[u8; Digest::LENGTH]> for Digest { - fn from(arr: [u8; Digest::LENGTH]) -> Self { - Digest(arr) - } -} - -#[cfg(feature = "sse-data-testing")] -impl ToBytes for Digest { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} diff --git a/types/src/executable_deploy_item.rs b/types/src/executable_deploy_item.rs deleted file mode 100644 index 4b15b2ec..00000000 --- a/types/src/executable_deploy_item.rs 
+++ /dev/null @@ -1,328 +0,0 @@ -use hex_buffer_serde::{Hex, HexForm}; -#[cfg(feature = "sse-data-testing")] -use rand::{ - distributions::{Alphanumeric, Distribution, Standard}, - Rng, -}; -use serde::{Deserialize, Serialize}; - -#[cfg(feature = "sse-data-testing")] -use casper_types::bytesrepr::{self, Bytes, ToBytes}; -use casper_types::{ - system::auction::ARG_AMOUNT, CLValue, ContractHash, ContractPackageHash, ContractVersion, - RuntimeArgs, U512, -}; -use utoipa::ToSchema; - -#[cfg(feature = "sse-data-testing")] -macro_rules! bx { - ($e:expr) => { - Box::new($e) - }; -} - -#[cfg(feature = "sse-data-testing")] -const TAG_LENGTH: usize = 1; -#[cfg(feature = "sse-data-testing")] -const MODULE_BYTES_TAG: u8 = 0; -#[cfg(feature = "sse-data-testing")] -const STORED_CONTRACT_BY_HASH_TAG: u8 = 1; -#[cfg(feature = "sse-data-testing")] -const STORED_CONTRACT_BY_NAME_TAG: u8 = 2; -#[cfg(feature = "sse-data-testing")] -const STORED_VERSIONED_CONTRACT_BY_HASH_TAG: u8 = 3; -#[cfg(feature = "sse-data-testing")] -const STORED_VERSIONED_CONTRACT_BY_NAME_TAG: u8 = 4; -#[cfg(feature = "sse-data-testing")] -const TRANSFER_TAG: u8 = 5; -#[cfg(feature = "sse-data-testing")] -const MAX_PAYMENT_AMOUNT: u64 = 2_500_000_000; - -/// The payment or session code of a [`Deploy`]. -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug, ToSchema)] -#[serde(deny_unknown_fields)] -pub enum ExecutableDeployItem { - /// Raw bytes of compiled Wasm code, which must include a `call` entry point, and the arguments - /// to call at runtime. - ModuleBytes { - /// The compiled Wasm bytes. - #[schema(value_type = String)] - module_bytes: Bytes, - /// The arguments to be passed to the entry point at runtime. - args: RuntimeArgs, - }, - /// A contract stored in global state, referenced by its "hash", along with the entry point and - /// arguments to call at runtime. - StoredContractByHash { - /// The contract's identifier. 
- #[serde(with = "HexForm")] - hash: ContractHash, - /// The contract's entry point to be called at runtime. - entry_point: String, - /// The arguments to be passed to the entry point at runtime. - args: RuntimeArgs, - }, - /// A contract stored in global state, referenced by a named key existing in the `Deploy`'s - /// account context, along with the entry point and arguments to call at runtime. - StoredContractByName { - /// The named of the named key under which the contract is referenced. - name: String, - /// The contract's entry point to be called at runtime. - entry_point: String, - /// The arguments to be passed to the entry point at runtime. - args: RuntimeArgs, - }, - /// A versioned contract stored in global state, referenced by its "hash", along with the entry - /// point and arguments to call at runtime. - StoredVersionedContractByHash { - /// The contract package's identifier. - #[serde(with = "HexForm")] - hash: ContractPackageHash, - /// The version of the contract to call. If `None`, the highest enabled version is used. - version: Option, - /// The contract's entry point to be called at runtime. - entry_point: String, - /// The arguments to be passed to the entry point at runtime. - args: RuntimeArgs, - }, - /// A versioned contract stored in global state, referenced by a named key existing in the - /// `Deploy`'s account context, along with the entry point and arguments to call at runtime. - StoredVersionedContractByName { - /// The named of the named key under which the contract package is referenced. - name: String, - /// The version of the contract to call. If `None`, the highest enabled version is used. - version: Option, - /// The contract's entry point to be called at runtime. - entry_point: String, - /// The arguments to be passed to the entry point at runtime. - args: RuntimeArgs, - }, - /// A native transfer which does not contain or reference any Wasm code. 
- Transfer { - /// The arguments to be passed to the native transfer entry point at runtime. - args: RuntimeArgs, - }, -} - -#[cfg(feature = "sse-data-testing")] -impl ExecutableDeployItem { - fn fields_serialized_length(&self) -> usize { - let components: Vec> = match self { - ExecutableDeployItem::ModuleBytes { module_bytes, args } => { - vec![bx!(module_bytes), bx!(args)] - } - ExecutableDeployItem::StoredContractByHash { - hash, - entry_point, - args, - } => vec![bx!(hash), bx!(entry_point), bx!(args)], - ExecutableDeployItem::StoredContractByName { - name, - entry_point, - args, - } => vec![bx!(name), bx!(entry_point), bx!(args)], - ExecutableDeployItem::StoredVersionedContractByHash { - hash, - version, - entry_point, - args, - } => vec![bx!(hash), bx!(version), bx!(entry_point), bx!(args)], - ExecutableDeployItem::StoredVersionedContractByName { - name, - version, - entry_point, - args, - } => vec![bx!(name), bx!(version), bx!(entry_point), bx!(args)], - ExecutableDeployItem::Transfer { args } => vec![bx!(args)], - }; - components - .into_iter() - .map(|to_bytes| to_bytes.serialized_length()) - .sum() - } -} - -#[cfg(feature = "sse-data-testing")] -impl ToBytes for ExecutableDeployItem { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - match self { - ExecutableDeployItem::ModuleBytes { module_bytes, args } => { - write_module_bytes(&mut buffer, module_bytes, args)? 
- } - ExecutableDeployItem::StoredContractByHash { - hash, - entry_point, - args, - } => write_stored_contract(&mut buffer, hash, entry_point, args)?, - ExecutableDeployItem::StoredContractByName { - name, - entry_point, - args, - } => write_stored_contract_by_name(&mut buffer, name, entry_point, args)?, - ExecutableDeployItem::StoredVersionedContractByHash { - hash, - version, - entry_point, - args, - } => write_versioned_contract_by_hash(&mut buffer, hash, version, entry_point, args)?, - ExecutableDeployItem::StoredVersionedContractByName { - name, - version, - entry_point, - args, - } => write_versioned_contract_by_name(&mut buffer, name, version, entry_point, args)?, - ExecutableDeployItem::Transfer { args } => write_transfer(&mut buffer, args)?, - } - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - TAG_LENGTH + self.fields_serialized_length() - } -} - -#[cfg(feature = "sse-data-testing")] -fn write_transfer(buffer: &mut Vec, args: &RuntimeArgs) -> Result<(), bytesrepr::Error> { - buffer.insert(0, TRANSFER_TAG); - buffer.extend(args.to_bytes()?); - Ok(()) -} - -#[cfg(feature = "sse-data-testing")] -fn write_versioned_contract_by_name( - buffer: &mut Vec, - name: &String, - version: &Option, - entry_point: &String, - args: &RuntimeArgs, -) -> Result<(), bytesrepr::Error> { - buffer.insert(0, STORED_VERSIONED_CONTRACT_BY_NAME_TAG); - buffer.extend(name.to_bytes()?); - buffer.extend(version.to_bytes()?); - buffer.extend(entry_point.to_bytes()?); - buffer.extend(args.to_bytes()?); - Ok(()) -} - -#[cfg(feature = "sse-data-testing")] -fn write_versioned_contract_by_hash( - buffer: &mut Vec, - hash: &ContractPackageHash, - version: &Option, - entry_point: &String, - args: &RuntimeArgs, -) -> Result<(), bytesrepr::Error> { - buffer.insert(0, STORED_VERSIONED_CONTRACT_BY_HASH_TAG); - buffer.extend(hash.to_bytes()?); - buffer.extend(version.to_bytes()?); - buffer.extend(entry_point.to_bytes()?); - buffer.extend(args.to_bytes()?); - Ok(()) -} - -#[cfg(feature 
= "sse-data-testing")] -fn write_stored_contract_by_name( - buffer: &mut Vec, - name: &String, - entry_point: &String, - args: &RuntimeArgs, -) -> Result<(), bytesrepr::Error> { - buffer.insert(0, STORED_CONTRACT_BY_NAME_TAG); - buffer.extend(name.to_bytes()?); - buffer.extend(entry_point.to_bytes()?); - buffer.extend(args.to_bytes()?); - Ok(()) -} - -#[cfg(feature = "sse-data-testing")] -fn write_stored_contract( - buffer: &mut Vec, - hash: &ContractHash, - entry_point: &String, - args: &RuntimeArgs, -) -> Result<(), bytesrepr::Error> { - buffer.insert(0, STORED_CONTRACT_BY_HASH_TAG); - buffer.extend(hash.to_bytes()?); - buffer.extend(entry_point.to_bytes()?); - buffer.extend(args.to_bytes()?); - Ok(()) -} - -#[cfg(feature = "sse-data-testing")] -fn write_module_bytes( - buffer: &mut Vec, - module_bytes: &Bytes, - args: &RuntimeArgs, -) -> Result<(), bytesrepr::Error> { - buffer.insert(0, MODULE_BYTES_TAG); - buffer.extend(module_bytes.to_bytes()?); - buffer.extend(args.to_bytes()?); - Ok(()) -} - -#[cfg(feature = "sse-data-testing")] -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> ExecutableDeployItem { - let mut args = RuntimeArgs::new(); - let _ = args.insert(random_string(rng), Bytes::from(random_bytes(rng))); - match rng.gen_range(0..5) { - 0 => ExecutableDeployItem::ModuleBytes { - module_bytes: random_bytes(rng).into(), - args, - }, - 1 => ExecutableDeployItem::StoredContractByHash { - hash: ContractHash::new(rng.gen()), - entry_point: random_string(rng), - args, - }, - 2 => ExecutableDeployItem::StoredContractByName { - name: random_string(rng), - entry_point: random_string(rng), - args, - }, - 3 => ExecutableDeployItem::StoredVersionedContractByHash { - hash: ContractPackageHash::new(rng.gen()), - version: rng.gen(), - entry_point: random_string(rng), - args, - }, - 4 => ExecutableDeployItem::StoredVersionedContractByName { - name: random_string(rng), - version: rng.gen(), - entry_point: random_string(rng), - args, - }, - 5 => 
random_transfer(rng), - _ => unreachable!(), - } - } -} - -#[cfg(feature = "sse-data-testing")] -fn random_string(rng: &mut R) -> String { - rng.sample_iter(&Alphanumeric) - .take(20) - .map(char::from) - .collect() -} - -#[cfg(feature = "sse-data-testing")] -fn random_bytes(rng: &mut R) -> Vec { - let mut bytes = vec![0u8; rng.gen_range(0..100)]; - rng.fill_bytes(bytes.as_mut()); - bytes -} - -#[cfg(feature = "sse-data-testing")] -fn random_transfer(rng: &mut R) -> ExecutableDeployItem { - let amount = rng.gen_range(MAX_PAYMENT_AMOUNT..1_000_000_000_000_000); - let mut transfer_args = RuntimeArgs::new(); - transfer_args.insert_cl_value( - ARG_AMOUNT, - CLValue::from_t(U512::from(amount)).expect("should get CLValue from U512"), - ); - ExecutableDeployItem::Transfer { - args: transfer_args, - } -} diff --git a/types/src/filter.rs b/types/src/filter.rs index 58779b7c..93776fc2 100644 --- a/types/src/filter.rs +++ b/types/src/filter.rs @@ -4,18 +4,12 @@ use std::fmt::{Display, Formatter}; #[derive(Hash, Eq, PartialEq, Debug, Clone)] pub enum Filter { Events, - Main, - Deploys, - Sigs, } impl Display for Filter { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { Filter::Events => write!(f, "events"), - Filter::Main => write!(f, "events/main"), - Filter::Deploys => write!(f, "events/deploys"), - Filter::Sigs => write!(f, "events/sigs"), } } } diff --git a/types/src/legacy_sse_data/fixtures.rs b/types/src/legacy_sse_data/fixtures.rs new file mode 100644 index 00000000..6158175c --- /dev/null +++ b/types/src/legacy_sse_data/fixtures.rs @@ -0,0 +1,893 @@ +use super::{structs, LegacySseData}; +use crate::{ + sse_data::SseData, + testing::{parse_block_hash, parse_digest, parse_public_key}, +}; +use casper_types::{ + system::auction::ValidatorWeights, testing::TestRng, BlockHash, BlockV2, Deploy, DeployHash, + Digest, EraEndV2, EraId, ProtocolVersion, PublicKey, RewardedSignatures, + SingleBlockRewardedSignatures, TestBlockBuilder, TimeDiff, Timestamp, 
Transaction, + TransactionV1, TransactionV1Hash, U512, +}; +use rand::Rng; +use std::{ + collections::{BTreeMap, BTreeSet}, + str::FromStr, +}; + +pub fn legacy_block_added() -> LegacySseData { + serde_json::from_str(RAW_LEGACY_BLOCK_ADDED).unwrap() +} + +pub fn legacy_block_added_from_v2() -> LegacySseData { + serde_json::from_str(RAW_LEGACY_BLOCK_ADDED_FROM_V2).unwrap() +} + +pub fn block_added_v1() -> SseData { + serde_json::from_str(RAW_BLOCK_ADDED_V1).unwrap() +} + +pub fn block_added_v2() -> SseData { + serde_json::from_str(RAW_BLOCK_ADDED_V2).unwrap() +} + +pub fn api_version() -> SseData { + serde_json::from_str(RAW_API_VERSION).unwrap() +} + +pub fn legacy_api_version() -> LegacySseData { + serde_json::from_str(RAW_API_VERSION).unwrap() +} + +pub fn finality_signature_v1() -> SseData { + serde_json::from_str(RAW_FINALITY_SIGNATURE_V1).unwrap() +} + +pub fn finality_signature_v2() -> SseData { + serde_json::from_str(RAW_FINALITY_SIGNATURE_V2).unwrap() +} + +pub fn transaction_accepted() -> SseData { + serde_json::from_str(RAW_TRANSACTION_ACCEPTED).unwrap() +} + +pub fn deploy_accepted() -> SseData { + serde_json::from_str(RAW_DEPLOY_ACCEPTED).unwrap() +} + +pub fn legacy_deploy_accepted() -> LegacySseData { + serde_json::from_str(RAW_LEGACY_DEPLOY_ACCEPTED).unwrap() +} + +pub fn deploy_expired() -> SseData { + serde_json::from_str(RAW_DEPLOY_EXPIRED).unwrap() +} + +pub fn transaction_expired() -> SseData { + serde_json::from_str(RAW_TRANSACTION_EXPIRED).unwrap() +} + +pub fn legacy_deploy_expired() -> LegacySseData { + serde_json::from_str(RAW_LEGACY_DEPLOY_EXPIRED).unwrap() +} + +pub fn legacy_finality_signature() -> LegacySseData { + serde_json::from_str(RAW_LEGACY_FINALITY_SIGNATURE).unwrap() +} + +pub fn fault() -> SseData { + serde_json::from_str(RAW_FAULT).unwrap() +} + +pub fn legacy_fault() -> LegacySseData { + serde_json::from_str(RAW_LEGACY_FAULT).unwrap() +} + +pub fn deploy_processed() -> SseData { + 
serde_json::from_str(RAW_DEPLOY_PROCESSED).unwrap() +} + +pub fn legacy_deploy_processed() -> LegacySseData { + serde_json::from_str(RAW_LEGACY_DEPLOY_PROCESSED).unwrap() +} + +pub fn parent_hash() -> BlockHash { + parse_block_hash("90a4ade2849634e9c1ad0e02cb30645d0984056f68075cad8f6cad2b42a824ba") +} + +pub fn state_root_hash() -> Digest { + parse_digest("9cce223fdbeab41dbbcf0b62f3fd857373131378d51776de26bb9f4fefe1e849") +} + +pub fn timestamp() -> Timestamp { + Timestamp::from_str("2020-08-07T01:30:25.521Z").unwrap() +} + +pub fn proposer() -> PublicKey { + parse_public_key("0203426736da2554ebf1f8ee1d2ce4ab11b1e33419d7dfc1ce2fe1945faf00bacc9e") +} + +#[allow(clippy::too_many_arguments)] +pub fn block_v2_with_transactions( + rng: &mut TestRng, + parent_hash: BlockHash, + state_root_hash: Digest, + timestamp: Timestamp, + era_id: EraId, + height: u64, + proposer: PublicKey, + transactions: Vec<&Transaction>, +) -> BlockV2 { + let mut validator_weights = ValidatorWeights::new(); + let key_1 = + parse_public_key("0198957673ad060503e2ec7d98dc71af6f90ad1f854fe18025e3e7d0d1bbe5e32b"); + let key_2 = + parse_public_key("02022d6bc4e3012cc4ae467b5525111cf7ed65883b05a1d924f1e654c64fad3a027c"); + let key_3 = + parse_public_key("0202fd52dbda97f41def3e3252704d5f8f5adbec1919368282e02e9500bd88845a80"); + let key_4 = + parse_public_key("02022d6bc4e3012cc4ae467b5525111cf7ed65883b05a1d924f1e654c64fad3a027e"); + validator_weights.insert(key_1.clone(), U512::from_dec_str("1").unwrap()); + validator_weights.insert(key_2.clone(), U512::from_dec_str("2").unwrap()); + let mut public_keys = BTreeSet::new(); + public_keys.insert(key_1.clone()); + public_keys.insert(key_4.clone()); + let all_validators = vec![&key_1, &key_2, &key_3, &key_4]; + let single_block_sigs = + SingleBlockRewardedSignatures::from_validator_set(&public_keys, all_validators); + let rewarded_signatures = RewardedSignatures::new(vec![single_block_sigs]); + TestBlockBuilder::default() + .parent_hash(parent_hash) + 
.state_root_hash(state_root_hash) + .timestamp(timestamp) + .era(era_id) + .height(height) + .protocol_version(ProtocolVersion::V2_0_0) + .proposer(proposer) + .switch_block(true) + .validator_weights(validator_weights) + .rewarded_signatures(rewarded_signatures) + .transactions(transactions) + .build(rng) +} + +pub fn sample_transactions( + rng: &mut TestRng, +) -> ( + Vec, + DeployHash, + TransactionV1Hash, + DeployHash, + TransactionV1Hash, + TransactionV1Hash, + TransactionV1Hash, +) { + let timestamp = Timestamp::now(); + let ttl = TimeDiff::from_seconds(rng.gen_range(60..300)); + + let deploy = Deploy::random_with_valid_session_package_by_name(rng); + let standard_deploy_hash = *deploy.hash(); + let standard_deploy = Transaction::Deploy(deploy); + + let version_1 = TransactionV1::random(rng); + let standard_version_1_hash = *version_1.hash(); + let standard_version_1 = Transaction::V1(version_1); + + let deploy = Deploy::random_valid_native_transfer_with_timestamp_and_ttl(rng, timestamp, ttl); + let mint_deploy_hash = *deploy.hash(); + let mint_deploy = Transaction::Deploy(deploy); + + let version_1 = TransactionV1::random_transfer(rng, Some(timestamp), Some(ttl)); + let mint_version_1_hash = *version_1.hash(); + let mint_version_1 = Transaction::V1(version_1); + + let version_1 = TransactionV1::random_install_upgrade(rng, Some(timestamp), Some(ttl)); + let install_upgrade_v1_hash = *version_1.hash(); + let install_upgrade_v1 = Transaction::V1(version_1); + + let version_1 = TransactionV1::random_wasm(rng, Some(timestamp), Some(ttl)); + let auction_v1_hash = *version_1.hash(); + let auction_v1 = Transaction::V1(version_1); + + ( + vec![ + standard_deploy, + standard_version_1, + mint_deploy, + mint_version_1, + install_upgrade_v1, + auction_v1, + ], + standard_deploy_hash, + standard_version_1_hash, + mint_deploy_hash, + mint_version_1_hash, + install_upgrade_v1_hash, + auction_v1_hash, + ) +} + +pub fn block_v2( + rng: &mut TestRng, + parent_hash: BlockHash, 
+ state_root_hash: Digest, + timestamp: Timestamp, + era_id: EraId, + height: u64, + proposer: PublicKey, +) -> BlockV2 { + block_v2_with_transactions( + rng, + parent_hash, + state_root_hash, + timestamp, + era_id, + height, + proposer, + vec![], + ) +} + +#[allow(clippy::too_many_arguments)] +pub fn block_v1_no_deploys_no_era( + parent_hash: BlockHash, + state_root_hash: Digest, + body_hash: Digest, + random_bit: bool, + accumulated_seed: Digest, + timestamp: Timestamp, + era_id: EraId, + height: u64, + proposer: PublicKey, + block_hash: BlockHash, +) -> structs::BlockV1 { + structs::BlockV1::new( + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + None, + timestamp, + era_id, + height, + ProtocolVersion::V2_0_0, + proposer, + block_hash, + vec![], + vec![], + ) +} + +pub fn era_end_v2() -> EraEndV2 { + let mut next_era_validator_weights = BTreeMap::new(); + next_era_validator_weights.insert( + parse_public_key("0198957673ad060503e2ec7d98dc71af6f90ad1f854fe18025e3e7d0d1bbe5e32b"), + U512::from_dec_str("1").unwrap(), + ); + next_era_validator_weights.insert( + parse_public_key("02022d6bc4e3012cc4ae467b5525111cf7ed65883b05a1d924f1e654c64fad3a027c"), + U512::from_dec_str("2").unwrap(), + ); + let mut rewards = BTreeMap::new(); + rewards.insert( + parse_public_key("01235b932586ae5cc3135f7a0dc723185b87e5bd3ae0ac126a92c14468e976ff25"), + vec![U512::from_dec_str("129457537").unwrap()], + ); + EraEndV2::new( + vec![ + parse_public_key("010a10a45ea0aff7af1ffef92287d00ec4cf01c5e9e2952e018a2fbb0f0ede2b50"), + parse_public_key( + "02037c17d279d6e54375f7cfb3559730d5434bfedc8638a3f95e55f6e85fc9e8f611", + ), + parse_public_key( + "02026d4b741a0ece4b3d6d61294a8db28a28dbd734133694582d38f240686ec61d05", + ), + ], + vec![parse_public_key( + "010a10a45ea0aff7af1ffef92287d00ec4cf01c5e9e2952e018a2fbb0f0ede2b51", + )], + next_era_validator_weights, + rewards, + 1, + ) +} + +pub fn era_end_v2_with_reward_exceeding_u64() -> EraEndV2 { + let mut 
next_era_validator_weights = BTreeMap::new(); + next_era_validator_weights.insert( + parse_public_key("0198957673ad060503e2ec7d98dc71af6f90ad1f854fe18025e3e7d0d1bbe5e32b"), + U512::from_dec_str("1").unwrap(), + ); + next_era_validator_weights.insert( + parse_public_key("02022d6bc4e3012cc4ae467b5525111cf7ed65883b05a1d924f1e654c64fad3a027c"), + U512::from_dec_str("2").unwrap(), + ); + let mut rewards = BTreeMap::new(); + rewards.insert( + parse_public_key("01235b932586ae5cc3135f7a0dc723185b87e5bd3ae0ac126a92c14468e976ff25"), + vec![U512::from_dec_str("18446744073709551616").unwrap()], + ); + EraEndV2::new( + vec![ + parse_public_key("010a10a45ea0aff7af1ffef92287d00ec4cf01c5e9e2952e018a2fbb0f0ede2b50"), + parse_public_key( + "02037c17d279d6e54375f7cfb3559730d5434bfedc8638a3f95e55f6e85fc9e8f611", + ), + parse_public_key( + "02026d4b741a0ece4b3d6d61294a8db28a28dbd734133694582d38f240686ec61d05", + ), + ], + vec![parse_public_key( + "010a10a45ea0aff7af1ffef92287d00ec4cf01c5e9e2952e018a2fbb0f0ede2b51", + )], + next_era_validator_weights, + rewards, + 1, + ) +} + +const RAW_API_VERSION: &str = r#"{"ApiVersion":"2.0.0"}"#; + +const RAW_FINALITY_SIGNATURE_V2: &str = r#"{ + "FinalitySignature": { + "V2": { + "block_hash": "45d7c385cba0a880cbc0068ccc6c58111d057d8190850b744c8e0450a24639d4", + "block_height": 60923, + "era_id": 139, + "chain_name_hash": "f087a92e6e7077b3deb5e00b14a904e34c7068a9410365435bc7ca5d3ac64301", + "signature": "01cc39996b8410b500a61c97f888b381546de77e13e8af1a509d3305021b079c1d54b29f3eac8370eb40af2a6419b81427e09cd3c2e72567357fa2120abb0bba06", + "public_key": "017536433a73f7562526f3e9fcb8d720428ae2d28788a9909f3c6f637a9d848a4b" + } + } + }"#; + +const RAW_FINALITY_SIGNATURE_V1: &str = r#"{ +"FinalitySignature": { + "V1": { + "block_hash": "45d7c385cba0a880cbc0068ccc6c58111d057d8190850b744c8e0450a24639d4", + "era_id": 139, + "signature": 
"01cc39996b8410b500a61c97f888b381546de77e13e8af1a509d3305021b079c1d54b29f3eac8370eb40af2a6419b81427e09cd3c2e72567357fa2120abb0bba06", + "public_key": "017536433a73f7562526f3e9fcb8d720428ae2d28788a9909f3c6f637a9d848a4b" + } +} +}"#; + +const RAW_LEGACY_FINALITY_SIGNATURE: &str = r#"{ + "FinalitySignature": { + "block_hash": "45d7c385cba0a880cbc0068ccc6c58111d057d8190850b744c8e0450a24639d4", + "era_id": 139, + "signature": "01cc39996b8410b500a61c97f888b381546de77e13e8af1a509d3305021b079c1d54b29f3eac8370eb40af2a6419b81427e09cd3c2e72567357fa2120abb0bba06", + "public_key": "017536433a73f7562526f3e9fcb8d720428ae2d28788a9909f3c6f637a9d848a4b" + } + }"#; + +const RAW_TRANSACTION_ACCEPTED: &str = r#" +{ + "TransactionAccepted": { + "Version1": { + "hash": "2084a40f58874fb2997e029e61ec55e3d5a6cd5f6de77a1d42dcaf21aeddc760", + "payload": { + "initiator_addr": { + "PublicKey": "020394489ced801f72d5ae25d66dfa4e5f7d045fa3f16085d780be901054d8386295" + }, + "timestamp": "2020-08-07T01:27:53.316Z", + "ttl": "16h 57m 31s 19ms", + "chain_name": "⸻⋉◬⸗ⶨ⼄≙⡫⨁ⶃℍ⊨⇏ⴲⲋ⪝⣬ⴂ⨨⪯⿉⺙⚚⻰⒯ⶖ⟽⬪❴⴯╽♥⅏⏵❲⃽ⶁ⾠⸗◩⋑Ⅹ♼⺓⊻⼠Ⓩ∇Ⅺ⸔◘⠝◓⚾◯⦁★⢹␄⍆⨿⵮⭭⮛⸹⃻⹶⎶⟆⛎⤑₇⩐╨⋸⠸₈⥡ⷔ⹪⤛⭺⵫Ⲗ⃁⪏⫵⚎⁘⦳☉␛Ⲹ⥝⇡Ⰰ⫂⁎⍆⼸", + "pricing_mode": { + "Fixed": { "additional_computation_factor": 0, "gas_price_tolerance": 5 } + }, + "fields": { + "args": { + "Named": [ + [ + "xyz", + { + "bytes": "0d0000001af81d860f238f832b8f8e648c", + "cl_type": { + "List": "U8" + } + } + ] + ] + }, + "entry_point": "AddBid", + "scheduling": { + "FutureEra": 195120 + }, + "target": "Native" + } + }, + "approvals": [ + { + "signer": "020394489ced801f72d5ae25d66dfa4e5f7d045fa3f16085d780be901054d8386295", + "signature": "02a679a7db9f6123672309812706b2e2316c66fc16ac6ff6745539f1708264dfc33eb72883f142c90c1ab183191a83a474394203aaf6b21dcaf7b559137ccc604b" + } + ] +} + } +} +"#; + +const RAW_LEGACY_DEPLOY_ACCEPTED: &str = r#" +{ + "DeployAccepted": { + "hash": "5a7709969c210db93d3c21bf49f8bf705d7c75a01609f606d04b0211af171d43", + "header": { + "account": 
"02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", + "timestamp": "2020-08-07T01:28:27.360Z", + "ttl": "4m 22s", + "gas_price": 72, + "body_hash": "aa2a111c086628a161001160756c5884e32fde0356bb85f484a3e55682ad089f", + "dependencies": [], + "chain_name": "casper-example" + }, + "payment": { + "StoredContractByName": { + "name": "casper-example", + "entry_point": "example-entry-point", + "args": [ + [ + "amount", + { + "cl_type": "U512", + "bytes": "0400f90295", + "parsed": "2500000000" + } + ] + ] + } + }, + "session": { + "StoredContractByHash": { + "hash": "dfb621e7012df48fe1d40fd8015b5e2396c477c9587e996678551148a06d3a89", + "entry_point": "8sY9fUUCwoiFZmxKo8kj", + "args": [ + [ + "YbZWtEuL4D6oMTJmUWvj", + { + "cl_type": { + "List": "U8" + }, + "bytes": "5a000000909ffe7807b03a5db0c3c183648710db16d408d8425a4e373fc0422a4efed1ab0040bc08786553fcac4521528c9fafca0b0fb86f4c6e9fb9db7a1454dda8ed612c4ea4c9a6378b230ae1e3c236e37d6ebee94339a56cb4be582a", + "parsed": [ + 144, + 159, + 254, + 120, + 7 + ] + } + ] + ] + } + }, + "approvals": [ + { + "signer": "02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", + "signature": "025d0a7ba37bebe6774681ca5adecb70fa4eef56821eb344bf0f6867e171a899a87edb2b8bf70f2cb47a1670a6baf2cded1fad535ee53a2f65da91c82ebf30945b" + } + ] + } +}"#; + +const RAW_DEPLOY_ACCEPTED: &str = r#" +{ + "TransactionAccepted": { + "Deploy": { + "hash": "5a7709969c210db93d3c21bf49f8bf705d7c75a01609f606d04b0211af171d43", + "header": { + "account": "02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", + "timestamp": "2020-08-07T01:28:27.360Z", + "ttl": "4m 22s", + "gas_price": 72, + "body_hash": "aa2a111c086628a161001160756c5884e32fde0356bb85f484a3e55682ad089f", + "dependencies": [], + "chain_name": "casper-example" + }, + "payment": { + "StoredContractByName": { + "name": "casper-example", + "entry_point": "example-entry-point", + "args": [ + [ + "amount", + { + "cl_type": "U512", + "bytes": "0400f90295", + 
"parsed": "2500000000" + } + ] + ] + } + }, + "session": { + "StoredContractByHash": { + "hash": "dfb621e7012df48fe1d40fd8015b5e2396c477c9587e996678551148a06d3a89", + "entry_point": "8sY9fUUCwoiFZmxKo8kj", + "args": [ + [ + "YbZWtEuL4D6oMTJmUWvj", + { + "cl_type": { + "List": "U8" + }, + "bytes": "5a000000909ffe7807b03a5db0c3c183648710db16d408d8425a4e373fc0422a4efed1ab0040bc08786553fcac4521528c9fafca0b0fb86f4c6e9fb9db7a1454dda8ed612c4ea4c9a6378b230ae1e3c236e37d6ebee94339a56cb4be582a", + "parsed": [ + 144, + 159, + 254, + 120, + 7 + ] + } + ] + ] + } + }, + "approvals": [ + { + "signer": "02022c07e061d6e0b43bbaa25717b021c2ddc0f701a223946a0883b57ae842917438", + "signature": "025d0a7ba37bebe6774681ca5adecb70fa4eef56821eb344bf0f6867e171a899a87edb2b8bf70f2cb47a1670a6baf2cded1fad535ee53a2f65da91c82ebf30945b" + } + ] + } + } +} +"#; + +const RAW_DEPLOY_EXPIRED: &str = r#" +{ + "TransactionExpired": { + "transaction_hash": { + "Deploy": "565d7147e28be402c34208a133fd59fde7ac785ae5f0298cb5fb7adfb1b054a8" + } + } +} +"#; + +const RAW_TRANSACTION_EXPIRED: &str = r#" +{ + "TransactionExpired": { + "transaction_hash": { + "Version1": "8c22ae866be3287b6374592083b17cbaf4b0452d7a55adb2a4e53bb0295c0d76" + } + } +} +"#; + +const RAW_LEGACY_DEPLOY_EXPIRED: &str = r#" +{ + "DeployExpired": { + "deploy_hash": "565d7147e28be402c34208a133fd59fde7ac785ae5f0298cb5fb7adfb1b054a8" + } +} +"#; + +const RAW_FAULT: &str = r#" +{ + "Fault": { + "era_id": 769794, + "public_key": "02034ce1acbceeb5eb2b20eeeef9965e3ca8e7a95655f2089342bcbb51319a0d70d1", + "timestamp": "2020-08-07T01:30:59.692Z" + } +} +"#; + +const RAW_LEGACY_FAULT: &str = r#" +{ + "Fault": { + "era_id": 769794, + "public_key": "02034ce1acbceeb5eb2b20eeeef9965e3ca8e7a95655f2089342bcbb51319a0d70d1", + "timestamp": "2020-08-07T01:30:59.692Z" + } +} +"#; + +const RAW_LEGACY_BLOCK_ADDED: &str = r#" +{ + "BlockAdded": { + "block_hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", + "block": { + "hash": 
"d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", + "header": { + "parent_hash": "90ca56a697f8b1b19cba08c642fd7f04669b8cd49bb9d652fca989f8a9f8bcea", + "state_root_hash": "9cce223fdbeab41dbbcf0b62f3fd857373131378d51776de26bb9f4fefe1e849", + "body_hash": "5f37be399c15b2394af48243ce10a62a7d12769dc5f7740b18ad3bf55bde5271", + "random_bit": true, + "accumulated_seed": "b3e1930565a80a874a443eaadefa1a340927fb8b347729bbd93e93935a47a9e4", + "era_end": { + "era_report": { + "equivocators": [ + "0203c9da857cfeccf001ce00720ae2e0d083629858b60ac05dd285ce0edae55f0c8e", + "02026fb7b629a2ec0132505cdf036f6ffb946d03a1c9b5da57245af522b842f145be" + ], + "rewards": [ + { + "validator": "01235b932586ae5cc3135f7a0dc723185b87e5bd3ae0ac126a92c14468e976ff25", + "amount": 129457537 + } + ], + "inactive_validators": [] + }, + "next_era_validator_weights": [ + { + "validator": "0198957673ad060503e2ec7d98dc71af6f90ad1f854fe18025e3e7d0d1bbe5e32b", + "weight": "1" + }, + { + "validator": "02022d6bc4e3012cc4ae467b5525111cf7ed65883b05a1d924f1e654c64fad3a027c", + "weight": "2" + } + ] + }, + "timestamp": "2024-04-25T20:00:35.640Z", + "era_id": 601701, + "height": 6017012, + "protocol_version": "1.0.0" + }, + "body": { + "proposer": "0203426736da2554ebf1f8ee1d2ce4ab11b1e33419d7dfc1ce2fe1945faf00bacc9e", + "deploy_hashes": [ + "06950e4374dc88685634ec30bcddd68e6b46c109ccf6d29e2dfcf5367df75571", + "27a89dd58e6297a5244342b68b117afe2555131b896ad6ed4321edcd4130ae7b" + ], + "transfer_hashes": [ + "3e30b6c1c5dbca9277425846b42dc832cd3d8ce889c38d6bfc8bd95b3e1c403e", + "c990ba47146270655eaacc53d4115cbd980697f3d4e9c76bccfdfce82af6ce08" + ] + } + } + } +}"#; + +const RAW_BLOCK_ADDED_V1: &str = r#" +{ + "BlockAdded": { + "block_hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", + "block": { + "Version1": { + "hash": "d59359690ca5a251b513185da0767f744e77645adec82bb6ff785a89edc7591c", + "header": { + "parent_hash": 
"90ca56a697f8b1b19cba08c642fd7f04669b8cd49bb9d652fca989f8a9f8bcea", + "state_root_hash": "9cce223fdbeab41dbbcf0b62f3fd857373131378d51776de26bb9f4fefe1e849", + "body_hash": "5f37be399c15b2394af48243ce10a62a7d12769dc5f7740b18ad3bf55bde5271", + "random_bit": true, + "accumulated_seed": "b3e1930565a80a874a443eaadefa1a340927fb8b347729bbd93e93935a47a9e4", + "era_end": { + "era_report": { + "equivocators": [ + "0203c9da857cfeccf001ce00720ae2e0d083629858b60ac05dd285ce0edae55f0c8e", + "02026fb7b629a2ec0132505cdf036f6ffb946d03a1c9b5da57245af522b842f145be" + ], + "rewards": [ + { + "validator": "01235b932586ae5cc3135f7a0dc723185b87e5bd3ae0ac126a92c14468e976ff25", + "amount": 129457537 + } + ], + "inactive_validators": [] + }, + "next_era_validator_weights": [ + { + "validator": "0198957673ad060503e2ec7d98dc71af6f90ad1f854fe18025e3e7d0d1bbe5e32b", + "weight": "1" + }, + { + "validator": "02022d6bc4e3012cc4ae467b5525111cf7ed65883b05a1d924f1e654c64fad3a027c", + "weight": "2" + } + ] + }, + "timestamp": "2024-04-25T20:00:35.640Z", + "era_id": 601701, + "height": 6017012, + "protocol_version": "1.0.0" + }, + "body": { + "proposer": "0203426736da2554ebf1f8ee1d2ce4ab11b1e33419d7dfc1ce2fe1945faf00bacc9e", + "deploy_hashes": [ + "06950e4374dc88685634ec30bcddd68e6b46c109ccf6d29e2dfcf5367df75571", + "27a89dd58e6297a5244342b68b117afe2555131b896ad6ed4321edcd4130ae7b" + ], + "transfer_hashes": [ + "3e30b6c1c5dbca9277425846b42dc832cd3d8ce889c38d6bfc8bd95b3e1c403e", + "c990ba47146270655eaacc53d4115cbd980697f3d4e9c76bccfdfce82af6ce08" + ] + } + } + } + } +} +"#; + +const RAW_BLOCK_ADDED_V2: &str = r#"{ + "BlockAdded": { + "block_hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", + "block": { + "Version2": { + "hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", + "header": { + "proposer": "01d3eec0445635f136ae560b43e9d8f656a6ba925f01293eaf2610b39ebe0fc28d", + "parent_hash": "b8f5e9afd2e54856aa1656f962d07158f0fdf9cfac0f9992875f31f6bf2623a2", + 
"state_root_hash": "cbf02d08bb263aa8915507c172b5f590bbddcd68693fb1c71758b5684b011730", + "body_hash": "6041ab862a1e14a43a8e8a9a42dad27091915a337d18060c22bd3fe7b4f39607", + "random_bit": false, + "accumulated_seed": "a0e424710f4fba036ba450b40f2bd7a842b176cf136f3af1952a2a13eb02616c", + "era_end": { + "equivocators": [ + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc54", + "0203e4532e401326892aa8ebc16b6986bd35a6c96a1f16c28db67fd7e87cb6913817", + "020318a52d5b2d545def8bf0ee5ea7ddea52f1fbf106c8b69848e40c5460e20c9f62" + ], + "inactive_validators": ["01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc55", "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc56"], + "next_era_validator_weights": [ + {"validator": "02038b238d774c3c4228a0430e3a078e1a2533f9c87cccbcf695637502d8d6057a63", "weight": "1"}, + {"validator": "0102ffd4d2812d68c928712edd012fbcad54367bc6c5c254db22cf696772856566", "weight": "2"} + ], + "rewards": { + "02028b18c949d849b377988ea5191b39340975db25f8b80f37cc829c9f79dbfb19fc": ["749546792"], + "02028002c063228ff4e9d22d69154c499b86a4f7fdbf1d1e20f168b62da537af64c2": ["788342677"], + "02038efa405f648c72f36b0e5f37db69ab213d44404591b24de21383d8cc161101ec": ["86241635"], + "01f6bbd4a6fd10534290c58edb6090723d481cea444a8e8f70458e5136ea8c733c": ["941794198"] + }, + "next_era_gas_price": 1 + }, + "timestamp": "2024-04-25T20:31:39.895Z", + "era_id": 419571, + "height": 4195710, + "protocol_version": "1.0.0", + "current_gas_price": 1 + }, + "body": { + "transactions": { + "0": [{"Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e80"}, {"Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e81"}, {"Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e90"}], + "1": [{"Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e82"}, {"Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e83"}, {"Version1": 
"58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e91"}], + "2": [{"Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e84"}, {"Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e85"}, {"Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e92"}], + "3": [{"Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e86"}, {"Deploy": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e87"}, {"Version1": "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e93"}] + }, + "rewarded_signatures": [[240],[0],[0]] + } + } + } + } +}"#; + +const RAW_LEGACY_BLOCK_ADDED_FROM_V2: &str = r#"{ + "BlockAdded": { + "block_hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", + "block": { + "hash": "2df9fb8909443fba928ed0536a79780cdb4557d0c05fdf762a1fd61141121422", + "header": { + "parent_hash": "b8f5e9afd2e54856aa1656f962d07158f0fdf9cfac0f9992875f31f6bf2623a2", + "state_root_hash": "cbf02d08bb263aa8915507c172b5f590bbddcd68693fb1c71758b5684b011730", + "body_hash": "6041ab862a1e14a43a8e8a9a42dad27091915a337d18060c22bd3fe7b4f39607", + "random_bit": false, + "accumulated_seed": "a0e424710f4fba036ba450b40f2bd7a842b176cf136f3af1952a2a13eb02616c", + "era_end": { + "era_report": { + "equivocators": [ + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc54", + "0203e4532e401326892aa8ebc16b6986bd35a6c96a1f16c28db67fd7e87cb6913817", + "020318a52d5b2d545def8bf0ee5ea7ddea52f1fbf106c8b69848e40c5460e20c9f62" + ], + "rewards": [ + { + "validator": "01f6bbd4a6fd10534290c58edb6090723d481cea444a8e8f70458e5136ea8c733c", + "amount": 941794198 + }, + { + "validator": "02028002c063228ff4e9d22d69154c499b86a4f7fdbf1d1e20f168b62da537af64c2", + "amount": 788342677 + }, + { + "validator": "02028b18c949d849b377988ea5191b39340975db25f8b80f37cc829c9f79dbfb19fc", + "amount": 749546792 + }, + { + "validator": 
"02038efa405f648c72f36b0e5f37db69ab213d44404591b24de21383d8cc161101ec", + "amount": 86241635 + } + ], + "inactive_validators": [ + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc55", + "01cc718e9dea652577bffad3471d0db7d03ba30923780a2a8fd1e3dd9b4e72dc56" + ] + }, + "next_era_validator_weights": [ + { + "validator": "0102ffd4d2812d68c928712edd012fbcad54367bc6c5c254db22cf696772856566", + "weight": "2" + }, + { + "validator": "02038b238d774c3c4228a0430e3a078e1a2533f9c87cccbcf695637502d8d6057a63", + "weight": "1" + } + ] + }, + "timestamp": "2024-04-25T20:31:39.895Z", + "era_id": 419571, + "height": 4195710, + "protocol_version": "1.0.0" + }, + "body": { + "proposer": "01d3eec0445635f136ae560b43e9d8f656a6ba925f01293eaf2610b39ebe0fc28d", + "deploy_hashes": [ + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e82", + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e83", + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e84", + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e85", + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e86", + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e87" + ], + "transfer_hashes": [ + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e80", + "58aca0009fc41bd045d303db9e9f07416ff1fd8c76ecd98545eedf86f9459e81" + ] + } + } + } +}"#; + +const RAW_DEPLOY_PROCESSED: &str = r#"{ + "TransactionProcessed": { + "transaction_hash": { + "Deploy": "1660c3971ba4283583e4abc1cbef5ea0845a37300ae40a8728ae2684c1b1a5d2" + }, + "initiator_addr": { + "PublicKey": "0203d98e2ec694b981cebd7cf35ac531d8717e86dc35912f2536b8807ad550621147" + }, + "timestamp": "2020-08-07T01:24:27.283Z", + "ttl": "4h 11m 524ms", + "block_hash": "e91cd454e94f2e3d155fa71105251cd0905e91067872a743d5d22974caabab06", + "execution_result": { + "Version2": { + "initiator": { + "PublicKey": "0203d98e2ec694b981cebd7cf35ac531d8717e86dc35912f2536b8807ad550621147" + }, + "error_message": 
"Error message 18290057561582514745", + "limit": "11209375253254652626", + "consumed": "10059559442643035623", + "cost": "44837501013018610504", + "transfers": [], + "effects": [], + "size_estimate": 521 + } + }, + "messages": [ + { + "hash_addr": "7da5cc7bb0035d5e2a05014c4ac8e23ec2b198988da9401411040efddb0108ef", + "entity_hash": "entity-system-fbd35eaf71f295b3bf35a295e705f629bbea28cefedfc109eda1205fb3650bad", + "message": { + "String": "cs5rHI2Il75nRJ7GLs7BQM5CilvzMqu0dgFuj57FkqEs3431LJ1qfsZActb05hzR" + }, + "topic_name": "7DnsHE3NL4PRaYuPcY90bECdnd7D78lF", + "topic_name_hash": "f75840ed75ad1c85856de00d2ca865a7608b46a933d81c64ff8907ec620d6e83", + "topic_index": 2222189259, + "block_index": 11650550294672125610 + } + ] + } +}"#; + +const RAW_LEGACY_DEPLOY_PROCESSED: &str = r#"{ + "DeployProcessed": { + "deploy_hash": "1660c3971ba4283583e4abc1cbef5ea0845a37300ae40a8728ae2684c1b1a5d2", + "account": "0203d98e2ec694b981cebd7cf35ac531d8717e86dc35912f2536b8807ad550621147", + "timestamp": "2020-08-07T01:24:27.283Z", + "ttl": "4h 11m 524ms", + "dependencies": [], + "block_hash": "e91cd454e94f2e3d155fa71105251cd0905e91067872a743d5d22974caabab06", + "execution_result": { + "Failure": { + "effect": { + "operations": [], + "transforms": [] + }, + "transfers": [], + "cost": "44837501013018610504", + "error_message": "Error message 18290057561582514745" + } + } + } +}"#; diff --git a/types/src/legacy_sse_data/mod.rs b/types/src/legacy_sse_data/mod.rs new file mode 100644 index 00000000..26fa90c5 --- /dev/null +++ b/types/src/legacy_sse_data/mod.rs @@ -0,0 +1,273 @@ +use self::{ + translate_block_added::{build_default_block_added_translator, BlockAddedTranslator}, + translate_execution_result::{ + build_default_execution_result_translator, ExecutionResultV2Translator, + }, +}; +use crate::sse_data::SseData; +use casper_types::{ + execution::{ExecutionResult, ExecutionResultV1}, + BlockHash, Deploy, DeployHash, EraId, FinalitySignature, FinalitySignatureV1, + FinalitySignatureV2, 
InitiatorAddr, ProtocolVersion, PublicKey, Signature, TimeDiff, Timestamp, + Transaction, TransactionHash, +}; +use serde::{Deserialize, Serialize}; + +#[cfg(test)] +mod fixtures; +mod structs; +mod translate_block_added; +mod translate_deploy_hashes; +mod translate_execution_result; + +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +pub enum LegacySseData { + ApiVersion(ProtocolVersion), + DeployAccepted(Deploy), + DeployProcessed { + deploy_hash: Box, + account: Box, + timestamp: Timestamp, + ttl: TimeDiff, + dependencies: Vec, + block_hash: Box, + execution_result: Box, + }, + DeployExpired { + deploy_hash: DeployHash, + }, + BlockAdded { + block_hash: BlockHash, + block: structs::BlockV1, + }, + Fault { + era_id: EraId, + public_key: PublicKey, + timestamp: Timestamp, + }, + FinalitySignature(LegacyFinalitySignature), +} + +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +pub struct LegacyFinalitySignature { + block_hash: BlockHash, + era_id: EraId, + signature: Signature, + public_key: PublicKey, +} + +impl LegacyFinalitySignature { + fn from_v1(finality_signature: &FinalitySignatureV1) -> Self { + LegacyFinalitySignature { + block_hash: *finality_signature.block_hash(), + era_id: finality_signature.era_id(), + signature: *finality_signature.signature(), + public_key: finality_signature.public_key().clone(), + } + } + + fn from_v2(finality_signature: &FinalitySignatureV2) -> Self { + LegacyFinalitySignature { + block_hash: *finality_signature.block_hash(), + era_id: finality_signature.era_id(), + signature: *finality_signature.signature(), + public_key: finality_signature.public_key().clone(), + } + } +} + +impl LegacySseData { + pub fn from(data: &SseData) -> Option { + match data { + SseData::ApiVersion(protocol_version) => { + Some(LegacySseData::ApiVersion(*protocol_version)) + } + SseData::SidecarVersion(_) => None, + SseData::Shutdown => None, + SseData::BlockAdded { block_hash, block } => { + 
build_default_block_added_translator().translate(block_hash, block) + } + SseData::TransactionAccepted(transaction) => { + maybe_translate_transaction_accepted(transaction) + } + SseData::TransactionProcessed { + transaction_hash, + initiator_addr, + timestamp, + ttl, + block_hash, + execution_result, + messages: _, + } => maybe_translate_transaction_processed( + transaction_hash, + initiator_addr, + timestamp, + ttl, + block_hash, + execution_result, + ), + SseData::TransactionExpired { transaction_hash } => { + maybe_translate_deploy_expired(transaction_hash) + } + SseData::Fault { + era_id, + public_key, + timestamp, + } => maybe_translate_fault(era_id, public_key, timestamp), + SseData::FinalitySignature(fs) => Some(translate_finality_signature(fs)), + SseData::Step { .. } => None, //we don't translate steps + } + } +} + +fn translate_finality_signature(fs: &FinalitySignature) -> LegacySseData { + match fs { + FinalitySignature::V1(v1) => { + LegacySseData::FinalitySignature(LegacyFinalitySignature::from_v1(v1)) + } + FinalitySignature::V2(v2) => { + LegacySseData::FinalitySignature(LegacyFinalitySignature::from_v2(v2)) + } + } +} + +fn maybe_translate_fault( + era_id: &EraId, + public_key: &PublicKey, + timestamp: &Timestamp, +) -> Option { + Some(LegacySseData::Fault { + era_id: *era_id, + public_key: public_key.clone(), + timestamp: *timestamp, + }) +} + +fn maybe_translate_deploy_expired(transaction_hash: &TransactionHash) -> Option { + match transaction_hash { + TransactionHash::Deploy(deploy_hash) => Some(LegacySseData::DeployExpired { + deploy_hash: *deploy_hash, + }), + TransactionHash::V1(_) => None, + } +} + +fn maybe_translate_transaction_processed( + transaction_hash: &TransactionHash, + initiator_addr: &InitiatorAddr, + timestamp: &Timestamp, + ttl: &TimeDiff, + block_hash: &BlockHash, + execution_result: &ExecutionResult, +) -> Option { + match transaction_hash { + TransactionHash::Deploy(deploy_hash) => { + let account = match initiator_addr { + 
InitiatorAddr::PublicKey(public_key) => public_key, + InitiatorAddr::AccountHash(_) => return None, //This shouldn't happen since we already are in TransactionHash::Deploy + }; + let execution_result = match execution_result { + ExecutionResult::V1(result) => result.clone(), + ExecutionResult::V2(result) => { + let maybe_result = + build_default_execution_result_translator().translate(result); + maybe_result? + } + }; + Some(LegacySseData::DeployProcessed { + deploy_hash: Box::new(*deploy_hash), + account: Box::new(account.clone()), + timestamp: *timestamp, + ttl: *ttl, + dependencies: Vec::new(), + block_hash: Box::new(*block_hash), + execution_result: Box::new(execution_result), + }) + } + _ => None, //V1 transactions can't be interpreted in the old format. + } +} + +fn maybe_translate_transaction_accepted(transaction: &Transaction) -> Option { + match transaction { + Transaction::Deploy(deploy) => Some(LegacySseData::DeployAccepted(deploy.clone())), + _ => None, //V2 transactions can't be interpreted in the old format. 
+ } +} + +#[cfg(test)] +mod tests { + use super::fixtures::*; + use super::*; + use pretty_assertions::assert_eq; + + #[test] + fn should_translate_sse_to_legacy() { + for (sse_data, expected, scenario_name) in sse_translation_scenarios() { + let legacy_fs = LegacySseData::from(&sse_data); + assert_eq!( + legacy_fs, + expected, + "Failed when executing scenario {}", + scenario_name.as_str() + ); + } + } + + #[allow(clippy::too_many_lines)] + fn sse_translation_scenarios() -> Vec<(SseData, Option, String)> { + vec![ + ( + api_version(), + Some(legacy_api_version()), + "api_version".to_string(), + ), + ( + finality_signature_v1(), + Some(legacy_finality_signature()), + "finality_signature_v1".to_string(), + ), + ( + finality_signature_v2(), + Some(legacy_finality_signature()), + "finality_signature_v2".to_string(), + ), + ( + transaction_accepted(), + None, + "transaction_accepted".to_string(), + ), + ( + deploy_accepted(), + Some(legacy_deploy_accepted()), + "legacy_deploy_accepted".to_string(), + ), + ( + deploy_expired(), + Some(legacy_deploy_expired()), + "legacy_deploy_expired".to_string(), + ), + ( + transaction_expired(), + None, + "transaction_expired".to_string(), + ), + (fault(), Some(legacy_fault()), "fault".to_string()), + ( + block_added_v1(), + Some(legacy_block_added()), + "block_added_v1".to_string(), + ), + ( + block_added_v2(), + Some(legacy_block_added_from_v2()), + "block_added_v2".to_string(), + ), + ( + deploy_processed(), + Some(legacy_deploy_processed()), + "deploy_processed".to_string(), + ), + ] + } +} diff --git a/types/src/legacy_sse_data/structs.rs b/types/src/legacy_sse_data/structs.rs new file mode 100644 index 00000000..d8347db4 --- /dev/null +++ b/types/src/legacy_sse_data/structs.rs @@ -0,0 +1,88 @@ +use casper_types::{ + BlockHash, BlockHeaderV1, DeployHash, Digest, EraEndV1, EraId, ProtocolVersion, PublicKey, + Timestamp, +}; +use once_cell::sync::OnceCell; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Eq, PartialEq, 
Serialize, Deserialize, Debug)] +pub struct BlockV1 { + pub(super) hash: BlockHash, + pub(super) header: BlockHeaderV1, + pub(super) body: BlockBodyV1, +} + +impl BlockV1 { + #[allow(clippy::too_many_arguments)] + pub fn new( + parent_hash: BlockHash, + state_root_hash: Digest, + body_hash: Digest, + random_bit: bool, + accumulated_seed: Digest, + era_end: Option, + timestamp: Timestamp, + era_id: EraId, + height: u64, + protocol_version: ProtocolVersion, + proposer: PublicKey, + block_hash: BlockHash, + deploy_hashes: Vec, + transfer_hashes: Vec, + ) -> Self { + let body = BlockBodyV1::new(proposer, deploy_hashes, transfer_hashes); + + let header = BlockHeaderV1::new( + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + OnceCell::from(block_hash), + ); + Self { + hash: block_hash, + header, + body, + } + } + + pub fn from(hash: BlockHash, header: &BlockHeaderV1, body: &casper_types::BlockBodyV1) -> Self { + let legacy_body = BlockBodyV1::new( + body.proposer().clone(), + body.deploy_hashes().to_vec(), + body.transfer_hashes().to_vec(), + ); + BlockV1 { + hash, + header: header.clone(), + body: legacy_body, + } + } +} + +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +pub struct BlockBodyV1 { + pub(super) proposer: PublicKey, + pub(super) deploy_hashes: Vec, + pub(super) transfer_hashes: Vec, +} + +impl BlockBodyV1 { + pub(crate) fn new( + proposer: PublicKey, + deploy_hashes: Vec, + transfer_hashes: Vec, + ) -> Self { + BlockBodyV1 { + proposer, + deploy_hashes, + transfer_hashes, + } + } +} diff --git a/types/src/legacy_sse_data/translate_block_added.rs b/types/src/legacy_sse_data/translate_block_added.rs new file mode 100644 index 00000000..e1f5379e --- /dev/null +++ b/types/src/legacy_sse_data/translate_block_added.rs @@ -0,0 +1,390 @@ +use super::{ + structs, + translate_deploy_hashes::{ + DeployHashTranslator, StandardDeployHashesTranslator, 
TransferDeployHashesTranslator, + }, + LegacySseData, +}; +use casper_types::{Block, BlockHash, BlockV2, EraEndV1, EraEndV2, EraReport, U512}; +use mockall::automock; +use std::collections::BTreeMap; + +#[automock] +pub trait BlockV2Translator { + fn translate(&self, block_v2: &BlockV2) -> Option; +} + +#[automock] +pub trait BlockAddedTranslator { + fn translate(&self, block_hash: &BlockHash, block: &Block) -> Option; +} + +#[automock] +pub trait EraEndV2Translator { + fn translate(&self, era_end_v2: &EraEndV2) -> Option; +} + +#[derive(Default)] +pub struct DefaultEraEndV2Translator; + +impl EraEndV2Translator for DefaultEraEndV2Translator { + fn translate(&self, era_end: &EraEndV2) -> Option { + let mut rewards = BTreeMap::new(); + for (k, v) in era_end.rewards().iter() { + let amount = v.iter().cloned().sum::(); + if amount > U512::from(u64::MAX) { + //We're not able to cast the reward to u64, so we skip this era end. + return None; + } + rewards.insert(k.clone(), amount.as_u64()); + } + let era_report = EraReport::new( + era_end.equivocators().to_vec(), + rewards, + era_end.inactive_validators().to_vec(), + ); + Some(EraEndV1::new( + era_report, + era_end.next_era_validator_weights().clone(), + )) + } +} + +pub struct DefaultBlockV2Translator +where + ET: EraEndV2Translator, + DT: DeployHashTranslator, + TT: DeployHashTranslator, +{ + era_end_translator: ET, + deploy_hash_translator: DT, + transfer_hash_translator: TT, +} + +impl BlockV2Translator for DefaultBlockV2Translator +where + ET: EraEndV2Translator, + DT: DeployHashTranslator, + TT: DeployHashTranslator, +{ + fn translate(&self, block_v2: &BlockV2) -> Option { + let header = block_v2.header(); + let parent_hash = *header.parent_hash(); + let body_hash = *header.body_hash(); + let state_root_hash = *header.state_root_hash(); + let random_bit = block_v2.header().random_bit(); + let accumulated_seed = *header.accumulated_seed(); + let era_end = header + .era_end() + .and_then(|era_end| 
self.era_end_translator.translate(era_end)); + let timestamp = block_v2.header().timestamp(); + let era_id = block_v2.header().era_id(); + let height = block_v2.header().height(); + let protocol_version = block_v2.header().protocol_version(); + let block_hash = block_v2.hash(); + let body = block_v2.body(); + let proposer = header.proposer().clone(); + let deploy_hashes = self.deploy_hash_translator.translate(body); + let transfer_hashes = self.transfer_hash_translator.translate(body); + let block_v1 = structs::BlockV1::new( + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + *block_hash, + deploy_hashes, + transfer_hashes, + ); + Some(block_v1) + } +} + +pub struct DefaultBlockAddedTranslator +where + BT: BlockV2Translator, +{ + block_v2_translator: BT, +} + +pub fn build_default_block_added_translator() -> DefaultBlockAddedTranslator< + DefaultBlockV2Translator< + DefaultEraEndV2Translator, + StandardDeployHashesTranslator, + TransferDeployHashesTranslator, + >, +> { + DefaultBlockAddedTranslator { + block_v2_translator: build_default_block_v2_translator(), + } +} + +pub fn build_default_block_v2_translator() -> DefaultBlockV2Translator< + DefaultEraEndV2Translator, + StandardDeployHashesTranslator, + TransferDeployHashesTranslator, +> { + DefaultBlockV2Translator { + era_end_translator: DefaultEraEndV2Translator, + deploy_hash_translator: StandardDeployHashesTranslator, + transfer_hash_translator: TransferDeployHashesTranslator, + } +} + +impl BlockAddedTranslator for DefaultBlockAddedTranslator +where + T: BlockV2Translator, +{ + fn translate(&self, block_hash: &BlockHash, block: &Block) -> Option { + match block { + Block::V1(block_v1) => Some(LegacySseData::BlockAdded { + block_hash: *block_hash, + block: structs::BlockV1::from(*block_v1.hash(), block_v1.header(), block_v1.body()), + }), + Block::V2(block_v2) => { + let maybe_block = 
self.block_v2_translator.translate(block_v2); + maybe_block.map(|block| LegacySseData::BlockAdded { + block_hash: *block_hash, + block, + }) + } + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use casper_types::{testing::TestRng, DeployHash, EraEndV1, EraId, EraReport, PublicKey, U512}; + use mockall::predicate; + use pretty_assertions::assert_eq; + use rand::Rng; + use serde::Serialize; + + use super::{ + BlockV2Translator, DefaultBlockV2Translator, DefaultEraEndV2Translator, EraEndV2Translator, + MockEraEndV2Translator, + }; + use crate::{ + legacy_sse_data::{fixtures::*, translate_deploy_hashes::MockDeployHashTranslator}, + testing::parse_public_key, + }; + + #[test] + pub fn default_block_v2_translator_translates_without_era_end_and_deploys() { + let mut test_rng = TestRng::new(); + let (mut era_end_translator, mut deploy_hash_translator, mut transfer_hash_translator) = + prepare_mocks(); + let block_v2 = block_v2( + &mut test_rng, + parent_hash(), + state_root_hash(), + timestamp(), + EraId::new(15678276), + 345678987, + proposer(), + ); + let era_end_ref = block_v2.header().era_end().unwrap(); + prepare_era_end_mock(&mut era_end_translator, era_end_ref, None); + prepare_deploys_mock(&mut deploy_hash_translator, &block_v2, vec![]); + prepare_transfer_mock(&mut transfer_hash_translator, &block_v2, vec![]); + let under_test = DefaultBlockV2Translator { + era_end_translator, + deploy_hash_translator, + transfer_hash_translator, + }; + + let got = under_test.translate(&block_v2); + + assert!(got.is_some()); + let expected = block_v1_no_deploys_no_era( + *block_v2.parent_hash(), + *block_v2.state_root_hash(), + *block_v2.body_hash(), + block_v2.random_bit(), + *block_v2.accumulated_seed(), + block_v2.timestamp(), + block_v2.era_id(), + block_v2.height(), + block_v2.proposer().clone(), + *block_v2.hash(), + ); + compare_as_json(&expected, &got.unwrap()); + } + + #[test] + pub fn 
default_block_v2_translator_passes_era_end_info_and_deploys() { + let mut test_rng = TestRng::new(); + let (mut era_end_translator, mut deploy_hash_translator, mut transfer_hash_translator) = + prepare_mocks(); + let block_v2 = block_v2( + &mut test_rng, + parent_hash(), + state_root_hash(), + timestamp(), + EraId::new(15678276), + 345678987, + proposer(), + ); + let era_end_ref = block_v2.header().era_end().unwrap(); + let report = EraReport::random(&mut test_rng); + let validator_weights = random_validator_weights(&mut test_rng); + let era_end = EraEndV1::new(report, validator_weights); + let deploy_hashes_1: Vec = + (0..3).map(|_| DeployHash::random(&mut test_rng)).collect(); + let deploy_hashes_2: Vec = + (0..3).map(|_| DeployHash::random(&mut test_rng)).collect(); + prepare_era_end_mock(&mut era_end_translator, era_end_ref, Some(era_end.clone())); + prepare_deploys_mock( + &mut deploy_hash_translator, + &block_v2, + deploy_hashes_1.clone(), + ); + prepare_transfer_mock( + &mut transfer_hash_translator, + &block_v2, + deploy_hashes_2.clone(), + ); + + let under_test = DefaultBlockV2Translator { + era_end_translator, + deploy_hash_translator, + transfer_hash_translator, + }; + + let got = under_test.translate(&block_v2).unwrap(); + assert_eq!(got.body.deploy_hashes, deploy_hashes_1); + assert_eq!(got.body.transfer_hashes, deploy_hashes_2); + } + + #[test] + fn default_era_end_v2_translator_translates_all_data() { + let under_test = DefaultEraEndV2Translator; + let era_end_v2 = era_end_v2(); + let maybe_translated = under_test.translate(&era_end_v2); + assert!(maybe_translated.is_some(), "{:?}", maybe_translated); + let translated = maybe_translated.unwrap(); + let mut expected_validator_weights = BTreeMap::new(); + expected_validator_weights.insert( + parse_public_key("0198957673ad060503e2ec7d98dc71af6f90ad1f854fe18025e3e7d0d1bbe5e32b"), + U512::from(1), + ); + expected_validator_weights.insert( + parse_public_key( + 
"02022d6bc4e3012cc4ae467b5525111cf7ed65883b05a1d924f1e654c64fad3a027c", + ), + U512::from(2), + ); + let mut rewards = BTreeMap::new(); + rewards.insert( + parse_public_key("01235b932586ae5cc3135f7a0dc723185b87e5bd3ae0ac126a92c14468e976ff25"), + 129457537, + ); + let report = EraReport::new( + vec![ + parse_public_key( + "010a10a45ea0aff7af1ffef92287d00ec4cf01c5e9e2952e018a2fbb0f0ede2b50", + ), + parse_public_key( + "02037c17d279d6e54375f7cfb3559730d5434bfedc8638a3f95e55f6e85fc9e8f611", + ), + parse_public_key( + "02026d4b741a0ece4b3d6d61294a8db28a28dbd734133694582d38f240686ec61d05", + ), + ], + rewards, + vec![parse_public_key( + "010a10a45ea0aff7af1ffef92287d00ec4cf01c5e9e2952e018a2fbb0f0ede2b51", + )], + ); + let expected = EraEndV1::new(report, expected_validator_weights); + assert_eq!(translated, expected); + } + + #[test] + fn default_era_end_v2_translator_returns_none_when_reward_exceeds_u64() { + let under_test = DefaultEraEndV2Translator; + let era_end_v2 = era_end_v2_with_reward_exceeding_u64(); + let maybe_translated = under_test.translate(&era_end_v2); + assert!(maybe_translated.is_none()); + } + + fn compare_as_json(left: &T, right: &Y) + where + T: Serialize, + Y: Serialize, + { + let left_value = serde_json::to_value(left).unwrap(); + let right_value = serde_json::to_value(right).unwrap(); + assert_eq!(left_value, right_value); + } + + fn prepare_deploys_mock( + deploy_hash_translator: &mut MockDeployHashTranslator, + block_v2: &casper_types::BlockV2, + deploys: Vec, + ) { + deploy_hash_translator + .expect_translate() + .times(1) + .with(predicate::eq(block_v2.body().clone())) + .return_const(deploys); + } + + fn prepare_transfer_mock( + transfer_hash_translator: &mut MockDeployHashTranslator, + block_v2: &casper_types::BlockV2, + deploys: Vec, + ) { + transfer_hash_translator + .expect_translate() + .times(1) + .with(predicate::eq(block_v2.body().clone())) + .return_const(deploys); + } + + fn prepare_era_end_mock( + era_end_translator: &mut 
MockEraEndV2Translator, + era_end_ref: &casper_types::EraEndV2, + returned: Option, + ) { + era_end_translator + .expect_translate() + .times(1) + .with(predicate::eq(era_end_ref.clone())) + .return_const(returned); + } + + fn prepare_mocks() -> ( + MockEraEndV2Translator, + MockDeployHashTranslator, + MockDeployHashTranslator, + ) { + let era_end_translator = MockEraEndV2Translator::new(); + let deploy_hash_translator = MockDeployHashTranslator::new(); + let transfer_hash_translator = MockDeployHashTranslator::new(); + ( + era_end_translator, + deploy_hash_translator, + transfer_hash_translator, + ) + } + + fn random_validator_weights( + test_rng: &mut TestRng, + ) -> std::collections::BTreeMap { + let mut tree = BTreeMap::new(); + let number_of_weights = test_rng.gen_range(5..=10); + for _ in 0..number_of_weights { + tree.insert(PublicKey::random(test_rng), test_rng.gen()); + } + tree + } +} diff --git a/types/src/legacy_sse_data/translate_deploy_hashes.rs b/types/src/legacy_sse_data/translate_deploy_hashes.rs new file mode 100644 index 00000000..dafd1877 --- /dev/null +++ b/types/src/legacy_sse_data/translate_deploy_hashes.rs @@ -0,0 +1,111 @@ +use casper_types::{BlockBodyV2, DeployHash, TransactionHash, MINT_LANE_ID}; +use mockall::automock; + +#[automock] +pub trait DeployHashTranslator { + fn translate(&self, block_body_v2: &BlockBodyV2) -> Vec; +} + +#[derive(Default)] +pub struct StandardDeployHashesTranslator; + +#[derive(Default)] +pub struct TransferDeployHashesTranslator; + +impl DeployHashTranslator for StandardDeployHashesTranslator { + fn translate(&self, block_body_v2: &BlockBodyV2) -> Vec { + block_body_v2 + .transactions() + .iter() + .filter(|(lane_id, _)| **lane_id != MINT_LANE_ID) + .flat_map(|(_, hashes)| { + hashes + .iter() + .filter_map(|hash| match hash { + TransactionHash::Deploy(deploy_hash) => Some(*deploy_hash), + TransactionHash::V1(_) => None, + }) + .collect::>() + }) + .collect() + } +} + +impl DeployHashTranslator for 
TransferDeployHashesTranslator { + fn translate(&self, block_body_v2: &casper_types::BlockBodyV2) -> Vec { + block_body_v2 + .mint() + .filter_map(|el| match el { + TransactionHash::Deploy(deploy_hash) => Some(deploy_hash), + TransactionHash::V1(_) => None, + }) + .collect() + } +} + +#[cfg(test)] +mod tests { + use casper_types::{testing::TestRng, EraId}; + + use crate::legacy_sse_data::fixtures::*; + + use super::*; + + #[test] + fn standard_deploy_hashes_translator_uses_standard_deploy_transaction_hashes() { + let mut test_rng = TestRng::new(); + let under_test = StandardDeployHashesTranslator; + let ( + transactions, + standard_deploy_hash, + _standard_v1_hash, + _mint_deploy_hash, + _mint_v1_hash, + _install_upgrade_v1, + _auction_v1, + ) = sample_transactions(&mut test_rng); + let block_v2 = block_v2_with_transactions( + &mut test_rng, + parent_hash(), + state_root_hash(), + timestamp(), + EraId::new(15678276), + 345678987, + proposer(), + transactions.iter().collect(), + ); + let block_body = block_v2.body(); + assert_eq!(block_body.all_transactions().collect::>().len(), 6); + let translated = under_test.translate(block_body); + assert_eq!(translated, vec![standard_deploy_hash,]) + } + + #[test] + fn transfer_deploy_hashes_translator_uses_mint_deploy_transaction_hashes() { + let mut test_rng = TestRng::new(); + let under_test = TransferDeployHashesTranslator; + let ( + transactions, + _standard_deploy_hash, + _standard_v1_hash, + mint_deploy_hash, + _mint_v1_hash, + _install_upgrade_v1, + _auction_v1, + ) = sample_transactions(&mut test_rng); + let block_v2 = block_v2_with_transactions( + &mut test_rng, + parent_hash(), + state_root_hash(), + timestamp(), + EraId::new(15678276), + 345678987, + proposer(), + transactions.iter().collect(), + ); + let block_body = block_v2.body(); + assert_eq!(block_body.all_transactions().collect::>().len(), 6); + let translated = under_test.translate(block_body); + assert_eq!(translated, vec![mint_deploy_hash,]) + } +} diff 
--git a/types/src/legacy_sse_data/translate_execution_result.rs b/types/src/legacy_sse_data/translate_execution_result.rs new file mode 100644 index 00000000..49699b8b --- /dev/null +++ b/types/src/legacy_sse_data/translate_execution_result.rs @@ -0,0 +1,305 @@ +use casper_types::{ + execution::{ + execution_result_v1::{ExecutionEffect, NamedKey, TransformKindV1, TransformV1}, + Effects, ExecutionResultV1, ExecutionResultV2, TransformKindV2, TransformV2, + }, + NamedKeys, StoredValue, +}; + +pub fn build_default_execution_result_translator( +) -> DefaultExecutionResultV2Translator { + DefaultExecutionResultV2Translator { + effects_translator: DefaultExecutionEffectsTranslator, + } +} + +pub trait ExecutionResultV2Translator { + fn translate(&self, result: &ExecutionResultV2) -> Option; +} + +pub trait ExecutionEffectsTranslator { + fn translate(&self, effects: &Effects) -> Option; +} + +pub struct DefaultExecutionResultV2Translator +where + EET: ExecutionEffectsTranslator, +{ + effects_translator: EET, +} + +impl ExecutionResultV2Translator for DefaultExecutionResultV2Translator +where + EET: ExecutionEffectsTranslator, +{ + fn translate(&self, result: &ExecutionResultV2) -> Option { + let maybe_effects = self.effects_translator.translate(&result.effects); + if let Some(effect) = maybe_effects { + if let Some(err_msg) = &result.error_message { + Some(ExecutionResultV1::Failure { + effect, + transfers: vec![], + cost: result.cost, + error_message: err_msg.to_string(), + }) + } else { + Some(ExecutionResultV1::Success { + effect, + transfers: vec![], + cost: result.cost, + }) + } + } else { + None + } + } +} + +pub struct DefaultExecutionEffectsTranslator; + +impl ExecutionEffectsTranslator for DefaultExecutionEffectsTranslator { + fn translate(&self, effects: &Effects) -> Option { + let mut transforms: Vec = Vec::new(); + for ex_ef in effects.transforms() { + let key = *ex_ef.key(); + let maybe_transform_kind = map_transform_v2(ex_ef); + if let Some(transform_kind) 
= maybe_transform_kind { + let transform = TransformV1 { + key: key.to_formatted_string(), + transform: transform_kind, + }; + transforms.push(transform); + } else { + // If we stumble on a transform we can't translate, we should clear all of them + // so that the user won't get a partial view of the effects. + transforms.clear(); + break; + } + } + Some(ExecutionEffect { + // Operations will be empty since we can't translate them (no V2 entity has a corresponding entity in V1). + operations: vec![], + transforms, + }) + } +} + +fn map_transform_v2(ex_ef: &TransformV2) -> Option { + let maybe_transform_kind = match ex_ef.kind() { + TransformKindV2::Identity => Some(TransformKindV1::Identity), + TransformKindV2::Write(stored_value) => maybe_tanslate_stored_value(stored_value), + TransformKindV2::AddInt32(v) => Some(TransformKindV1::AddInt32(*v)), + TransformKindV2::AddUInt64(v) => Some(TransformKindV1::AddUInt64(*v)), + TransformKindV2::AddUInt128(v) => Some(TransformKindV1::AddUInt128(*v)), + TransformKindV2::AddUInt256(v) => Some(TransformKindV1::AddUInt256(*v)), + TransformKindV2::AddUInt512(v) => Some(TransformKindV1::AddUInt512(*v)), + TransformKindV2::AddKeys(keys) => handle_named_keys(keys), + TransformKindV2::Prune(key) => Some(TransformKindV1::Prune(*key)), + TransformKindV2::Failure(err) => Some(TransformKindV1::Failure(err.to_string())), + }; + maybe_transform_kind +} + +fn handle_named_keys(keys: &NamedKeys) -> Option { + let mut named_keys = vec![]; + for (name, key) in keys.iter() { + let named_key = NamedKey { + name: name.to_string(), + key: key.to_formatted_string(), + }; + named_keys.push(named_key); + } + Some(TransformKindV1::AddKeys(named_keys)) +} + +fn maybe_tanslate_stored_value(stored_value: &StoredValue) -> Option { + match stored_value { + StoredValue::CLValue(cl_value) => Some(TransformKindV1::WriteCLValue(cl_value.clone())), + StoredValue::Account(acc) => Some(TransformKindV1::WriteAccount(acc.account_hash())), + 
StoredValue::ContractWasm(_) => Some(TransformKindV1::WriteContractWasm), + StoredValue::Contract(_) => Some(TransformKindV1::WriteContract), + StoredValue::ContractPackage(_) => Some(TransformKindV1::WriteContractPackage), + StoredValue::Transfer(transfer_v1) => { + Some(TransformKindV1::WriteTransfer(transfer_v1.clone())) + } + StoredValue::DeployInfo(deploy_info) => { + Some(TransformKindV1::WriteDeployInfo(deploy_info.clone())) + } + StoredValue::EraInfo(era_info) => Some(TransformKindV1::WriteEraInfo(era_info.clone())), + StoredValue::Bid(bid) => Some(TransformKindV1::WriteBid(bid.clone())), + StoredValue::Withdraw(withdraw) => Some(TransformKindV1::WriteWithdraw(withdraw.clone())), + StoredValue::Unbonding(p) => Some(TransformKindV1::WriteUnbonding(p.clone())), + StoredValue::NamedKey(named_key) => { + let key_res = named_key.get_key(); + let name_res = named_key.get_name(); + if let (Ok(key), Ok(name)) = (key_res, name_res) { + Some(TransformKindV1::AddKeys(vec![NamedKey { + name: name.to_string(), + key: key.to_string(), + }])) + } else { + None + } + } + // following variants will not be understood by old clients since they were introduced in 2.x + StoredValue::AddressableEntity(_) + | StoredValue::BidKind(_) + | StoredValue::SmartContract(_) + | StoredValue::ByteCode(_) + | StoredValue::MessageTopic(_) + | StoredValue::Message(_) + | StoredValue::Prepayment(_) + | StoredValue::EntryPoint(_) + | StoredValue::RawBytes(_) => None, + } +} + +#[cfg(test)] +mod tests { + use super::{ + maybe_tanslate_stored_value, DefaultExecutionEffectsTranslator, ExecutionEffectsTranslator, + }; + use casper_types::{ + account::{AccountHash, ActionThresholds, AssociatedKeys, Weight}, + contract_messages::MessageChecksum, + contracts::{ContractPackage, ContractPackageStatus, ContractVersions, DisabledVersions}, + execution::{ + execution_result_v1::{NamedKey, TransformKindV1, TransformV1}, + Effects, TransformKindV2, TransformV2, + }, + testing::TestRng, + AccessRights, 
Account, CLValue, Groups, Key, NamedKeys, StoredValue, URef, + }; + use pretty_assertions::assert_eq; + use rand::Rng; + + #[test] + fn maybe_tanslate_stored_value_should_translate_values() { + let stored_value = StoredValue::CLValue(CLValue::from_t(1).unwrap()); + assert_eq!( + Some(TransformKindV1::WriteCLValue(CLValue::from_t(1).unwrap())), + maybe_tanslate_stored_value(&stored_value) + ); + + let account = random_account(); + let stored_value = StoredValue::Account(account); + assert_eq!( + Some(TransformKindV1::WriteAccount(AccountHash::new([9u8; 32]))), + maybe_tanslate_stored_value(&stored_value) + ); + + let contract_package = random_contract_package(); + let stored_value = StoredValue::ContractPackage(contract_package); + assert_eq!( + Some(TransformKindV1::WriteContractPackage), + maybe_tanslate_stored_value(&stored_value) + ); + } + + #[test] + fn default_execution_effects_translator_should_translate_effects_v2() { + let mut rng = TestRng::new(); + let under_test = DefaultExecutionEffectsTranslator {}; + let key_1: Key = rng.gen(); + let key_2: Key = rng.gen(); + let key_3: Key = rng.gen(); + let effects = build_example_effects(key_1, key_2, key_3); + + let maybe_translated = under_test.translate(&effects); + + assert!(maybe_translated.is_some(), "{:?}", maybe_translated); + let translated = maybe_translated.unwrap(); + assert!(translated.operations.is_empty()); + assert_eq!( + translated.transforms, + build_expected_transforms(key_1, key_2, key_3) + ); + } + + #[test] + fn default_execution_effects_translator_should_empty_transforms_if_something_was_not_translatable( + ) { + let mut rng = TestRng::new(); + let under_test = DefaultExecutionEffectsTranslator {}; + let key_1: Key = rng.gen(); + let key_2: Key = rng.gen(); + let key_3: Key = rng.gen(); + let mut effects = build_example_effects(key_1, key_2, key_3); + effects.push(TransformV2::new( + Key::Account(rng.gen()), + TransformKindV2::Write(StoredValue::Message(MessageChecksum([1; 32]))), + )); + + 
let maybe_translated = under_test.translate(&effects); + + assert!(maybe_translated.is_some(), "{:?}", maybe_translated); + let translated = maybe_translated.unwrap(); + assert!(translated.operations.is_empty()); + assert!(translated.transforms.is_empty()); + } + + fn build_expected_transforms(key_1: Key, key_2: Key, key_3: Key) -> Vec { + let transform_1 = TransformV1 { + key: key_1.to_formatted_string(), + transform: TransformKindV1::Identity, + }; + let transform_2 = TransformV1 { + key: key_2.to_formatted_string(), + transform: TransformKindV1::AddKeys(vec![ + NamedKey { + name: "key_1".to_string(), + key: key_1.to_formatted_string(), + }, + NamedKey { + name: "key_2".to_string(), + key: key_2.to_formatted_string(), + }, + ]), + }; + let transform_3 = TransformV1 { + key: key_3.to_formatted_string(), + transform: TransformKindV1::AddUInt64(1235), + }; + let expected_transforms = vec![transform_1, transform_2, transform_3]; + expected_transforms + } + + fn build_example_effects(key_1: Key, key_2: Key, key_3: Key) -> Effects { + let mut effects = Effects::new(); + effects.push(TransformV2::new(key_1, TransformKindV2::Identity)); + let mut named_keys = NamedKeys::new(); + named_keys.insert("key_1".to_string(), key_1); + named_keys.insert("key_2".to_string(), key_2); + effects.push(TransformV2::new( + key_2, + TransformKindV2::AddKeys(named_keys), + )); + effects.push(TransformV2::new(key_3, TransformKindV2::AddUInt64(1235))); + effects + } + + fn random_account() -> Account { + let account_hash = AccountHash::new([9u8; 32]); + let action_thresholds = ActionThresholds { + deployment: Weight::new(8), + key_management: Weight::new(11), + }; + Account::new( + account_hash, + NamedKeys::default(), + URef::new([43; 32], AccessRights::READ_ADD_WRITE), + AssociatedKeys::default(), + action_thresholds, + ) + } + + fn random_contract_package() -> ContractPackage { + ContractPackage::new( + URef::new([0; 32], AccessRights::NONE), + ContractVersions::default(), + 
DisabledVersions::default(), + Groups::default(), + ContractPackageStatus::default(), + ) + } +} diff --git a/types/src/lib.rs b/types/src/lib.rs index 0b82ee11..b7f12768 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -4,18 +4,20 @@ #[cfg_attr(not(test), macro_use)] extern crate alloc; -pub mod block; -pub mod deploy; -mod digest; -mod executable_deploy_item; mod filter; -pub mod metrics; +pub mod legacy_sse_data; pub mod sse_data; -#[cfg(feature = "sse-data-testing")] +#[cfg(any(feature = "sse-data-testing", test))] mod testing; -pub use crate::executable_deploy_item::ExecutableDeployItem; -pub use block::{json_compatibility::JsonBlock, Block, BlockHash, FinalitySignature}; -pub use deploy::{Deploy, DeployHash}; -pub use digest::Digest; +use casper_types::ProtocolVersion; pub use filter::Filter; +use std::str::FromStr; + +use once_cell::sync::Lazy; +pub static SIDECAR_VERSION: Lazy = Lazy::new(|| { + let major: u32 = FromStr::from_str(env!("CARGO_PKG_VERSION_MAJOR")).unwrap(); + let minor: u32 = FromStr::from_str(env!("CARGO_PKG_VERSION_MINOR")).unwrap(); + let patch: u32 = FromStr::from_str(env!("CARGO_PKG_VERSION_PATCH")).unwrap(); + ProtocolVersion::from_parts(major, minor, patch) +}); diff --git a/types/src/metrics.rs b/types/src/metrics.rs deleted file mode 100644 index 78c1cd6c..00000000 --- a/types/src/metrics.rs +++ /dev/null @@ -1,147 +0,0 @@ -use once_cell::sync::Lazy; -use prometheus::{GaugeVec, HistogramOpts, HistogramVec, IntCounterVec, Opts, Registry}; -#[cfg(feature = "additional-metrics")] -const DB_OPERATION_BUCKETS: &[f64; 8] = &[ - 3e+5_f64, 3e+6_f64, 10e+6_f64, 20e+6_f64, 5e+7_f64, 1e+8_f64, 5e+8_f64, 1e+9_f64, -]; -const BUCKETS: &[f64; 8] = &[ - 5e+2_f64, 1e+3_f64, 2e+3_f64, 5e+3_f64, 5e+4_f64, 5e+5_f64, 5e+6_f64, 5e+7_f64, -]; - -static REGISTRY: Lazy = Lazy::new(Registry::new); -pub static ERROR_COUNTS: Lazy = Lazy::new(|| { - let counter = IntCounterVec::new( - Opts::new("error_counts", "Error counts"), - &["category", 
"description"], - ) - .unwrap(); - REGISTRY - .register(Box::new(counter.clone())) - .expect("cannot register metric"); - counter -}); -pub static RECEIVED_BYTES: Lazy = Lazy::new(|| { - let counter = HistogramVec::new( - HistogramOpts { - common_opts: Opts::new("received_bytes", "Received bytes"), - buckets: Vec::from(BUCKETS as &'static [f64]), - }, - &["filter"], - ) - .unwrap(); - REGISTRY - .register(Box::new(counter.clone())) - .expect("cannot register metric"); - counter -}); -pub static INTERNAL_EVENTS: Lazy = Lazy::new(|| { - let counter = IntCounterVec::new( - Opts::new("internal_events", "Count of internal events"), - &["category", "description"], - ) - .expect("metric can't be created"); - REGISTRY - .register(Box::new(counter.clone())) - .expect("cannot register metric"); - counter -}); -pub static NODE_STATUSES: Lazy = Lazy::new(|| { - let counter = GaugeVec::new( - Opts::new("node_statuses", "Current status of node to which sidecar is connected. Numbers mean: 0 - preparing; 1 - connecting; 2 - connected; 3 - reconnecting; -1 - defunct -> used up all connection attempts ; -2 - defunct -> node is in an incompatible version"), - &["node"] - ) - .expect("metric can't be created"); - REGISTRY - .register(Box::new(counter.clone())) - .expect("cannot register metric"); - counter -}); - -#[cfg(feature = "additional-metrics")] -pub static DB_OPERATION_TIMES: Lazy = Lazy::new(|| { - let counter = HistogramVec::new( - HistogramOpts { - common_opts: Opts::new( - "db_operation_times", - "Times (in nanoseconds) it took to perform a database operation.", - ), - buckets: Vec::from(DB_OPERATION_BUCKETS as &'static [f64]), - }, - &["filter"], - ) - .expect("metric can't be created"); - REGISTRY - .register(Box::new(counter.clone())) - .expect("cannot register metric"); - counter -}); -#[cfg(feature = "additional-metrics")] -pub static EVENTS_PROCESSED_PER_SECOND: Lazy = Lazy::new(|| { - let counter = GaugeVec::new( - Opts::new("events_processed", "Events processed by 
sidecar. Split by \"module\" which should be either \"inbound\" or \"outbound\". \"Inbound\" means the number of events per second which were read from node endpoints and persisted in DB. \"Outbound\" means number of events pushed to clients."), - &["module"] - ) - .expect("metric can't be created"); - REGISTRY - .register(Box::new(counter.clone())) - .expect("cannot register metric"); - counter -}); - -pub struct MetricCollectionError { - reason: String, -} - -impl ToString for MetricCollectionError { - fn to_string(&self) -> String { - format!("MetricCollectionError: {}", self.reason) - } -} - -impl MetricCollectionError { - fn new(reason: String) -> Self { - MetricCollectionError { reason } - } -} - -pub fn metrics_summary() -> Result { - use prometheus::Encoder; - let encoder = prometheus::TextEncoder::new(); - let mut buffer = Vec::new(); - if let Err(e) = encoder.encode(®ISTRY.gather(), &mut buffer) { - return Err(MetricCollectionError::new(format!( - "could not encode custom metrics: {}", - e - ))); - }; - let mut res = match String::from_utf8(buffer.clone()) { - Ok(v) => v, - Err(e) => { - return Err(MetricCollectionError::new(format!( - "custom metrics have a non-utf8 character: {}", - e - ))); - } - }; - buffer.clear(); - - let mut buffer = Vec::new(); - if let Err(e) = encoder.encode(&prometheus::gather(), &mut buffer) { - return Err(MetricCollectionError::new(format!( - "error when encoding default prometheus metrics: {}", - e - ))); - }; - let res_custom = match String::from_utf8(buffer.clone()) { - Ok(v) => v, - Err(e) => { - return Err(MetricCollectionError::new(format!( - "Default and custom metrics have a non-utf8 character: {}", - e - ))) - } - }; - buffer.clear(); - res.push_str(&res_custom); - Ok(res) -} diff --git a/types/src/sse_data.rs b/types/src/sse_data.rs index a89303cc..7d6d1c2c 100644 --- a/types/src/sse_data.rs +++ b/types/src/sse_data.rs @@ -1,5 +1,5 @@ //! 
Common types used when dealing with serialization/deserialization of data from nodes, -//! also a "contemporary" data model which is based on 1.4.x node specification +//! also a "contemporary" data model which is based on 2.0.x node specification /// A filter for event types a client has subscribed to receive. #[derive(Clone, Copy, Eq, PartialEq, Debug)] @@ -7,9 +7,9 @@ pub enum EventFilter { ApiVersion, SidecarVersion, BlockAdded, - DeployAccepted, - DeployProcessed, - DeployExpired, + TransactionAccepted, + TransactionProcessed, + TransactionExpired, Fault, FinalitySignature, Step, @@ -17,10 +17,15 @@ pub enum EventFilter { #[cfg(feature = "sse-data-testing")] use super::testing; -use crate::{BlockHash, Deploy, DeployHash, FinalitySignature, JsonBlock}; #[cfg(feature = "sse-data-testing")] -use casper_types::testing::TestRng; -use casper_types::{EraId, ExecutionResult, ProtocolVersion, PublicKey, TimeDiff, Timestamp}; +use casper_types::ChainNameDigest; +use casper_types::{ + contract_messages::Messages, execution::ExecutionResult, Block, BlockHash, EraId, + FinalitySignature, InitiatorAddr, ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transaction, + TransactionHash, +}; +#[cfg(feature = "sse-data-testing")] +use casper_types::{execution::ExecutionResultV2, testing::TestRng, TestBlockBuilder}; #[cfg(feature = "sse-data-testing")] use rand::Rng; use serde::{Deserialize, Serialize}; @@ -40,16 +45,14 @@ pub(crate) fn to_error(msg: String) -> SseDataDeserializeError { SseDataDeserializeError::DeserializationError(msg) } -/// Deserializes a string which should contain json data and returns a result of either SseData (which is 1.4.x compliant) or an SseDataDeserializeError +/// Deserializes a string which should contain json data and returns a result of either SseData (which is 2.0.x compliant) or an SseDataDeserializeError /// /// * `json_raw`: string slice which should contain raw json data. 
-pub fn deserialize(json_raw: &str) -> Result<(SseData, bool), SseDataDeserializeError> { - serde_json::from_str::(json_raw) - .map(|el| (el, false)) - .map_err(|err| { - let error_message = format!("Serde Error: {}", err); - to_error(error_message) - }) +pub fn deserialize(json_raw: &str) -> Result { + serde_json::from_str::(json_raw).map_err(|err| { + let error_message = format!("Error when deserializing SSE event from node: {}", err); + to_error(error_message) + }) } /// The "data" field of the events sent on the event stream to clients. @@ -65,26 +68,22 @@ pub enum SseData { /// The given block has been added to the linear chain and stored locally. BlockAdded { block_hash: BlockHash, - block: Box, - }, - /// The given deploy has been newly-accepted by this node. - DeployAccepted { - #[serde(flatten)] - // It's an Arc to not create multiple copies of the same deploy for multiple subscribers. - deploy: Arc, + block: Box, }, - /// The given deploy has been executed, committed and forms part of the given block. - DeployProcessed { - deploy_hash: Box, - account: Box, + /// The given transaction has been newly-accepted by this node. + TransactionAccepted(Arc), + /// The given transaction has been executed, committed and forms part of the given block. + TransactionProcessed { + transaction_hash: Box, + initiator_addr: Box, timestamp: Timestamp, ttl: TimeDiff, - dependencies: Vec, block_hash: Box, execution_result: Box, + messages: Messages, }, - /// The given deploy has expired. - DeployExpired { deploy_hash: DeployHash }, + /// The given transaction has expired. + TransactionExpired { transaction_hash: TransactionHash }, /// Generic representation of validator's fault in an era. Fault { era_id: EraId, @@ -96,22 +95,41 @@ pub enum SseData { /// The execution effects produced by a `StepRequest`. Step { era_id: EraId, - execution_effect: Box, + execution_effects: Box, }, /// The node is about to shut down. 
Shutdown, } impl SseData { + pub fn type_label(&self) -> &str { + match self { + SseData::ApiVersion(_) => "ApiVersion", + SseData::SidecarVersion(_) => "SidecarVersion", + SseData::BlockAdded { .. } => "BlockAdded", + SseData::TransactionAccepted(_) => "TransactionAccepted", + SseData::TransactionProcessed { .. } => "TransactionProcessed", + SseData::TransactionExpired { .. } => "TransactionExpired", + SseData::Fault { .. } => "Fault", + SseData::FinalitySignature(_) => "FinalitySignature", + SseData::Step { .. } => "Step", + SseData::Shutdown => "Shutdown", + } + } pub fn should_include(&self, filter: &[EventFilter]) -> bool { match self { SseData::Shutdown => true, - SseData::ApiVersion(_) => filter.contains(&EventFilter::ApiVersion), + //Keeping the rest part as explicit match so that if a new variant is added, it will be caught by the compiler SseData::SidecarVersion(_) => filter.contains(&EventFilter::SidecarVersion), + SseData::ApiVersion(_) => filter.contains(&EventFilter::ApiVersion), SseData::BlockAdded { .. } => filter.contains(&EventFilter::BlockAdded), - SseData::DeployAccepted { .. } => filter.contains(&EventFilter::DeployAccepted), - SseData::DeployProcessed { .. } => filter.contains(&EventFilter::DeployProcessed), - SseData::DeployExpired { .. } => filter.contains(&EventFilter::DeployExpired), + SseData::TransactionAccepted { .. } => { + filter.contains(&EventFilter::TransactionAccepted) + } + SseData::TransactionProcessed { .. } => { + filter.contains(&EventFilter::TransactionProcessed) + } + SseData::TransactionExpired { .. } => filter.contains(&EventFilter::TransactionExpired), SseData::Fault { .. } => filter.contains(&EventFilter::Fault), SseData::FinalitySignature(_) => filter.contains(&EventFilter::FinalitySignature), SseData::Step { .. } => filter.contains(&EventFilter::Step), @@ -133,41 +151,48 @@ impl SseData { /// Returns a random `SseData::BlockAdded`. 
pub fn random_block_added(rng: &mut TestRng) -> Self { - let block = JsonBlock::random(rng); + let block = TestBlockBuilder::new().build(rng); SseData::BlockAdded { - block_hash: block.hash, - block: Box::new(block), + block_hash: *block.hash(), + block: Box::new(block.into()), } } - /// Returns a random `SseData::DeployAccepted`, along with the random `Deploy`. - pub fn random_deploy_accepted(rng: &mut TestRng) -> (Self, Deploy) { - let deploy = Deploy::random(rng); - let event = SseData::DeployAccepted { - deploy: Arc::new(deploy.clone()), - }; - (event, deploy) + /// Returns a random `SseData::TransactionAccepted`, along with the random `Transaction`. + pub fn random_transaction_accepted(rng: &mut TestRng) -> (Self, Transaction) { + let transaction = Transaction::random(rng); + let event = SseData::TransactionAccepted(Arc::new(transaction.clone())); + (event, transaction) } - /// Returns a random `SseData::DeployProcessed`. - pub fn random_deploy_processed(rng: &mut TestRng) -> Self { - let deploy = Deploy::random(rng); - SseData::DeployProcessed { - deploy_hash: Box::new(*deploy.hash()), - account: Box::new(deploy.header().account().clone()), - timestamp: deploy.header().timestamp(), - ttl: deploy.header().ttl(), - dependencies: deploy.header().dependencies().clone(), + /// Returns a random `SseData::TransactionProcessed`. 
+ pub fn random_transaction_processed(rng: &mut TestRng) -> Self { + let transaction = Transaction::random(rng); + let timestamp = match &transaction { + Transaction::Deploy(deploy) => deploy.header().timestamp(), + Transaction::V1(v1_transaction) => v1_transaction.timestamp(), + }; + let ttl = match &transaction { + Transaction::Deploy(deploy) => deploy.header().ttl(), + Transaction::V1(v1_transaction) => v1_transaction.ttl(), + }; + + SseData::TransactionProcessed { + transaction_hash: Box::new(TransactionHash::random(rng)), + initiator_addr: Box::new(transaction.initiator_addr().clone()), + timestamp, + ttl, block_hash: Box::new(BlockHash::random(rng)), - execution_result: Box::new(rng.gen()), + execution_result: Box::new(ExecutionResult::random(rng)), + messages: rng.random_vec(1..5), } } - /// Returns a random `SseData::DeployExpired` - pub fn random_deploy_expired(rng: &mut TestRng) -> Self { - let deploy = testing::create_expired_deploy(Timestamp::now(), rng); - SseData::DeployExpired { - deploy_hash: *deploy.hash(), + /// Returns a random `SseData::TransactionExpired` + pub fn random_transaction_expired(rng: &mut TestRng) -> Self { + let transaction = testing::create_expired_transaction(Timestamp::now(), rng); + SseData::TransactionExpired { + transaction_hash: transaction.hash(), } } @@ -182,29 +207,44 @@ impl SseData { /// Returns a random `SseData::FinalitySignature`. pub fn random_finality_signature(rng: &mut TestRng) -> Self { + let block_hash = BlockHash::random(rng); + let block_height = rng.gen::(); + let era_id = EraId::random(rng); + let chain_name_digest = ChainNameDigest::random(rng); SseData::FinalitySignature(Box::new(FinalitySignature::random_for_block( - BlockHash::random(rng), - rng.gen(), + block_hash, + block_height, + era_id, + chain_name_digest, rng, ))) } /// Returns a random `SseData::Step`. pub fn random_step(rng: &mut TestRng) -> Self { - let execution_effect = match rng.gen::() { - ExecutionResult::Success { effect, .. 
} | ExecutionResult::Failure { effect, .. } => { - effect - } - }; + let execution_effects = ExecutionResultV2::random(rng); + SseData::Step { era_id: EraId::new(rng.gen()), - execution_effect: to_raw_value(&execution_effect).unwrap(), + execution_effects: to_raw_value(&execution_effects).unwrap(), } } + + /// Returns a random `SseData::SidecarVersion`. + pub fn random_sidecar_version(rng: &mut TestRng) -> Self { + let protocol_version = ProtocolVersion::from_parts( + rng.gen_range(2..10), + rng.gen::() as u32, + rng.gen::() as u32, + ); + SseData::SidecarVersion(protocol_version) + } } #[cfg(feature = "sse-data-testing")] pub mod test_support { + use serde_json::json; + pub const BLOCK_HASH_1: &str = "ca52062424e9d5631a34b7b401e123927ce29d4bd10bc97c7df0aa752f131bb7"; pub const BLOCK_HASH_2: &str = @@ -215,21 +255,21 @@ pub mod test_support { "000625a798318315a4f401828f6d53371a623d79653db03a79a4cfbdd1e4ae53"; pub fn example_api_version() -> String { - "{\"ApiVersion\":\"1.5.2\"}".to_string() + "{\"ApiVersion\":\"2.0.0\"}".to_string() } pub fn shutdown() -> String { "\"Shutdown\"".to_string() } - pub fn example_block_added_1_5_2(block_hash: &str, height: &str) -> String { - let raw_block_added = 
format!("{{\"BlockAdded\":{{\"block_hash\":\"{block_hash}\",\"block\":{{\"hash\":\"{block_hash}\",\"header\":{{\"parent_hash\":\"4a28718301a83a43563ec42a184294725b8dd188aad7a9fceb8a2fa1400c680e\",\"state_root_hash\":\"63274671f2a860e39bb029d289e688526e4828b70c79c678649748e5e376cb07\",\"body_hash\":\"6da90c09f3fc4559d27b9fff59ab2453be5752260b07aec65e0e3a61734f656a\",\"random_bit\":true,\"accumulated_seed\":\"c8b4f30a3e3e082f4f206f972e423ffb23d152ca34241ff94ba76189716b61da\",\"era_end\":{{\"era_report\":{{\"equivocators\":[],\"rewards\":[{{\"validator\":\"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\",\"amount\":1559401400039}},{{\"validator\":\"010427c1d1227c9d2aafe8c06c6e6b276da8dcd8fd170ca848b8e3e8e1038a6dc8\",\"amount\":25895190891}}],\"inactive_validators\":[]}},\"next_era_validator_weights\":[{{\"validator\":\"01026ca707c348ed8012ac6a1f28db031fadd6eb67203501a353b867a08c8b9a80\",\"weight\":\"50538244651768072\"}},{{\"validator\":\"010427c1d1227c9d2aafe8c06c6e6b276da8dcd8fd170ca848b8e3e8e1038a6dc8\",\"weight\":\"839230678448335\"}}]}},\"timestamp\":\"2021-04-08T05:14:14.912Z\",\"era_id\":90,\"height\":{height},\"protocol_version\":\"1.0.0\"}},\"body\":{{\"proposer\":\"012bac1d0ff9240ff0b7b06d555815640497861619ca12583ddef434885416e69b\",\"deploy_hashes\":[],\"transfer_hashes\":[]}},\"proofs\":[]}}}}}}"); + pub fn example_block_added_2_0_0(hash: &str, height: u64) -> String { + let raw_block_added = 
json!({"BlockAdded":{"block_hash":hash,"block":{"Version2":{"hash":hash,"header":{"parent_hash":"327a6be4f8b23115e089875428ff03d9071a7020ce3e0f4734c43e4279ad77fc","state_root_hash":"4f1638725e8a92ad6432a76124ba4a6db365b00ff352beb58b8c48ed9ed4b68d","body_hash":"337a4c9e510e01e142a19e5d81203bdc43e59a4f9039288c01f7b89370e1d104","random_bit":true,"accumulated_seed":"7b7d7b18668dcc8ffecda5f5de1037f26cd61394f72357cdc9ba84f0f48e37c8","era_end":null,"timestamp":"2024-05-10T19:55:20.415Z","era_id":77,"height":height,"protocol_version":"2.0.0","proposer":"01cee2ff4318180282a73bfcd1446f8145e4d80508fecd76fc38dce13af491f0e5","current_gas_price":1,"last_switch_block_hash":"a3533c2625c6413be2287e581c5fca1a0165ebac02b051f9f07ccf1ad483cf2d"},"body":{"transactions":{"0":[],"1":[],"2":[],"3":[]},"rewarded_signatures":[[248],[0],[0]]}}}}}).to_string(); super::deserialize(&raw_block_added).unwrap(); // deserializing to make sure that the raw json string is in correct form raw_block_added } - pub fn example_finality_signature_1_5_2(block_hash: &str) -> String { - let raw_block_added = format!("{{\"FinalitySignature\":{{\"block_hash\":\"{block_hash}\",\"era_id\":8538,\"signature\":\"0157368db32b578c1cf97256c3012d50afc5745fe22df2f4be1efd0bdf82b63ce072b4726fdfb7c026068b38aaa67ea401b49d969ab61ae587af42c64de8914101\",\"public_key\":\"0138e64f04c03346e94471e340ca7b94ba3581e5697f4d1e59f5a31c0da720de45\"}}}}"); + pub fn example_finality_signature_2_0_0(hash: &str) -> String { + let raw_block_added = format!("{{\"FinalitySignature\":{{\"V2\":{{\"block_hash\":\"{hash}\",\"block_height\":123026,\"era_id\":279,\"chain_name_hash\":\"f087a92e6e7077b3deb5e00b14a904e34c7068a9410365435bc7ca5d3ac64301\",\"signature\":\"01f2e7303a064d68b83d438c55056db2e32eda973f24c548176ac654580f0a6ef8b8b4ce7758bcee6f889bc5d4a653b107d6d4c9f5f20701c08259ece28095a10d\",\"public_key\":\"0126d4637eb0c0769274f03a696df1112383fa621c9f73f57af4c5c0fbadafa8cf\"}}}}}}"); super::deserialize(&raw_block_added).unwrap(); // 
deserializing to make sure that the raw json string is in correct form raw_block_added } diff --git a/types/src/testing.rs b/types/src/testing.rs index 95c8bea7..50377bab 100644 --- a/types/src/testing.rs +++ b/types/src/testing.rs @@ -3,26 +3,65 @@ //! Contains various parts and components to aid writing tests and simulations using the //! `casper-node` library. -use casper_types::{testing::TestRng, TimeDiff, Timestamp}; - -use crate::Deploy; +#[cfg(feature = "sse-data-testing")] +use casper_types::{testing::TestRng, Deploy, TimeDiff, Timestamp, Transaction}; +#[cfg(test)] +use casper_types::{BlockHash, Digest, PublicKey}; +#[cfg(feature = "sse-data-testing")] +use rand::Rng; +#[cfg(test)] +use serde_json::Value; +#[cfg(feature = "sse-data-testing")] /// Creates a test deploy created at given instant and with given ttl. -pub fn create_test_deploy( +pub fn create_test_transaction( created_ago: TimeDiff, ttl: TimeDiff, now: Timestamp, test_rng: &mut TestRng, -) -> Deploy { - Deploy::random_with_timestamp_and_ttl(test_rng, now - created_ago, ttl) +) -> Transaction { + use casper_types::{TransactionV1, MINT_LANE_ID}; + + if test_rng.gen() { + Transaction::Deploy(Deploy::random_with_timestamp_and_ttl( + test_rng, + now - created_ago, + ttl, + )) + } else { + let timestamp = now - created_ago; + let transaction = TransactionV1::random_with_lane_and_timestamp_and_ttl( + test_rng, + MINT_LANE_ID, + Some(timestamp), + Some(ttl), + ); + Transaction::V1(transaction) + } } +#[cfg(feature = "sse-data-testing")] /// Creates a random deploy that is considered expired. 
-pub fn create_expired_deploy(now: Timestamp, test_rng: &mut TestRng) -> Deploy { - create_test_deploy( +pub fn create_expired_transaction(now: Timestamp, test_rng: &mut TestRng) -> Transaction { + create_test_transaction( TimeDiff::from_seconds(20), TimeDiff::from_seconds(10), now, test_rng, ) } + +#[cfg(test)] +pub fn parse_public_key(arg: &str) -> PublicKey { + serde_json::from_value(Value::String(arg.to_string())).unwrap() +} + +#[cfg(test)] +pub fn parse_block_hash(arg: &str) -> BlockHash { + serde_json::from_value(Value::String(arg.to_string())).unwrap() +} + +#[cfg(test)] +pub fn parse_digest(arg: &str) -> Digest { + serde_json::from_value(Value::String(arg.to_string())).unwrap() +}