From b0d7bbad575a9b93f1fb1fec67b5729928b6312b Mon Sep 17 00:00:00 2001
From: Andy Nogueira
Date: Mon, 6 Mar 2023 11:31:33 -0500
Subject: [PATCH 1/5] openapi doc fixes and QA docs fixes

---
 docs/qa/{v034/README.md => CometBFT-QA-34.md} | 134 ++++-----
 .../{v037/CometBFT.md => CometBFT-QA-37.md} | 54 ++--
 docs/qa/README.md | 8 +-
 docs/qa/{v034/TMCore.md => TMCore-QA-34.md} | 50 ++--
 docs/qa/{v037/TMCore.md => TMCore-QA-37.md} | 92 +++---
 .../{v034/img => img34}/baseline/avg_cpu.png | Bin
 .../img => img34}/baseline/avg_memory.png | Bin
 .../baseline/avg_mempool_size.png | Bin
 .../baseline/block_rate_regular.png | Bin
 docs/qa/{v034/img => img34}/baseline/cpu.png | Bin
 .../{v034/img => img34}/baseline/memory.png | Bin
 .../img => img34}/baseline/mempool_size.png | Bin
 .../qa/{v034/img => img34}/baseline/peers.png | Bin
 .../{v034/img => img34}/baseline/rounds.png | Bin
 .../baseline/total_txs_rate_regular.png | Bin
 .../img => img34}/cmt1tm1/all_experiments.png | Bin
 .../{v034/img => img34}/cmt1tm1/avg_cpu.png | Bin
 .../img => img34}/cmt1tm1/avg_memory.png | Bin
 .../cmt1tm1/avg_mempool_size.png | Bin
 .../cmt1tm1/block_rate_regular.png | Bin
 docs/qa/{v034/img => img34}/cmt1tm1/cpu.png | Bin
 .../qa/{v034/img => img34}/cmt1tm1/memory.png | Bin
 .../img => img34}/cmt1tm1/mempool_size.png | Bin
 docs/qa/{v034/img => img34}/cmt1tm1/peers.png | Bin
 .../qa/{v034/img => img34}/cmt1tm1/rounds.png | Bin
 .../cmt1tm1/total_txs_rate_regular.png | Bin
 .../img => img34}/cmt2tm1/all_experiments.png | Bin
 .../{v034/img => img34}/cmt2tm1/avg_cpu.png | Bin
 .../img => img34}/cmt2tm1/avg_memory.png | Bin
 .../cmt2tm1/avg_mempool_size.png | Bin
 .../cmt2tm1/block_rate_regular.png | Bin
 docs/qa/{v034/img => img34}/cmt2tm1/cpu.png | Bin
 .../qa/{v034/img => img34}/cmt2tm1/memory.png | Bin
 .../img => img34}/cmt2tm1/mempool_size.png | Bin
 docs/qa/{v034/img => img34}/cmt2tm1/peers.png | Bin
 .../qa/{v034/img => img34}/cmt2tm1/rounds.png | Bin
 .../cmt2tm1/total_txs_rate_regular.png | Bin
 .../homogeneous/all_experiments.png | Bin
 .../img => img34}/homogeneous/avg_cpu.png | Bin
 .../img => img34}/homogeneous/avg_memory.png | Bin
 .../homogeneous/avg_mempool_size.png | Bin
 .../homogeneous/block_rate_regular.png | Bin
 .../{v034/img => img34}/homogeneous/cpu.png | Bin
 .../img => img34}/homogeneous/memory.png | Bin
 .../homogeneous/mempool_size.png | Bin
 .../{v034/img => img34}/homogeneous/peers.png | Bin
 .../img => img34}/homogeneous/rounds.png | Bin
 .../homogeneous/total_txs_rate_regular.png | Bin
 .../img => img34}/v034_200node_latencies.png | Bin
 .../v034_200node_latencies_zoomed.png | Bin
 .../v034_200node_tm2cmt1/all_experiments.png | Bin
 .../v034_200node_tm2cmt1/avg_cpu.png | Bin
 .../v034_200node_tm2cmt1/avg_memory.png | Bin
 .../v034_200node_tm2cmt1/avg_mempool_size.png | Bin
 .../block_rate_regular.png | Bin
 .../v034_200node_tm2cmt1/c2r200_merged.png | Bin
 .../v034_200node_tm2cmt1/cpu.png | Bin
 .../v034_200node_tm2cmt1/memory.png | Bin
 .../v034_200node_tm2cmt1/mempool_size.png | Bin
 .../v034_200node_tm2cmt1/peers.png | Bin
 .../v034_200node_tm2cmt1/rounds.png | Bin
 .../total_txs_rate_regular.png | Bin
 .../img => img34}/v034_latency_throughput.png | Bin
 .../img => img34}/v034_r200c2_heights.png | Bin
 .../img => img34}/v034_r200c2_load-runner.png | Bin
 .../{v034/img => img34}/v034_r200c2_load1.png | Bin
 .../v034_r200c2_mempool_size.png | Bin
 .../v034_r200c2_mempool_size_avg.png | Bin
 .../{v034/img => img34}/v034_r200c2_peers.png | Bin
 .../img => img34}/v034_r200c2_rounds.png | Bin
 .../{v034/img => img34}/v034_r200c2_rss.png | Bin
 .../img => img34}/v034_r200c2_rss_avg.png | Bin
 .../img => img34}/v034_r200c2_total-txs.png | Bin
 .../img => img34}/v034_report_tabbed.txt | 0
 .../img => img34}/v034_rotating_heights.png | Bin
 .../v034_rotating_heights_ephe.png | Bin
 .../img => img34}/v034_rotating_latencies.png | Bin
 .../v034_rotating_latencies_uniq.png | Bin
 .../img => img34}/v034_rotating_load1.png | Bin
 .../img => img34}/v034_rotating_peers.png | Bin
 .../img => img34}/v034_rotating_rss_avg.png | Bin
 .../img => img34}/v034_rotating_total-txs.png | Bin
 .../200nodes_cmt037/all_experiments.png | Bin
 .../200nodes_cmt037/avg_mempool_size.png | Bin
 .../200nodes_cmt037/block_rate.png | Bin
 .../img => img37}/200nodes_cmt037/cpu.png | Bin
 ...e_75cb89a8-f876-4698-82f3-8aaab0b361af.png | Bin
 .../img => img37}/200nodes_cmt037/memory.png | Bin
 .../200nodes_cmt037/mempool_size.png | Bin
 .../img => img37}/200nodes_cmt037/peers.png | Bin
 .../img => img37}/200nodes_cmt037/rounds.png | Bin
 .../200nodes_cmt037/total_txs_rate.png | Bin
 .../200nodes_tm037/avg_mempool_size.png | Bin
 .../200nodes_tm037/block_rate_regular.png | Bin
 .../img => img37}/200nodes_tm037/cpu.png | Bin
 .../img => img37}/200nodes_tm037/memory.png | Bin
 .../200nodes_tm037/mempool_size.png | Bin
 .../img => img37}/200nodes_tm037/peers.png | Bin
 .../img => img37}/200nodes_tm037/rounds.png | Bin
 .../200nodes_tm037/total_txs_rate_regular.png | Bin
 .../200nodes_tm037/v037_200node_latencies.png | Bin
 .../v037_latency_throughput.png | Bin
 .../200nodes_tm037/v037_r200c2_heights.png | Bin
 .../200nodes_tm037/v037_r200c2_load1.png | Bin
 .../v037_r200c2_mempool_size.png | Bin
 .../v037_r200c2_mempool_size_avg.png | Bin
 .../200nodes_tm037/v037_r200c2_peers.png | Bin
 .../200nodes_tm037/v037_r200c2_rounds.png | Bin
 .../200nodes_tm037/v037_r200c2_rss.png | Bin
 .../200nodes_tm037/v037_r200c2_rss_avg.png | Bin
 .../200nodes_tm037/v037_r200c2_total-txs.png | Bin
 .../200nodes_tm037/v037_report_tabbed.txt | 0
 .../200nodes_tm037/v037_rotating_heights.png | Bin
 .../v037_rotating_heights_ephe.png | Bin
 .../v037_rotating_latencies.png | Bin
 .../200nodes_tm037/v037_rotating_load1.png | Bin
 .../200nodes_tm037/v037_rotating_peers.png | Bin
 .../200nodes_tm037/v037_rotating_rss_avg.png | Bin
 .../v037_rotating_total-txs.png | Bin
 docs/qa/method.md | 10 +-
 rpc/openapi/openapi.yaml | 271 ++----------------
 121 files changed, 205 insertions(+), 414 deletions(-)
 rename docs/qa/{v034/README.md => CometBFT-QA-34.md} (76%)
 rename docs/qa/{v037/CometBFT.md => CometBFT-QA-37.md} (78%)
 rename docs/qa/{v034/TMCore.md => TMCore-QA-34.md} (88%)
 rename docs/qa/{v037/TMCore.md => TMCore-QA-37.md} (77%)
 rename docs/qa/{v034/img => img34}/baseline/avg_cpu.png (100%)
 rename docs/qa/{v034/img => img34}/baseline/avg_memory.png (100%)
 rename docs/qa/{v034/img => img34}/baseline/avg_mempool_size.png (100%)
 rename docs/qa/{v034/img => img34}/baseline/block_rate_regular.png (100%)
 rename docs/qa/{v034/img => img34}/baseline/cpu.png (100%)
 rename docs/qa/{v034/img => img34}/baseline/memory.png (100%)
 rename docs/qa/{v034/img => img34}/baseline/mempool_size.png (100%)
 rename docs/qa/{v034/img => img34}/baseline/peers.png (100%)
 rename docs/qa/{v034/img => img34}/baseline/rounds.png (100%)
 rename docs/qa/{v034/img => img34}/baseline/total_txs_rate_regular.png (100%)
 rename docs/qa/{v034/img => img34}/cmt1tm1/all_experiments.png (100%)
 rename docs/qa/{v034/img => img34}/cmt1tm1/avg_cpu.png (100%)
 rename docs/qa/{v034/img => img34}/cmt1tm1/avg_memory.png (100%)
 rename docs/qa/{v034/img => img34}/cmt1tm1/avg_mempool_size.png (100%)
 rename docs/qa/{v034/img => img34}/cmt1tm1/block_rate_regular.png (100%)
 rename docs/qa/{v034/img => img34}/cmt1tm1/cpu.png (100%)
 rename docs/qa/{v034/img => img34}/cmt1tm1/memory.png (100%)
 rename docs/qa/{v034/img => img34}/cmt1tm1/mempool_size.png (100%)
 rename docs/qa/{v034/img => img34}/cmt1tm1/peers.png (100%)
 rename docs/qa/{v034/img => img34}/cmt1tm1/rounds.png (100%)
 rename docs/qa/{v034/img => img34}/cmt1tm1/total_txs_rate_regular.png (100%)
 rename docs/qa/{v034/img => img34}/cmt2tm1/all_experiments.png (100%)
 rename docs/qa/{v034/img => img34}/cmt2tm1/avg_cpu.png (100%)
 rename docs/qa/{v034/img => img34}/cmt2tm1/avg_memory.png (100%)
 rename docs/qa/{v034/img => img34}/cmt2tm1/avg_mempool_size.png (100%)
 rename docs/qa/{v034/img => img34}/cmt2tm1/block_rate_regular.png (100%)
 rename docs/qa/{v034/img => img34}/cmt2tm1/cpu.png (100%)
 rename docs/qa/{v034/img => img34}/cmt2tm1/memory.png (100%)
 rename docs/qa/{v034/img => img34}/cmt2tm1/mempool_size.png (100%)
 rename docs/qa/{v034/img => img34}/cmt2tm1/peers.png (100%)
 rename docs/qa/{v034/img => img34}/cmt2tm1/rounds.png (100%)
 rename docs/qa/{v034/img => img34}/cmt2tm1/total_txs_rate_regular.png (100%)
 rename docs/qa/{v034/img => img34}/homogeneous/all_experiments.png (100%)
 rename docs/qa/{v034/img => img34}/homogeneous/avg_cpu.png (100%)
 rename docs/qa/{v034/img => img34}/homogeneous/avg_memory.png (100%)
 rename docs/qa/{v034/img => img34}/homogeneous/avg_mempool_size.png (100%)
 rename docs/qa/{v034/img => img34}/homogeneous/block_rate_regular.png (100%)
 rename docs/qa/{v034/img => img34}/homogeneous/cpu.png (100%)
 rename docs/qa/{v034/img => img34}/homogeneous/memory.png (100%)
 rename docs/qa/{v034/img => img34}/homogeneous/mempool_size.png (100%)
 rename docs/qa/{v034/img => img34}/homogeneous/peers.png (100%)
 rename docs/qa/{v034/img => img34}/homogeneous/rounds.png (100%)
 rename docs/qa/{v034/img => img34}/homogeneous/total_txs_rate_regular.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_latencies.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_latencies_zoomed.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_tm2cmt1/all_experiments.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_tm2cmt1/avg_cpu.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_tm2cmt1/avg_memory.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_tm2cmt1/avg_mempool_size.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_tm2cmt1/block_rate_regular.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_tm2cmt1/c2r200_merged.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_tm2cmt1/cpu.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_tm2cmt1/memory.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_tm2cmt1/mempool_size.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_tm2cmt1/peers.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_tm2cmt1/rounds.png (100%)
 rename docs/qa/{v034/img => img34}/v034_200node_tm2cmt1/total_txs_rate_regular.png (100%)
 rename docs/qa/{v034/img => img34}/v034_latency_throughput.png (100%)
 rename docs/qa/{v034/img => img34}/v034_r200c2_heights.png (100%)
 rename docs/qa/{v034/img => img34}/v034_r200c2_load-runner.png (100%)
 rename docs/qa/{v034/img => img34}/v034_r200c2_load1.png (100%)
 rename docs/qa/{v034/img => img34}/v034_r200c2_mempool_size.png (100%)
 rename docs/qa/{v034/img => img34}/v034_r200c2_mempool_size_avg.png (100%)
 rename docs/qa/{v034/img => img34}/v034_r200c2_peers.png (100%)
 rename docs/qa/{v034/img => img34}/v034_r200c2_rounds.png (100%)
 rename docs/qa/{v034/img => img34}/v034_r200c2_rss.png (100%)
 rename docs/qa/{v034/img => img34}/v034_r200c2_rss_avg.png (100%)
 rename docs/qa/{v034/img => img34}/v034_r200c2_total-txs.png (100%)
 rename docs/qa/{v034/img => img34}/v034_report_tabbed.txt (100%)
 rename docs/qa/{v034/img => img34}/v034_rotating_heights.png (100%)
 rename docs/qa/{v034/img => img34}/v034_rotating_heights_ephe.png (100%)
 rename docs/qa/{v034/img => img34}/v034_rotating_latencies.png (100%)
 rename docs/qa/{v034/img => img34}/v034_rotating_latencies_uniq.png (100%)
 rename docs/qa/{v034/img => img34}/v034_rotating_load1.png (100%)
 rename docs/qa/{v034/img => img34}/v034_rotating_peers.png (100%)
 rename docs/qa/{v034/img => img34}/v034_rotating_rss_avg.png (100%)
 rename docs/qa/{v034/img => img34}/v034_rotating_total-txs.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_cmt037/all_experiments.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_cmt037/avg_mempool_size.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_cmt037/block_rate.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_cmt037/cpu.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_cmt037/memory.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_cmt037/mempool_size.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_cmt037/peers.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_cmt037/rounds.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_cmt037/total_txs_rate.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/avg_mempool_size.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/block_rate_regular.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/cpu.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/memory.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/mempool_size.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/peers.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/rounds.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/total_txs_rate_regular.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_200node_latencies.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_latency_throughput.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_r200c2_heights.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_r200c2_load1.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_r200c2_mempool_size.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_r200c2_mempool_size_avg.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_r200c2_peers.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_r200c2_rounds.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_r200c2_rss.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_r200c2_rss_avg.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_r200c2_total-txs.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_report_tabbed.txt (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_rotating_heights.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_rotating_heights_ephe.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_rotating_latencies.png (100%)
 rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_rotating_load1.png (100%)
 rename docs/qa/{v037/img =>
img37}/200nodes_tm037/v037_rotating_peers.png (100%) rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_rotating_rss_avg.png (100%) rename docs/qa/{v037/img => img37}/200nodes_tm037/v037_rotating_total-txs.png (100%) diff --git a/docs/qa/v034/README.md b/docs/qa/CometBFT-QA-34.md similarity index 76% rename from docs/qa/v034/README.md rename to docs/qa/CometBFT-QA-34.md index f3ac53e1d54..d633426407e 100644 --- a/docs/qa/v034/README.md +++ b/docs/qa/CometBFT-QA-34.md @@ -1,12 +1,14 @@ --- order: 1 parent: - title: CometBFT Quality Assurance Results for v0.34.x + title: CometBFT QA Results v0.34.x description: This is a report on the results obtained when running v0.34.x on testnets - order: 2 + order: 3 --- -# v0.34.x - From Tendermint Core to CometBFT +# CometBFT QA Results v0.34.x + +## v0.34.x - From Tendermint Core to CometBFT This section reports on the QA process we followed before releasing the first `v0.34.x` version from our CometBFT repository. @@ -54,7 +56,7 @@ Therefore we carry out a complete run of the _200-node test_ on the following ne ## Configuration and Results In the following sections we provide the results of the _200 node test_. -Each section reports the baseline results (for reference), the homogeneous network scenario (all CometBFT nodes), +Each section reports the baseline results (for reference), the homogeneous network scenario (all CometBFT nodes), and the mixed networks with 1/2, 1/3 and 2/3 of Tendermint Core nodes. ### Saturation Point @@ -62,10 +64,10 @@ and the mixed networks with 1/2, 1/3 and 2/3 of Tendermint Core nodes. As the CometBFT release candidate under test has minimal changes with respect to Tendermint Core `v0.34.26`, other than the rebranding changes, we can confidently reuse the results from the `v0.34.x` baseline test regarding -the [saturation point](./TMCore.md#finding-the-saturation-point). +the [saturation point](TMCore-QA-34.md#finding-the-saturation-point). Therefore, we will simply use a load of (`r=200,c=2`) -(see the explanation [here](./TMCore.md#finding-the-saturation-point)) on all experiments. +(see the explanation [here](TMCore-QA-34.md#finding-the-saturation-point)) on all experiments. We also include the baseline results for quick reference and comparison. @@ -87,26 +89,26 @@ We refer to these UUID to indicate to the representative runs. ### CometBFT Homogeneous network -![latencies](./img/homogeneous/all_experiments.png) +![latencies](img34/homogeneous/all_experiments.png) ### 1/2 Tendermint Core - 1/2 CometBFT -![latencies](./img/cmt1tm1/all_experiments.png) +![latencies](img34/cmt1tm1/all_experiments.png) ### 1/3 Tendermint Core - 2/3 CometBFT -![latencies](./img/cmt2tm1/all_experiments.png) +![latencies](img34/cmt2tm1/all_experiments.png) ### 2/3 Tendermint Core - 1/3 CometBFT -![latencies_all_tm2_3_cmt1_3](./img/v034_200node_tm2cmt1/all_experiments.png) +![latencies_all_tm2_3_cmt1_3](img34/v034_200node_tm2cmt1/all_experiments.png) ## Prometheus Metrics This section reports on the key Prometheus metrics extracted from the following experiments. -* Baseline results: `v0.34.x`, obtained in October 2022 and reported [here](./TMCore.md). +* Baseline results: `v0.34.x`, obtained in October 2022 and reported [here](TMCore-QA-34.md). * CometBFT homogeneous network: experiment with UUID starting with `be8c`. * Mixed network, 1/2 Tendermint Core `v0.34.26` and 1/2 running CometBFT: experiment with UUID starting with `04ee`. 
* Mixed network, 1/3 Tendermint Core `v0.34.26` and 2/3 running CometBFT: experiment with UUID starting with `fc5e`. @@ -125,35 +127,35 @@ The second one shows the evolution of the average over all full nodes. #### Baseline -![mempool-cumulative](./img/baseline/mempool_size.png) +![mempool-cumulative](img34/baseline/mempool_size.png) -![mempool-avg](./img/baseline/avg_mempool_size.png) +![mempool-avg](img34/baseline/avg_mempool_size.png) #### CometBFT Homogeneous network The results for the homogeneous network and the baseline are similar in terms of outstanding transactions. -![mempool-cumulative-homogeneous](./img/homogeneous/mempool_size.png) +![mempool-cumulative-homogeneous](img34/homogeneous/mempool_size.png) -![mempool-avg-homogeneous](./img/homogeneous/avg_mempool_size.png) +![mempool-avg-homogeneous](img34/homogeneous/avg_mempool_size.png) #### 1/2 Tendermint Core - 1/2 CometBFT -![mempool size](./img/cmt1tm1/mempool_size.png) +![mempool size](img34/cmt1tm1/mempool_size.png) -![average mempool size](./img/cmt1tm1/avg_mempool_size.png) +![average mempool size](img34/cmt1tm1/avg_mempool_size.png) #### 1/3 Tendermint Core - 2/3 CometBFT -![mempool size](./img/cmt2tm1/mempool_size.png) +![mempool size](img34/cmt2tm1/mempool_size.png) -![average mempool size](./img/cmt2tm1/avg_mempool_size.png) +![average mempool size](img34/cmt2tm1/avg_mempool_size.png) #### 2/3 Tendermint Core - 1/3 CometBFT -![mempool_tm2_3_cmt_1_3](./img/v034_200node_tm2cmt1/mempool_size.png) +![mempool_tm2_3_cmt_1_3](img34/v034_200node_tm2cmt1/mempool_size.png) -![mempool-avg_tm2_3_cmt_1_3](./img/v034_200node_tm2cmt1/avg_mempool_size.png) +![mempool-avg_tm2_3_cmt_1_3](img34/v034_200node_tm2cmt1/avg_mempool_size.png) ### Consensus Rounds per Height @@ -161,10 +163,10 @@ The following graphs show the rounds needed to complete each height and agree on A value of `0` shows that only one round was required (with id `0`), and a value of `1` shows that two rounds were required. -#### Baseline +#### Baseline We can see that round 1 is reached with a certain frequency. -![rounds](./img/baseline/rounds.png) +![rounds](img34/baseline/rounds.png) #### CometBFT Homogeneous network @@ -173,19 +175,19 @@ and a few nodes even needed to advance to round 2 at one point. This coincides with the time at which we observed the biggest peak in mempool size on the corresponding plot, shown above. -![rounds-homogeneous](./img/homogeneous/rounds.png) +![rounds-homogeneous](img34/homogeneous/rounds.png) #### 1/2 Tendermint Core - 1/2 CometBFT -![peers](./img/cmt1tm1/rounds.png) +![peers](img34/cmt1tm1/rounds.png) #### 1/3 Tendermint Core - 2/3 CometBFT -![peers](./img/cmt2tm1/rounds.png) +![peers](img34/cmt2tm1/rounds.png) #### 2/3 Tendermint Core - 1/3 CometBFT -![rounds-tm2_3_cmt1_3](./img/v034_200node_tm2cmt1/rounds.png) +![rounds-tm2_3_cmt1_3](img34/v034_200node_tm2cmt1/rounds.png) ### Peers @@ -200,30 +202,30 @@ Seed nodes typically have a higher number of peers. The fact that non-seed nodes reach more than 50 peers is due to [#9548](https://github.com/tendermint/tendermint/issues/9548). -![peers](./img/baseline/peers.png) +![peers](img34/baseline/peers.png) #### CometBFT Homogeneous network -The results for the homogeneous network are very similar to the baseline. +The results for the homogeneous network are very similar to the baseline. The only difference being that the seed nodes seem to loose peers in the middle of the experiment. However this cannot be attributed to the differences in the code, which are mainly rebranding. 
-![peers-homogeneous](./img/homogeneous/peers.png) +![peers-homogeneous](img34/homogeneous/peers.png) #### 1/2 Tendermint Core - 1/2 CometBFT -![peers](./img/cmt1tm1/peers.png) +![peers](img34/cmt1tm1/peers.png) #### 1/3 Tendermint Core - 2/3 CometBFT -![peers](./img/cmt2tm1/peers.png) +![peers](img34/cmt2tm1/peers.png) #### 2/3 Tendermint Core - 1/3 CometBFT As in the homogeneous case, there is some variation in the number of peers for some nodes. These, however, do not affect the average. -![peers-tm2_3_cmt1_3](./img/v034_200node_tm2cmt1/peers.png) +![peers-tm2_3_cmt1_3](img34/v034_200node_tm2cmt1/peers.png) ### Blocks Produced per Minute, Transactions Processed per Minute @@ -236,11 +238,11 @@ The thick red dashed line show the rates' moving averages. The average number of blocks/minute oscilate between 10 and 40. -![heights](./img/baseline/block_rate_regular.png) +![heights](img34/baseline/block_rate_regular.png) The number of transactions/minute tops around 30k. -![total-txs](./img/baseline/total_txs_rate_regular.png) +![total-txs](img34/baseline/total_txs_rate_regular.png) #### CometBFT Homogeneous network @@ -248,30 +250,30 @@ The number of transactions/minute tops around 30k. The plot showing the block production rate shows that the rate oscillates around 20 blocks/minute, mostly within the same range as the baseline. -![heights-homogeneous-rate](./img/homogeneous/block_rate_regular.png) +![heights-homogeneous-rate](img34/homogeneous/block_rate_regular.png) -The plot showing the transaction rate shows the rate stays around 20000 transactions per minute, +The plot showing the transaction rate shows the rate stays around 20000 transactions per minute, also topping around 30k. -![txs-homogeneous-rate](./img/homogeneous/total_txs_rate_regular.png) +![txs-homogeneous-rate](img34/homogeneous/total_txs_rate_regular.png) #### 1/2 Tendermint Core - 1/2 CometBFT -![height rate](./img/cmt1tm1/block_rate_regular.png) +![height rate](img34/cmt1tm1/block_rate_regular.png) -![transaction rate](./img/cmt1tm1/total_txs_rate_regular.png) +![transaction rate](img34/cmt1tm1/total_txs_rate_regular.png) #### 1/3 Tendermint Core - 2/3 CometBFT -![height rate](./img/cmt2tm1/block_rate_regular.png) +![height rate](img34/cmt2tm1/block_rate_regular.png) -![transaction rate](./img/cmt2tm1/total_txs_rate_regular.png) +![transaction rate](img34/cmt2tm1/total_txs_rate_regular.png) #### 2/3 Tendermint Core - 1/3 CometBFT -![height rate](./img/v034_200node_tm2cmt1/block_rate_regular.png) +![height rate](img34/v034_200node_tm2cmt1/block_rate_regular.png) -![transaction rate](./img/v034_200node_tm2cmt1/total_txs_rate_regular.png) +![transaction rate](img34/v034_200node_tm2cmt1/total_txs_rate_regular.png) ### Memory Resident Set Size @@ -279,83 +281,83 @@ The following graphs show the Resident Set Size (RSS) of all monitored processes #### Baseline -![rss](./img/baseline/memory.png) +![rss](img34/baseline/memory.png) -![rss-avg](./img/baseline/avg_memory.png) +![rss-avg](img34/baseline/avg_memory.png) #### CometBFT Homogeneous network This is the plot for the homogeneous network, which is slightly more stable than the baseline over the time of the experiment. -![rss-homogeneous](./img/homogeneous/memory.png) +![rss-homogeneous](img34/homogeneous/memory.png) And this is the average plot. It oscillates around 560 MiB, which is noticeably lower than the baseline. 
-![rss-avg-homogeneous](./img/homogeneous/avg_memory.png) +![rss-avg-homogeneous](img34/homogeneous/avg_memory.png) #### 1/2 Tendermint Core - 1/2 CometBFT -![rss](./img/cmt1tm1/memory.png) +![rss](img34/cmt1tm1/memory.png) -![rss average](./img/cmt1tm1/avg_memory.png) +![rss average](img34/cmt1tm1/avg_memory.png) #### 1/3 Tendermint Core - 2/3 CometBFT -![rss](./img/cmt2tm1/memory.png) +![rss](img34/cmt2tm1/memory.png) -![rss average](./img/cmt2tm1/avg_memory.png) +![rss average](img34/cmt2tm1/avg_memory.png) #### 2/3 Tendermint Core - 1/3 CometBFT -![rss](./img/v034_200node_tm2cmt1/memory.png) +![rss](img34/v034_200node_tm2cmt1/memory.png) -![rss average](./img/v034_200node_tm2cmt1/avg_memory.png) +![rss average](img34/v034_200node_tm2cmt1/avg_memory.png) ### CPU utilization -The following graphs show the `load1` of nodes, as typically shown in the first line of the Unix `top` +The following graphs show the `load1` of nodes, as typically shown in the first line of the Unix `top` command, and their average value. #### Baseline -![load1](./img/baseline/cpu.png) +![load1](img34/baseline/cpu.png) -![load1-avg](./img/baseline/avg_cpu.png) +![load1-avg](img34/baseline/avg_cpu.png) #### CometBFT Homogeneous network The load in the homogenous network is, similarly to the baseline case, below 5 and, therefore, normal. -![load1-homogeneous](./img/homogeneous/cpu.png) +![load1-homogeneous](img34/homogeneous/cpu.png) As expected, the average plot also looks similar. -![load1-homogeneous-avg](./img/homogeneous/avg_cpu.png) +![load1-homogeneous-avg](img34/homogeneous/avg_cpu.png) #### 1/2 Tendermint Core - 1/2 CometBFT -![load1](./img/cmt1tm1/cpu.png) +![load1](img34/cmt1tm1/cpu.png) -![average load1](./img/cmt1tm1/avg_cpu.png) +![average load1](img34/cmt1tm1/avg_cpu.png) #### 1/3 Tendermint Core - 2/3 CometBFT -![load1](./img/cmt2tm1/cpu.png) +![load1](img34/cmt2tm1/cpu.png) -![average load1](./img/cmt2tm1/avg_cpu.png) +![average load1](img34/cmt2tm1/avg_cpu.png) #### 2/3 Tendermint Core - 1/3 CometBFT -![load1](./img/v034_200node_tm2cmt1/cpu.png) +![load1](img34/v034_200node_tm2cmt1/cpu.png) -![average load1](./img/v034_200node_tm2cmt1/avg_cpu.png) +![average load1](img34/v034_200node_tm2cmt1/avg_cpu.png) ## Test Results The comparison of the baseline results and the homogeneous case show that both scenarios had similar numbers and are therefore equivalent. -The mixed nodes cases show that networks operate normally with a mix of compatible Tendermint Core and CometBFT versions. +The mixed nodes cases show that networks operate normally with a mix of compatible Tendermint Core and CometBFT versions. Although not the main goal, a comparison of metric numbers with the homogenous case and the baseline scenarios show similar results and therefore we can conclude that mixing compatible Tendermint Core and CometBFT introduces not performance degradation. A conclusion of these tests is shown in the following table, along with the commit versions used in the experiments. 
diff --git a/docs/qa/v037/CometBFT.md b/docs/qa/CometBFT-QA-37.md similarity index 78% rename from docs/qa/v037/CometBFT.md rename to docs/qa/CometBFT-QA-37.md index 0095a3be95a..1717ecf3ecd 100644 --- a/docs/qa/v037/CometBFT.md +++ b/docs/qa/CometBFT-QA-37.md @@ -1,14 +1,14 @@ --- order: 1 parent: - title: CometBFT Quality Assurance Results for v0.37.x + title: CometBFT QA Results v0.37.x description: This is a report on the results obtained when running CometBFT v0.37.x on testnets - order: 2 + order: 5 --- -# v0.37.x +# CometBFT QA Results v0.37.x -This iteration of the QA was run on CometBFT `v0.37.0-alpha3`, the first `v0.37.x` version from the CometBFT repository. +This iteration of the QA was run on CometBFT `v0.37.0-alpha3`, the first `v0.37.x` version from the CometBFT repository. The changes with respect to the baseline, `TM v0.37.x` as of Oct 12, 2022 (Commit: 1cf9d8e276afe8595cba960b51cd056514965fd1), include the rebranding of our fork of Tendermint Core to CometBFT and several improvements, described in the CometBFT [CHANGELOG](https://github.com/cometbft/cometbft/blob/v0.37.0-alpha.3/CHANGELOG.md). @@ -19,7 +19,7 @@ As in other iterations of our QA process, we have used a 200-node network as tes ### Saturation point As in previous iterations, in our QA experiments, the system is subjected to a load slightly under a saturation point. -The method to identify the saturation point is explained [here](../v034/README.md#finding-the-saturation-point) and its application to the baseline is described [here](./TMCore.md#finding-the-saturation-point). +The method to identify the saturation point is explained [here](CometBFT-QA-34.md#finding-the-saturation-point) and its application to the baseline is described [here](TMCore-QA-37.md#finding-the-saturation-point). We use the same saturation point, that is, `c`, the number of connections created by the load runner process to the target node, is 2 and `r`, the rate or number of transactions issued per second, is 200. ## Examining latencies @@ -27,17 +27,17 @@ We use the same saturation point, that is, `c`, the number of connections create The following figure plots six experiments carried out with the network. Unique identifiers, UUID, for each execution are presented on top of each graph. -![latencies](./img/200nodes_cmt037/all_experiments.png) +![latencies](img37/200nodes_cmt037/all_experiments.png) We can see that the latencies follow comparable patterns across all experiments. Therefore, in the following sections we will only present the results for one representative run, chosen randomly, with UUID starting with `75cb89a8`. -![latencies](./img/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png). +![latencies](img37/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png). For reference, the following figure shows the latencies of different configuration of the baseline. `c=02 r=200` corresponds to the same configuration as in this experiment. -![all-latencies](./img/200nodes_tm037/v037_200node_latencies.png) +![all-latencies](img37/200nodes_tm037/v037_200node_latencies.png) As can be seen, latencies are similar. @@ -47,24 +47,24 @@ This section further examines key metrics for this experiment extracted from Pro ### Mempool Size -The mempool size, a count of the number of transactions in the mempool, was shown to be stable and homogeneous at all full nodes. +The mempool size, a count of the number of transactions in the mempool, was shown to be stable and homogeneous at all full nodes. 
It did not exhibit any unconstrained growth. The plot below shows the evolution over time of the cumulative number of transactions inside all full nodes' mempools at a given time. -![mempoool-cumulative](./img/200nodes_cmt037/mempool_size.png) +![mempoool-cumulative](img37/200nodes_cmt037/mempool_size.png) The following picture shows the evolution of the average mempool size over all full nodes, which mostly oscilates between 1500 and 2000 outstanding transactions. -![mempool-avg](./img/200nodes_cmt037/avg_mempool_size.png) +![mempool-avg](img37/200nodes_cmt037/avg_mempool_size.png) The peaks observed coincide with the moments when some nodes reached round 1 of consensus (see below). The behavior is similar to the observed in the baseline, presented next. -![mempool-cumulative-baseline](./img/200nodes_tm037/mempool_size.png) +![mempool-cumulative-baseline](img37/200nodes_tm037/mempool_size.png) -![mempool-avg-baseline](./img/200nodes_tm037/avg_mempool_size.png) +![mempool-avg-baseline](img37/200nodes_tm037/avg_mempool_size.png) ### Peers @@ -73,29 +73,29 @@ The number of peers was stable at all nodes. It was higher for the seed nodes (around 140) than for the rest (between 16 and 78). The red dashed line denotes the average value. -![peers](./img/200nodes_cmt037/peers.png) +![peers](img37/200nodes_cmt037/peers.png) Just as in the baseline, shown next, the fact that non-seed nodes reach more than 50 peers is due to [\#9548]. -![peers](./img/200nodes_tm037/peers.png) +![peers](img37/200nodes_tm037/peers.png) ### Consensus Rounds per Height Most heights took just one round, that is, round 0, but some nodes needed to advance to round 1 and eventually round 2. -![rounds](./img/200nodes_cmt037/rounds.png) +![rounds](img37/200nodes_cmt037/rounds.png) The following specific run of the baseline presented better results, only requiring up to round 1, but reaching higher rounds is not uncommon in the corresponding software version. -![rounds](./img/200nodes_tm037/rounds.png) +![rounds](img37/200nodes_tm037/rounds.png) ### Blocks Produced per Minute, Transactions Processed per Minute -The following plot shows the rate in which blocks were created, from the point of view of each node. +The following plot shows the rate in which blocks were created, from the point of view of each node. That is, it shows when each node learned that a new block had been agreed upon. -![heights](./img/200nodes_cmt037/block_rate.png) +![heights](img37/200nodes_cmt037/block_rate.png) For most of the time when load was being applied to the system, most of the nodes stayed around 20 to 25 blocks/minute. @@ -104,26 +104,26 @@ The spike to more than 175 blocks/minute is due to a slow node catching up. The collective spike on the right of the graph marks the end of the load injection, when blocks become smaller (empty) and impose less strain on the network. This behavior is reflected in the following graph, which shows the number of transactions processed per minute. -![total-txs](./img/200nodes_cmt037/total_txs_rate.png) +![total-txs](img37/200nodes_cmt037/total_txs_rate.png) The baseline experienced a similar behavior, shown in the following two graphs. The first depicts the block rate. -![heights-baseline](./img/200nodes_tm037/block_rate_regular.png) +![heights-baseline](img37/200nodes_tm037/block_rate_regular.png) The second plots the transaction rate. 
-![total-txs-baseline](./img/200nodes_tm037/total_txs_rate_regular.png) +![total-txs-baseline](img37/200nodes_tm037/total_txs_rate_regular.png) ### Memory Resident Set Size The Resident Set Size of all monitored processes is plotted below, with maximum memory usage of 2GB. -![rss](./img/200nodes_cmt037/memory.png) +![rss](img37/200nodes_cmt037/memory.png) A similar behavior was shown in the baseline, presented next. -![rss](./img/200nodes_tm037/memory.png) +![rss](img37/200nodes_tm037/memory.png) The memory of all processes went down as the load as removed, showing no signs of unconstrained growth. @@ -136,11 +136,11 @@ as it usually appears in the It is contained below 5 on most nodes, as seen in the following graph. -![load1](./img/200nodes_cmt037/cpu.png) +![load1](img37/200nodes_cmt037/cpu.png) A similar behavior was seen in the baseline. -![load1-baseline](./img/200nodes_tm037/cpu.png) +![load1-baseline](img37/200nodes_tm037/cpu.png) ## Test Results @@ -154,4 +154,4 @@ A conclusion of these tests is shown in the following table, along with the comm |CometBFT | 2023-02-14 | v0.37.0-alpha3 (bef9a830e7ea7da30fa48f2cc236b1f465cc5833) | Pass -[\#9548]: https://github.com/tendermint/tendermint/issues/9548 \ No newline at end of file +[\#9548]: https://github.com/tendermint/tendermint/issues/9548 diff --git a/docs/qa/README.md b/docs/qa/README.md index 76341598eb2..d59049074bc 100644 --- a/docs/qa/README.md +++ b/docs/qa/README.md @@ -19,7 +19,7 @@ used to decide if a release is passing the Quality Assurance process. The results obtained in each release are stored in their own directory. The following releases have undergone the Quality Assurance process, and the corresponding reports include detailed information on tests and comparison with the baseline. -* [TM v0.34.x](./v034/TMCore.md) - Tested prior to releasing Tendermint Core v0.34.22. -* [v0.34.x](./v034/CometBFT.md) - Tested prior to releasing v0.34.27, using TM v0.34.x results as baseline. -* [TM v0.37.x](./v037/TMCore.md) - Tested prior to releasing TM v0.37.x, using TM v0.34.x results as baseline. -* [v0.37.x](./v037/CometBFT.md) - Tested on CometBFT v0.37.0-alpha3, using TM v0.37.x results as baseline. +* [TM v0.34.x](TMCore-QA-34.md) - Tested prior to releasing Tendermint Core v0.34.22. +* [v0.34.x](CometBFT-QA-34.md) - Tested prior to releasing v0.34.27, using TM v0.34.x results as baseline. +* [TM v0.37.x](TMCore-QA-37.md) - Tested prior to releasing TM v0.37.x, using TM v0.34.x results as baseline. +* [v0.37.x](CometBFT-QA-37.md) - Tested on CometBFT v0.37.0-alpha3, using TM v0.37.x results as baseline. diff --git a/docs/qa/v034/TMCore.md b/docs/qa/TMCore-QA-34.md similarity index 88% rename from docs/qa/v034/TMCore.md rename to docs/qa/TMCore-QA-34.md index 5fc1225d978..e5764611c06 100644 --- a/docs/qa/v034/TMCore.md +++ b/docs/qa/TMCore-QA-34.md @@ -1,12 +1,12 @@ --- order: 1 parent: - title: Tendermint Core Quality Assurance Results for v0.34.x + title: Tendermint Core QA Results v0.34.x description: This is a report on the results obtained when running v0.34.x on testnets order: 2 --- -# Tendermint Core v0.34.x +# Tendermint Core QA Results v0.34.x ## 200 Node Testnet @@ -18,7 +18,7 @@ from being stable: the load runner tries to produce slightly more transactions t be processed by the testnet. The following table summarizes the results for v0.34.x, for the different experiments -(extracted from file [`v034_report_tabbed.txt`](./img/v034_report_tabbed.txt)). 
+(extracted from file [`v034_report_tabbed.txt`](img34/v034_report_tabbed.txt)). The X axis of this table is `c`, the number of connections created by the load runner process to the target node. The Y axis of this table is `r`, the rate or number of transactions issued per second. @@ -57,14 +57,14 @@ in order to further study the performance of this release. This is a plot of the CPU load (average over 1 minute, as output by `top`) of the load runner for (`r=200,c=2`), where we can see that the load stays close to 0 most of the time. -![load-load-runner](./img/v034_r200c2_load-runner.png) +![load-load-runner](img34/v034_r200c2_load-runner.png) ### Examining latencies -The method described [here](../method.md) allows us to plot the latencies of transactions +The method described [here](method.md) allows us to plot the latencies of transactions for all experiments. -![all-latencies](./img/v034_200node_latencies.png) +![all-latencies](img34/v034_200node_latencies.png) As we can see, even the experiments beyond the saturation diagonal managed to keep transaction latency stable (i.e. not constantly increasing). @@ -84,7 +84,7 @@ This is a visual effect of the plot; what appear to be points in the plot are ac clusters of points. To corroborate this, we have zoomed in the plot above by setting (carefully chosen) tiny axis intervals. The cluster shown below looks like a single point in the plot above. -![all-latencies-zoomed](./img/v034_200node_latencies_zoomed.png) +![all-latencies-zoomed](img34/v034_200node_latencies_zoomed.png) The plot of latencies can we used as a baseline to compare with other releases. @@ -92,7 +92,7 @@ The following plot summarizes average latencies versus overall throughput across different numbers of WebSocket connections to the node into which transactions are being loaded. -![latency-vs-throughput](./img/v034_latency_throughput.png) +![latency-vs-throughput](img34/v034_latency_throughput.png) ### Prometheus Metrics on the Chosen Experiment @@ -108,12 +108,12 @@ at a given time. The two spikes that can be observed correspond to a period where consensus instances proceeded beyond the initial round at some nodes. -![mempool-cumulative](./img/v034_r200c2_mempool_size.png) +![mempool-cumulative](img34/v034_r200c2_mempool_size.png) The plot below shows evolution of the average over all full nodes, which oscillates between 1500 and 2000 outstanding transactions. -![mempool-avg](./img/v034_r200c2_mempool_size_avg.png) +![mempool-avg](img34/v034_r200c2_mempool_size_avg.png) The peaks observed coincide with the moments when some nodes proceeded beyond the initial round of consensus (see below). @@ -123,26 +123,26 @@ The number of peers was stable at all nodes. It was higher for the seed nodes (around 140) than for the rest (between 21 and 74). The fact that non-seed nodes reach more than 50 peers is due to #9548. -![peers](./img/v034_r200c2_peers.png) +![peers](img34/v034_r200c2_peers.png) #### Consensus Rounds per Height Most nodes used only round 0 for most heights, but some nodes needed to advance to round 1 for some heights. -![rounds](./img/v034_r200c2_rounds.png) +![rounds](img34/v034_r200c2_rounds.png) #### Blocks Produced per Minute, Transactions Processed per Minute The blocks produced per minute are the slope of this plot. -![heights](./img/v034_r200c2_heights.png) +![heights](img34/v034_r200c2_heights.png) Over a period of 2 minutes, the height goes from 530 to 569. This results in an average of 19.5 blocks produced per minute. 
The transactions processed per minute are the slope of this plot. -![total-txs](./img/v034_r200c2_total-txs.png) +![total-txs](img34/v034_r200c2_total-txs.png) Over a period of 2 minutes, the total goes from 64525 to 100125 transactions, resulting in 17800 transactions per minute. However, we can see in the plot that @@ -154,11 +154,11 @@ we obtain 20343 transactions per minute. Resident Set Size of all monitored processes is plotted below. -![rss](./img/v034_r200c2_rss.png) +![rss](img34/v034_r200c2_rss.png) The average over all processes oscillates around 1.2 GiB and does not demonstrate unconstrained growth. -![rss-avg](./img/v034_r200c2_rss_avg.png) +![rss-avg](img34/v034_r200c2_rss_avg.png) #### CPU utilization @@ -166,7 +166,7 @@ The best metric from Prometheus to gauge CPU utilization in a Unix machine is `l as it usually appears in the [output of `top`](https://www.digitalocean.com/community/tutorials/load-average-in-linux). -![load1](./img/v034_r200c2_load1.png) +![load1](img34/v034_r200c2_load1.png) It is contained in most cases below 5, which is generally considered acceptable load. @@ -191,7 +191,7 @@ we are focusing on here. The plot of all latencies can be seen in the following plot. -![rotating-all-latencies](./img/v034_rotating_latencies.png) +![rotating-all-latencies](img34/v034_rotating_latencies.png) We can observe there are some very high latencies, towards the end of the test. Upon suspicion that they are duplicate transactions, we examined the latencies @@ -200,7 +200,7 @@ raw file and discovered there are more than 100K duplicate transactions. The following plot shows the latencies file where all duplicate transactions have been removed, i.e., only the first occurrence of a duplicate transaction is kept. -![rotating-all-latencies-uniq](./img/v034_rotating_latencies_uniq.png) +![rotating-all-latencies-uniq](img34/v034_rotating_latencies_uniq.png) This problem, existing in `v0.34.x`, will need to be addressed, perhaps in the same way we addressed it when running the 200 node test with high loads: increasing the `cache_size` @@ -215,7 +215,7 @@ We are only interested in those for which the catch-up process (blocksync) may h Just as shown for the 200 node test, the blocks produced per minute are the gradient of this plot. -![rotating-heights](./img/v034_rotating_heights.png) +![rotating-heights](img34/v034_rotating_heights.png) Over a period of 5229 seconds, the height goes from 2 to 3638. This results in an average of 41 blocks produced per minute. @@ -225,11 +225,11 @@ The following plot shows only the heights reported by ephemeral nodes is only showed _once the node has switched to consensus_, hence the gaps when nodes are killed, wiped out, started from scratch, and catching up. -![rotating-heights-ephe](./img/v034_rotating_heights_ephe.png) +![rotating-heights-ephe](img34/v034_rotating_heights_ephe.png) The transactions processed per minute are the gradient of this plot. -![rotating-total-txs](./img/v034_rotating_total-txs.png) +![rotating-total-txs](img34/v034_rotating_total-txs.png) The small lines we see periodically close to `y=0` are the transactions that ephemeral nodes start processing when they are caught up. @@ -244,7 +244,7 @@ The plot below shows the evolution in peers throughout the experiment. The periodic changes observed are due to the ephemeral nodes being stopped, wiped out, and recreated. 
-![rotating-peers](./img/v034_rotating_peers.png) +![rotating-peers](img34/v034_rotating_peers.png) The validators' plots are concentrated at the higher part of the graph, whereas the ephemeral nodes are mostly at the lower part. @@ -254,7 +254,7 @@ are mostly at the lower part. The average Resident Set Size (RSS) over all processes seems stable, and slightly growing toward the end. This might be related to the increased in transaction load observed above. -![rotating-rss-avg](./img/v034_rotating_rss_avg.png) +![rotating-rss-avg](img34/v034_rotating_rss_avg.png) The memory taken by the validators and the ephemeral nodes (when they are up) is comparable. @@ -262,7 +262,7 @@ The memory taken by the validators and the ephemeral nodes (when they are up) is The plot shows metric `load1` for all nodes. -![rotating-load1](./img/v034_rotating_load1.png) +![rotating-load1](img34/v034_rotating_load1.png) It is contained under 5 most of the time, which is considered normal load. The purple line, which follows a different pattern is the validator receiving all diff --git a/docs/qa/v037/TMCore.md b/docs/qa/TMCore-QA-37.md similarity index 77% rename from docs/qa/v037/TMCore.md rename to docs/qa/TMCore-QA-37.md index 6797d2144c4..edff57b0276 100644 --- a/docs/qa/v037/TMCore.md +++ b/docs/qa/TMCore-QA-37.md @@ -1,12 +1,12 @@ --- order: 1 parent: - title: CometBFT Quality Assurance Results for v0.37.x + title: Tendermint Core QA Results v0.37.x description: This is a report on the results obtained when running TM v0.37.x on testnets - order: 2 + order: 4 --- -# v0.37.x +# Tendermint Core QA Results v0.37.x ## Issues discovered @@ -32,11 +32,11 @@ During this iteration of the QA process, the following issues were found: ### Finding the Saturation Point The first goal is to identify the saturation point and compare it with the baseline (v0.34.x). -For further details, see [this paragraph](../v034/README.md#finding-the-saturation-point) +For further details, see [this paragraph](CometBFT-QA-34.md#finding-the-saturation-point) in the baseline version. The following table summarizes the results for v0.37.x, for the different experiments -(extracted from file [`v037_report_tabbed.txt`](./img/200nodes_tm037/v037_report_tabbed.txt)). +(extracted from file [`v037_report_tabbed.txt`](img37/200nodes_tm037/v037_report_tabbed.txt)). The X axis of this table is `c`, the number of connections created by the load runner process to the target node. The Y axis of this table is `r`, the rate or number of transactions issued per second. @@ -63,7 +63,7 @@ The saturation point is beyond the diagonal: * `r=100,c=4` which is at the same place as the baseline. For more details on the saturation point, see -[this paragraph](../v034/README.md#finding-the-saturation-point) in the baseline version. +[this paragraph](CometBFT-QA-34.md#finding-the-saturation-point) in the baseline version. The experiment chosen to examine Prometheus metrics is the same as in the baseline: **`r=200,c=2`**. @@ -72,27 +72,27 @@ The load runner's CPU load was negligible (near 0) when running `r=200,c=2`. ### Examining latencies -The method described [here](../method.md) allows us to plot the latencies of transactions +The method described [here](method.md) allows us to plot the latencies of transactions for all experiments. -![all-latencies](./img/200nodes_tm037/v037_200node_latencies.png) +![all-latencies](img37/200nodes_tm037/v037_200node_latencies.png) The data seen in the plot is similar to that of the baseline. 
-![all-latencies-bl](../v034/img/v034_200node_latencies.png) +![all-latencies-bl](img34/v034_200node_latencies.png) Therefore, for further details on these plots, -see [this paragraph](../v034/README.md#examining-latencies) in the baseline version. +see [this paragraph](CometBFT-QA-34.md#examining-latencies) in the baseline version. The following plot summarizes average latencies versus overall throughputs across different numbers of WebSocket connections to the node into which transactions are being loaded. -![latency-vs-throughput](./img/200nodes_tm037/v037_latency_throughput.png) +![latency-vs-throughput](img37/200nodes_tm037/v037_latency_throughput.png) This is similar to that of the baseline plot: -![latency-vs-throughput-bl](../v034/img/v034_latency_throughput.png) +![latency-vs-throughput-bl](img34/v034_latency_throughput.png) ### Prometheus Metrics on the Chosen Experiment @@ -106,55 +106,55 @@ at all full nodes. It did not exhibit any unconstrained growth. The plot below shows the evolution over time of the cumulative number of transactions inside all full nodes' mempools at a given time. -![mempool-cumulative](./img/200nodes_tm037/v037_r200c2_mempool_size.png) +![mempool-cumulative](img37/200nodes_tm037/v037_r200c2_mempool_size.png) The plot below shows evolution of the average over all full nodes, which oscillate between 1500 and 2000 outstanding transactions. -![mempool-avg](./img/200nodes_tm037/v037_r200c2_mempool_size_avg.png) +![mempool-avg](img37/200nodes_tm037/v037_r200c2_mempool_size_avg.png) The peaks observed coincide with the moments when some nodes reached round 1 of consensus (see below). **These plots yield similar results to the baseline**: -![mempool-cumulative-bl](../v034/img/v034_r200c2_mempool_size.png) +![mempool-cumulative-bl](img34/v034_r200c2_mempool_size.png) -![mempool-avg-bl](../v034/img/v034_r200c2_mempool_size_avg.png) +![mempool-avg-bl](img34/v034_r200c2_mempool_size_avg.png) #### Peers The number of peers was stable at all nodes. It was higher for the seed nodes (around 140) than for the rest (between 16 and 78). -![peers](./img/200nodes_tm037/v037_r200c2_peers.png) +![peers](img37/200nodes_tm037/v037_r200c2_peers.png) Just as in the baseline, the fact that non-seed nodes reach more than 50 peers is due to #9548. **This plot yields similar results to the baseline**: -![peers-bl](../v034/img/v034_r200c2_peers.png) +![peers-bl](img34/v034_r200c2_peers.png) #### Consensus Rounds per Height Most heights took just one round, but some nodes needed to advance to round 1 at some point. -![rounds](./img/200nodes_tm037/v037_r200c2_rounds.png) +![rounds](img37/200nodes_tm037/v037_r200c2_rounds.png) **This plot yields slightly better results than the baseline**: -![rounds-bl](../v034/img/v034_r200c2_rounds.png) +![rounds-bl](img34/v034_r200c2_rounds.png) #### Blocks Produced per Minute, Transactions Processed per Minute The blocks produced per minute are the gradient of this plot. -![heights](./img/200nodes_tm037/v037_r200c2_heights.png) +![heights](img37/200nodes_tm037/v037_r200c2_heights.png) Over a period of 2 minutes, the height goes from 477 to 524. This results in an average of 23.5 blocks produced per minute. The transactions processed per minute are the gradient of this plot. -![total-txs](./img/200nodes_tm037/v037_r200c2_total-txs.png) +![total-txs](img37/200nodes_tm037/v037_r200c2_total-txs.png) Over a period of 2 minutes, the total goes from 64525 to 100125 transactions, resulting in 17800 transactions per minute. 
However, we can see in the plot that @@ -164,25 +164,25 @@ we obtain 23733 transactions per minute. **These plots yield similar results to the baseline**: -![heights-bl](../v034/img/v034_r200c2_heights.png) +![heights-bl](img34/v034_r200c2_heights.png) -![total-txs](../v034/img/v034_r200c2_total-txs.png) +![total-txs](img34/v034_r200c2_total-txs.png) #### Memory Resident Set Size Resident Set Size of all monitored processes is plotted below. -![rss](./img/200nodes_tm037/v037_r200c2_rss.png) +![rss](img37/200nodes_tm037/v037_r200c2_rss.png) The average over all processes oscillates around 380 MiB and does not demonstrate unconstrained growth. -![rss-avg](./img/200nodes_tm037/v037_r200c2_rss_avg.png) +![rss-avg](img37/200nodes_tm037/v037_r200c2_rss_avg.png) **These plots yield similar results to the baseline**: -![rss-bl](../v034/img/v034_r200c2_rss.png) +![rss-bl](img34/v034_r200c2_rss.png) -![rss-avg-bl](../v034/img/v034_r200c2_rss_avg.png) +![rss-avg-bl](img34/v034_r200c2_rss_avg.png) #### CPU utilization @@ -190,13 +190,13 @@ The best metric from Prometheus to gauge CPU utilization in a Unix machine is `l as it usually appears in the [output of `top`](https://www.digitalocean.com/community/tutorials/load-average-in-linux). -![load1](./img/200nodes_tm037/v037_r200c2_load1.png) +![load1](img37/200nodes_tm037/v037_r200c2_load1.png) It is contained below 5 on most nodes. **This plot yields similar results to the baseline**: -![load1](../v034/img/v034_r200c2_load1.png) +![load1](img34/v034_r200c2_load1.png) ### Test Result @@ -211,18 +211,18 @@ Version: 1cf9d8e276afe8595cba960b51cd056514965fd1 We use the same load as in the baseline: `c=4,r=800`. Just as in the baseline tests, the version of CometBFT used for these tests is affected by #9539. -See this paragraph in the [baseline report](../v034/README.md#rotating-node-testnet) for further details. +See this paragraph in the [baseline report](CometBFT-QA-34.md#rotating-node-testnet) for further details. Finally, note that this setup allows for a fairer comparison between this version and the baseline. ### Latencies The plot of all latencies can be seen here. -![rotating-all-latencies](./img/200nodes_tm037/v037_rotating_latencies.png) +![rotating-all-latencies](img37/200nodes_tm037/v037_rotating_latencies.png) Which is similar to the baseline. -![rotating-all-latencies-bl](../v034/img/v034_rotating_latencies_uniq.png) +![rotating-all-latencies-bl](img34/v034_rotating_latencies_uniq.png) Note that we are comparing against the baseline plot with _unique_ transactions. This is because the problem with duplicate transactions @@ -238,27 +238,27 @@ We also show the baseline results for comparison. The blocks produced per minute are the gradient of this plot. -![rotating-heights](./img/200nodes_tm037/v037_rotating_heights.png) +![rotating-heights](img37/200nodes_tm037/v037_rotating_heights.png) Over a period of 4446 seconds, the height goes from 5 to 3323. This results in an average of 45 blocks produced per minute, which is similar to the baseline, shown below. -![rotating-heights-bl](../v034/img/v034_rotating_heights.png) +![rotating-heights-bl](img34/v034_rotating_heights.png) The following two plots show only the heights reported by ephemeral nodes. The second plot is the baseline plot for comparison. 
-![rotating-heights-ephe](./img/200nodes_tm037/v037_rotating_heights_ephe.png) +![rotating-heights-ephe](img37/200nodes_tm037/v037_rotating_heights_ephe.png) -![rotating-heights-ephe-bl](../v034/img/v034_rotating_heights_ephe.png) +![rotating-heights-ephe-bl](img34/v034_rotating_heights_ephe.png) By the length of the segments, we can see that ephemeral nodes in `v0.37` catch up slightly faster. The transactions processed per minute are the gradient of this plot. -![rotating-total-txs](./img/200nodes_tm037/v037_rotating_total-txs.png) +![rotating-total-txs](img37/200nodes_tm037/v037_rotating_total-txs.png) Over a period of 3852 seconds, the total goes from 597 to 267298 transactions in one of the validators, resulting in 4154 transactions per minute, which is slightly lower than the baseline, @@ -266,17 +266,17 @@ although the baseline had to deal with duplicate transactions. For comparison, this is the baseline plot. -![rotating-total-txs-bl](../v034/img/v034_rotating_total-txs.png) +![rotating-total-txs-bl](img34/v034_rotating_total-txs.png) #### Peers The plot below shows the evolution of the number of peers throughout the experiment. -![rotating-peers](./img/200nodes_tm037/v037_rotating_peers.png) +![rotating-peers](img37/200nodes_tm037/v037_rotating_peers.png) This is the baseline plot, for comparison. -![rotating-peers-bl](../v034/img/v034_rotating_peers.png) +![rotating-peers-bl](img34/v034_rotating_peers.png) The plotted values and their evolution are comparable in both plots. @@ -287,9 +287,9 @@ For further details on these plots, see the baseline report. The average Resident Set Size (RSS) over all processes looks slightly more stable on `v0.37` (first plot) than on the baseline (second plot). -![rotating-rss-avg](./img/200nodes_tm037/v037_rotating_rss_avg.png) +![rotating-rss-avg](img37/200nodes_tm037/v037_rotating_rss_avg.png) -![rotating-rss-avg-bl](../v034/img/v034_rotating_rss_avg.png) +![rotating-rss-avg-bl](img34/v034_rotating_rss_avg.png) The memory taken by the validators and the ephemeral nodes when they are up is comparable (not shown in the plots), just as observed in the baseline. @@ -298,11 +298,9 @@ just as observed in the baseline. The plot shows metric `load1` for all nodes. -![rotating-load1](./img/200nodes_tm037/200nodes_tm037/v037_rotating_load1.png) +![rotating-load1](img37/200nodes_tm037/v037_rotating_load1.png) -This is the baseline plot. - -![rotating-load1-bl](../v034/img/v034_rotating_load1.png) +![rotating-load1-bl](img34/v034_rotating_load1.png) In both cases, it is contained under 5 most of the time, which is considered normal load. 
The green line in the `v0.37` plot and the purple line in the baseline plot (`v0.34`) diff --git a/docs/qa/v034/img/baseline/avg_cpu.png b/docs/qa/img34/baseline/avg_cpu.png similarity index 100% rename from docs/qa/v034/img/baseline/avg_cpu.png rename to docs/qa/img34/baseline/avg_cpu.png diff --git a/docs/qa/v034/img/baseline/avg_memory.png b/docs/qa/img34/baseline/avg_memory.png similarity index 100% rename from docs/qa/v034/img/baseline/avg_memory.png rename to docs/qa/img34/baseline/avg_memory.png diff --git a/docs/qa/v034/img/baseline/avg_mempool_size.png b/docs/qa/img34/baseline/avg_mempool_size.png similarity index 100% rename from docs/qa/v034/img/baseline/avg_mempool_size.png rename to docs/qa/img34/baseline/avg_mempool_size.png diff --git a/docs/qa/v034/img/baseline/block_rate_regular.png b/docs/qa/img34/baseline/block_rate_regular.png similarity index 100% rename from docs/qa/v034/img/baseline/block_rate_regular.png rename to docs/qa/img34/baseline/block_rate_regular.png diff --git a/docs/qa/v034/img/baseline/cpu.png b/docs/qa/img34/baseline/cpu.png similarity index 100% rename from docs/qa/v034/img/baseline/cpu.png rename to docs/qa/img34/baseline/cpu.png diff --git a/docs/qa/v034/img/baseline/memory.png b/docs/qa/img34/baseline/memory.png similarity index 100% rename from docs/qa/v034/img/baseline/memory.png rename to docs/qa/img34/baseline/memory.png diff --git a/docs/qa/v034/img/baseline/mempool_size.png b/docs/qa/img34/baseline/mempool_size.png similarity index 100% rename from docs/qa/v034/img/baseline/mempool_size.png rename to docs/qa/img34/baseline/mempool_size.png diff --git a/docs/qa/v034/img/baseline/peers.png b/docs/qa/img34/baseline/peers.png similarity index 100% rename from docs/qa/v034/img/baseline/peers.png rename to docs/qa/img34/baseline/peers.png diff --git a/docs/qa/v034/img/baseline/rounds.png b/docs/qa/img34/baseline/rounds.png similarity index 100% rename from docs/qa/v034/img/baseline/rounds.png rename to docs/qa/img34/baseline/rounds.png diff --git a/docs/qa/v034/img/baseline/total_txs_rate_regular.png b/docs/qa/img34/baseline/total_txs_rate_regular.png similarity index 100% rename from docs/qa/v034/img/baseline/total_txs_rate_regular.png rename to docs/qa/img34/baseline/total_txs_rate_regular.png diff --git a/docs/qa/v034/img/cmt1tm1/all_experiments.png b/docs/qa/img34/cmt1tm1/all_experiments.png similarity index 100% rename from docs/qa/v034/img/cmt1tm1/all_experiments.png rename to docs/qa/img34/cmt1tm1/all_experiments.png diff --git a/docs/qa/v034/img/cmt1tm1/avg_cpu.png b/docs/qa/img34/cmt1tm1/avg_cpu.png similarity index 100% rename from docs/qa/v034/img/cmt1tm1/avg_cpu.png rename to docs/qa/img34/cmt1tm1/avg_cpu.png diff --git a/docs/qa/v034/img/cmt1tm1/avg_memory.png b/docs/qa/img34/cmt1tm1/avg_memory.png similarity index 100% rename from docs/qa/v034/img/cmt1tm1/avg_memory.png rename to docs/qa/img34/cmt1tm1/avg_memory.png diff --git a/docs/qa/v034/img/cmt1tm1/avg_mempool_size.png b/docs/qa/img34/cmt1tm1/avg_mempool_size.png similarity index 100% rename from docs/qa/v034/img/cmt1tm1/avg_mempool_size.png rename to docs/qa/img34/cmt1tm1/avg_mempool_size.png diff --git a/docs/qa/v034/img/cmt1tm1/block_rate_regular.png b/docs/qa/img34/cmt1tm1/block_rate_regular.png similarity index 100% rename from docs/qa/v034/img/cmt1tm1/block_rate_regular.png rename to docs/qa/img34/cmt1tm1/block_rate_regular.png diff --git a/docs/qa/v034/img/cmt1tm1/cpu.png b/docs/qa/img34/cmt1tm1/cpu.png similarity index 100% rename from docs/qa/v034/img/cmt1tm1/cpu.png 
rename to docs/qa/img34/cmt1tm1/cpu.png diff --git a/docs/qa/v034/img/cmt1tm1/memory.png b/docs/qa/img34/cmt1tm1/memory.png similarity index 100% rename from docs/qa/v034/img/cmt1tm1/memory.png rename to docs/qa/img34/cmt1tm1/memory.png diff --git a/docs/qa/v034/img/cmt1tm1/mempool_size.png b/docs/qa/img34/cmt1tm1/mempool_size.png similarity index 100% rename from docs/qa/v034/img/cmt1tm1/mempool_size.png rename to docs/qa/img34/cmt1tm1/mempool_size.png diff --git a/docs/qa/v034/img/cmt1tm1/peers.png b/docs/qa/img34/cmt1tm1/peers.png similarity index 100% rename from docs/qa/v034/img/cmt1tm1/peers.png rename to docs/qa/img34/cmt1tm1/peers.png diff --git a/docs/qa/v034/img/cmt1tm1/rounds.png b/docs/qa/img34/cmt1tm1/rounds.png similarity index 100% rename from docs/qa/v034/img/cmt1tm1/rounds.png rename to docs/qa/img34/cmt1tm1/rounds.png diff --git a/docs/qa/v034/img/cmt1tm1/total_txs_rate_regular.png b/docs/qa/img34/cmt1tm1/total_txs_rate_regular.png similarity index 100% rename from docs/qa/v034/img/cmt1tm1/total_txs_rate_regular.png rename to docs/qa/img34/cmt1tm1/total_txs_rate_regular.png diff --git a/docs/qa/v034/img/cmt2tm1/all_experiments.png b/docs/qa/img34/cmt2tm1/all_experiments.png similarity index 100% rename from docs/qa/v034/img/cmt2tm1/all_experiments.png rename to docs/qa/img34/cmt2tm1/all_experiments.png diff --git a/docs/qa/v034/img/cmt2tm1/avg_cpu.png b/docs/qa/img34/cmt2tm1/avg_cpu.png similarity index 100% rename from docs/qa/v034/img/cmt2tm1/avg_cpu.png rename to docs/qa/img34/cmt2tm1/avg_cpu.png diff --git a/docs/qa/v034/img/cmt2tm1/avg_memory.png b/docs/qa/img34/cmt2tm1/avg_memory.png similarity index 100% rename from docs/qa/v034/img/cmt2tm1/avg_memory.png rename to docs/qa/img34/cmt2tm1/avg_memory.png diff --git a/docs/qa/v034/img/cmt2tm1/avg_mempool_size.png b/docs/qa/img34/cmt2tm1/avg_mempool_size.png similarity index 100% rename from docs/qa/v034/img/cmt2tm1/avg_mempool_size.png rename to docs/qa/img34/cmt2tm1/avg_mempool_size.png diff --git a/docs/qa/v034/img/cmt2tm1/block_rate_regular.png b/docs/qa/img34/cmt2tm1/block_rate_regular.png similarity index 100% rename from docs/qa/v034/img/cmt2tm1/block_rate_regular.png rename to docs/qa/img34/cmt2tm1/block_rate_regular.png diff --git a/docs/qa/v034/img/cmt2tm1/cpu.png b/docs/qa/img34/cmt2tm1/cpu.png similarity index 100% rename from docs/qa/v034/img/cmt2tm1/cpu.png rename to docs/qa/img34/cmt2tm1/cpu.png diff --git a/docs/qa/v034/img/cmt2tm1/memory.png b/docs/qa/img34/cmt2tm1/memory.png similarity index 100% rename from docs/qa/v034/img/cmt2tm1/memory.png rename to docs/qa/img34/cmt2tm1/memory.png diff --git a/docs/qa/v034/img/cmt2tm1/mempool_size.png b/docs/qa/img34/cmt2tm1/mempool_size.png similarity index 100% rename from docs/qa/v034/img/cmt2tm1/mempool_size.png rename to docs/qa/img34/cmt2tm1/mempool_size.png diff --git a/docs/qa/v034/img/cmt2tm1/peers.png b/docs/qa/img34/cmt2tm1/peers.png similarity index 100% rename from docs/qa/v034/img/cmt2tm1/peers.png rename to docs/qa/img34/cmt2tm1/peers.png diff --git a/docs/qa/v034/img/cmt2tm1/rounds.png b/docs/qa/img34/cmt2tm1/rounds.png similarity index 100% rename from docs/qa/v034/img/cmt2tm1/rounds.png rename to docs/qa/img34/cmt2tm1/rounds.png diff --git a/docs/qa/v034/img/cmt2tm1/total_txs_rate_regular.png b/docs/qa/img34/cmt2tm1/total_txs_rate_regular.png similarity index 100% rename from docs/qa/v034/img/cmt2tm1/total_txs_rate_regular.png rename to docs/qa/img34/cmt2tm1/total_txs_rate_regular.png diff --git a/docs/qa/v034/img/homogeneous/all_experiments.png 
b/docs/qa/img34/homogeneous/all_experiments.png similarity index 100% rename from docs/qa/v034/img/homogeneous/all_experiments.png rename to docs/qa/img34/homogeneous/all_experiments.png diff --git a/docs/qa/v034/img/homogeneous/avg_cpu.png b/docs/qa/img34/homogeneous/avg_cpu.png similarity index 100% rename from docs/qa/v034/img/homogeneous/avg_cpu.png rename to docs/qa/img34/homogeneous/avg_cpu.png diff --git a/docs/qa/v034/img/homogeneous/avg_memory.png b/docs/qa/img34/homogeneous/avg_memory.png similarity index 100% rename from docs/qa/v034/img/homogeneous/avg_memory.png rename to docs/qa/img34/homogeneous/avg_memory.png diff --git a/docs/qa/v034/img/homogeneous/avg_mempool_size.png b/docs/qa/img34/homogeneous/avg_mempool_size.png similarity index 100% rename from docs/qa/v034/img/homogeneous/avg_mempool_size.png rename to docs/qa/img34/homogeneous/avg_mempool_size.png diff --git a/docs/qa/v034/img/homogeneous/block_rate_regular.png b/docs/qa/img34/homogeneous/block_rate_regular.png similarity index 100% rename from docs/qa/v034/img/homogeneous/block_rate_regular.png rename to docs/qa/img34/homogeneous/block_rate_regular.png diff --git a/docs/qa/v034/img/homogeneous/cpu.png b/docs/qa/img34/homogeneous/cpu.png similarity index 100% rename from docs/qa/v034/img/homogeneous/cpu.png rename to docs/qa/img34/homogeneous/cpu.png diff --git a/docs/qa/v034/img/homogeneous/memory.png b/docs/qa/img34/homogeneous/memory.png similarity index 100% rename from docs/qa/v034/img/homogeneous/memory.png rename to docs/qa/img34/homogeneous/memory.png diff --git a/docs/qa/v034/img/homogeneous/mempool_size.png b/docs/qa/img34/homogeneous/mempool_size.png similarity index 100% rename from docs/qa/v034/img/homogeneous/mempool_size.png rename to docs/qa/img34/homogeneous/mempool_size.png diff --git a/docs/qa/v034/img/homogeneous/peers.png b/docs/qa/img34/homogeneous/peers.png similarity index 100% rename from docs/qa/v034/img/homogeneous/peers.png rename to docs/qa/img34/homogeneous/peers.png diff --git a/docs/qa/v034/img/homogeneous/rounds.png b/docs/qa/img34/homogeneous/rounds.png similarity index 100% rename from docs/qa/v034/img/homogeneous/rounds.png rename to docs/qa/img34/homogeneous/rounds.png diff --git a/docs/qa/v034/img/homogeneous/total_txs_rate_regular.png b/docs/qa/img34/homogeneous/total_txs_rate_regular.png similarity index 100% rename from docs/qa/v034/img/homogeneous/total_txs_rate_regular.png rename to docs/qa/img34/homogeneous/total_txs_rate_regular.png diff --git a/docs/qa/v034/img/v034_200node_latencies.png b/docs/qa/img34/v034_200node_latencies.png similarity index 100% rename from docs/qa/v034/img/v034_200node_latencies.png rename to docs/qa/img34/v034_200node_latencies.png diff --git a/docs/qa/v034/img/v034_200node_latencies_zoomed.png b/docs/qa/img34/v034_200node_latencies_zoomed.png similarity index 100% rename from docs/qa/v034/img/v034_200node_latencies_zoomed.png rename to docs/qa/img34/v034_200node_latencies_zoomed.png diff --git a/docs/qa/v034/img/v034_200node_tm2cmt1/all_experiments.png b/docs/qa/img34/v034_200node_tm2cmt1/all_experiments.png similarity index 100% rename from docs/qa/v034/img/v034_200node_tm2cmt1/all_experiments.png rename to docs/qa/img34/v034_200node_tm2cmt1/all_experiments.png diff --git a/docs/qa/v034/img/v034_200node_tm2cmt1/avg_cpu.png b/docs/qa/img34/v034_200node_tm2cmt1/avg_cpu.png similarity index 100% rename from docs/qa/v034/img/v034_200node_tm2cmt1/avg_cpu.png rename to docs/qa/img34/v034_200node_tm2cmt1/avg_cpu.png diff --git 
a/docs/qa/v034/img/v034_200node_tm2cmt1/avg_memory.png b/docs/qa/img34/v034_200node_tm2cmt1/avg_memory.png similarity index 100% rename from docs/qa/v034/img/v034_200node_tm2cmt1/avg_memory.png rename to docs/qa/img34/v034_200node_tm2cmt1/avg_memory.png diff --git a/docs/qa/v034/img/v034_200node_tm2cmt1/avg_mempool_size.png b/docs/qa/img34/v034_200node_tm2cmt1/avg_mempool_size.png similarity index 100% rename from docs/qa/v034/img/v034_200node_tm2cmt1/avg_mempool_size.png rename to docs/qa/img34/v034_200node_tm2cmt1/avg_mempool_size.png diff --git a/docs/qa/v034/img/v034_200node_tm2cmt1/block_rate_regular.png b/docs/qa/img34/v034_200node_tm2cmt1/block_rate_regular.png similarity index 100% rename from docs/qa/v034/img/v034_200node_tm2cmt1/block_rate_regular.png rename to docs/qa/img34/v034_200node_tm2cmt1/block_rate_regular.png diff --git a/docs/qa/v034/img/v034_200node_tm2cmt1/c2r200_merged.png b/docs/qa/img34/v034_200node_tm2cmt1/c2r200_merged.png similarity index 100% rename from docs/qa/v034/img/v034_200node_tm2cmt1/c2r200_merged.png rename to docs/qa/img34/v034_200node_tm2cmt1/c2r200_merged.png diff --git a/docs/qa/v034/img/v034_200node_tm2cmt1/cpu.png b/docs/qa/img34/v034_200node_tm2cmt1/cpu.png similarity index 100% rename from docs/qa/v034/img/v034_200node_tm2cmt1/cpu.png rename to docs/qa/img34/v034_200node_tm2cmt1/cpu.png diff --git a/docs/qa/v034/img/v034_200node_tm2cmt1/memory.png b/docs/qa/img34/v034_200node_tm2cmt1/memory.png similarity index 100% rename from docs/qa/v034/img/v034_200node_tm2cmt1/memory.png rename to docs/qa/img34/v034_200node_tm2cmt1/memory.png diff --git a/docs/qa/v034/img/v034_200node_tm2cmt1/mempool_size.png b/docs/qa/img34/v034_200node_tm2cmt1/mempool_size.png similarity index 100% rename from docs/qa/v034/img/v034_200node_tm2cmt1/mempool_size.png rename to docs/qa/img34/v034_200node_tm2cmt1/mempool_size.png diff --git a/docs/qa/v034/img/v034_200node_tm2cmt1/peers.png b/docs/qa/img34/v034_200node_tm2cmt1/peers.png similarity index 100% rename from docs/qa/v034/img/v034_200node_tm2cmt1/peers.png rename to docs/qa/img34/v034_200node_tm2cmt1/peers.png diff --git a/docs/qa/v034/img/v034_200node_tm2cmt1/rounds.png b/docs/qa/img34/v034_200node_tm2cmt1/rounds.png similarity index 100% rename from docs/qa/v034/img/v034_200node_tm2cmt1/rounds.png rename to docs/qa/img34/v034_200node_tm2cmt1/rounds.png diff --git a/docs/qa/v034/img/v034_200node_tm2cmt1/total_txs_rate_regular.png b/docs/qa/img34/v034_200node_tm2cmt1/total_txs_rate_regular.png similarity index 100% rename from docs/qa/v034/img/v034_200node_tm2cmt1/total_txs_rate_regular.png rename to docs/qa/img34/v034_200node_tm2cmt1/total_txs_rate_regular.png diff --git a/docs/qa/v034/img/v034_latency_throughput.png b/docs/qa/img34/v034_latency_throughput.png similarity index 100% rename from docs/qa/v034/img/v034_latency_throughput.png rename to docs/qa/img34/v034_latency_throughput.png diff --git a/docs/qa/v034/img/v034_r200c2_heights.png b/docs/qa/img34/v034_r200c2_heights.png similarity index 100% rename from docs/qa/v034/img/v034_r200c2_heights.png rename to docs/qa/img34/v034_r200c2_heights.png diff --git a/docs/qa/v034/img/v034_r200c2_load-runner.png b/docs/qa/img34/v034_r200c2_load-runner.png similarity index 100% rename from docs/qa/v034/img/v034_r200c2_load-runner.png rename to docs/qa/img34/v034_r200c2_load-runner.png diff --git a/docs/qa/v034/img/v034_r200c2_load1.png b/docs/qa/img34/v034_r200c2_load1.png similarity index 100% rename from docs/qa/v034/img/v034_r200c2_load1.png rename to 
docs/qa/img34/v034_r200c2_load1.png diff --git a/docs/qa/v034/img/v034_r200c2_mempool_size.png b/docs/qa/img34/v034_r200c2_mempool_size.png similarity index 100% rename from docs/qa/v034/img/v034_r200c2_mempool_size.png rename to docs/qa/img34/v034_r200c2_mempool_size.png diff --git a/docs/qa/v034/img/v034_r200c2_mempool_size_avg.png b/docs/qa/img34/v034_r200c2_mempool_size_avg.png similarity index 100% rename from docs/qa/v034/img/v034_r200c2_mempool_size_avg.png rename to docs/qa/img34/v034_r200c2_mempool_size_avg.png diff --git a/docs/qa/v034/img/v034_r200c2_peers.png b/docs/qa/img34/v034_r200c2_peers.png similarity index 100% rename from docs/qa/v034/img/v034_r200c2_peers.png rename to docs/qa/img34/v034_r200c2_peers.png diff --git a/docs/qa/v034/img/v034_r200c2_rounds.png b/docs/qa/img34/v034_r200c2_rounds.png similarity index 100% rename from docs/qa/v034/img/v034_r200c2_rounds.png rename to docs/qa/img34/v034_r200c2_rounds.png diff --git a/docs/qa/v034/img/v034_r200c2_rss.png b/docs/qa/img34/v034_r200c2_rss.png similarity index 100% rename from docs/qa/v034/img/v034_r200c2_rss.png rename to docs/qa/img34/v034_r200c2_rss.png diff --git a/docs/qa/v034/img/v034_r200c2_rss_avg.png b/docs/qa/img34/v034_r200c2_rss_avg.png similarity index 100% rename from docs/qa/v034/img/v034_r200c2_rss_avg.png rename to docs/qa/img34/v034_r200c2_rss_avg.png diff --git a/docs/qa/v034/img/v034_r200c2_total-txs.png b/docs/qa/img34/v034_r200c2_total-txs.png similarity index 100% rename from docs/qa/v034/img/v034_r200c2_total-txs.png rename to docs/qa/img34/v034_r200c2_total-txs.png diff --git a/docs/qa/v034/img/v034_report_tabbed.txt b/docs/qa/img34/v034_report_tabbed.txt similarity index 100% rename from docs/qa/v034/img/v034_report_tabbed.txt rename to docs/qa/img34/v034_report_tabbed.txt diff --git a/docs/qa/v034/img/v034_rotating_heights.png b/docs/qa/img34/v034_rotating_heights.png similarity index 100% rename from docs/qa/v034/img/v034_rotating_heights.png rename to docs/qa/img34/v034_rotating_heights.png diff --git a/docs/qa/v034/img/v034_rotating_heights_ephe.png b/docs/qa/img34/v034_rotating_heights_ephe.png similarity index 100% rename from docs/qa/v034/img/v034_rotating_heights_ephe.png rename to docs/qa/img34/v034_rotating_heights_ephe.png diff --git a/docs/qa/v034/img/v034_rotating_latencies.png b/docs/qa/img34/v034_rotating_latencies.png similarity index 100% rename from docs/qa/v034/img/v034_rotating_latencies.png rename to docs/qa/img34/v034_rotating_latencies.png diff --git a/docs/qa/v034/img/v034_rotating_latencies_uniq.png b/docs/qa/img34/v034_rotating_latencies_uniq.png similarity index 100% rename from docs/qa/v034/img/v034_rotating_latencies_uniq.png rename to docs/qa/img34/v034_rotating_latencies_uniq.png diff --git a/docs/qa/v034/img/v034_rotating_load1.png b/docs/qa/img34/v034_rotating_load1.png similarity index 100% rename from docs/qa/v034/img/v034_rotating_load1.png rename to docs/qa/img34/v034_rotating_load1.png diff --git a/docs/qa/v034/img/v034_rotating_peers.png b/docs/qa/img34/v034_rotating_peers.png similarity index 100% rename from docs/qa/v034/img/v034_rotating_peers.png rename to docs/qa/img34/v034_rotating_peers.png diff --git a/docs/qa/v034/img/v034_rotating_rss_avg.png b/docs/qa/img34/v034_rotating_rss_avg.png similarity index 100% rename from docs/qa/v034/img/v034_rotating_rss_avg.png rename to docs/qa/img34/v034_rotating_rss_avg.png diff --git a/docs/qa/v034/img/v034_rotating_total-txs.png b/docs/qa/img34/v034_rotating_total-txs.png similarity index 100% rename from 
docs/qa/v034/img/v034_rotating_total-txs.png rename to docs/qa/img34/v034_rotating_total-txs.png diff --git a/docs/qa/v037/img/200nodes_cmt037/all_experiments.png b/docs/qa/img37/200nodes_cmt037/all_experiments.png similarity index 100% rename from docs/qa/v037/img/200nodes_cmt037/all_experiments.png rename to docs/qa/img37/200nodes_cmt037/all_experiments.png diff --git a/docs/qa/v037/img/200nodes_cmt037/avg_mempool_size.png b/docs/qa/img37/200nodes_cmt037/avg_mempool_size.png similarity index 100% rename from docs/qa/v037/img/200nodes_cmt037/avg_mempool_size.png rename to docs/qa/img37/200nodes_cmt037/avg_mempool_size.png diff --git a/docs/qa/v037/img/200nodes_cmt037/block_rate.png b/docs/qa/img37/200nodes_cmt037/block_rate.png similarity index 100% rename from docs/qa/v037/img/200nodes_cmt037/block_rate.png rename to docs/qa/img37/200nodes_cmt037/block_rate.png diff --git a/docs/qa/v037/img/200nodes_cmt037/cpu.png b/docs/qa/img37/200nodes_cmt037/cpu.png similarity index 100% rename from docs/qa/v037/img/200nodes_cmt037/cpu.png rename to docs/qa/img37/200nodes_cmt037/cpu.png diff --git a/docs/qa/v037/img/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png b/docs/qa/img37/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png similarity index 100% rename from docs/qa/v037/img/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png rename to docs/qa/img37/200nodes_cmt037/e_75cb89a8-f876-4698-82f3-8aaab0b361af.png diff --git a/docs/qa/v037/img/200nodes_cmt037/memory.png b/docs/qa/img37/200nodes_cmt037/memory.png similarity index 100% rename from docs/qa/v037/img/200nodes_cmt037/memory.png rename to docs/qa/img37/200nodes_cmt037/memory.png diff --git a/docs/qa/v037/img/200nodes_cmt037/mempool_size.png b/docs/qa/img37/200nodes_cmt037/mempool_size.png similarity index 100% rename from docs/qa/v037/img/200nodes_cmt037/mempool_size.png rename to docs/qa/img37/200nodes_cmt037/mempool_size.png diff --git a/docs/qa/v037/img/200nodes_cmt037/peers.png b/docs/qa/img37/200nodes_cmt037/peers.png similarity index 100% rename from docs/qa/v037/img/200nodes_cmt037/peers.png rename to docs/qa/img37/200nodes_cmt037/peers.png diff --git a/docs/qa/v037/img/200nodes_cmt037/rounds.png b/docs/qa/img37/200nodes_cmt037/rounds.png similarity index 100% rename from docs/qa/v037/img/200nodes_cmt037/rounds.png rename to docs/qa/img37/200nodes_cmt037/rounds.png diff --git a/docs/qa/v037/img/200nodes_cmt037/total_txs_rate.png b/docs/qa/img37/200nodes_cmt037/total_txs_rate.png similarity index 100% rename from docs/qa/v037/img/200nodes_cmt037/total_txs_rate.png rename to docs/qa/img37/200nodes_cmt037/total_txs_rate.png diff --git a/docs/qa/v037/img/200nodes_tm037/avg_mempool_size.png b/docs/qa/img37/200nodes_tm037/avg_mempool_size.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/avg_mempool_size.png rename to docs/qa/img37/200nodes_tm037/avg_mempool_size.png diff --git a/docs/qa/v037/img/200nodes_tm037/block_rate_regular.png b/docs/qa/img37/200nodes_tm037/block_rate_regular.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/block_rate_regular.png rename to docs/qa/img37/200nodes_tm037/block_rate_regular.png diff --git a/docs/qa/v037/img/200nodes_tm037/cpu.png b/docs/qa/img37/200nodes_tm037/cpu.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/cpu.png rename to docs/qa/img37/200nodes_tm037/cpu.png diff --git a/docs/qa/v037/img/200nodes_tm037/memory.png b/docs/qa/img37/200nodes_tm037/memory.png similarity index 100% rename from 
docs/qa/v037/img/200nodes_tm037/memory.png rename to docs/qa/img37/200nodes_tm037/memory.png diff --git a/docs/qa/v037/img/200nodes_tm037/mempool_size.png b/docs/qa/img37/200nodes_tm037/mempool_size.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/mempool_size.png rename to docs/qa/img37/200nodes_tm037/mempool_size.png diff --git a/docs/qa/v037/img/200nodes_tm037/peers.png b/docs/qa/img37/200nodes_tm037/peers.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/peers.png rename to docs/qa/img37/200nodes_tm037/peers.png diff --git a/docs/qa/v037/img/200nodes_tm037/rounds.png b/docs/qa/img37/200nodes_tm037/rounds.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/rounds.png rename to docs/qa/img37/200nodes_tm037/rounds.png diff --git a/docs/qa/v037/img/200nodes_tm037/total_txs_rate_regular.png b/docs/qa/img37/200nodes_tm037/total_txs_rate_regular.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/total_txs_rate_regular.png rename to docs/qa/img37/200nodes_tm037/total_txs_rate_regular.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_200node_latencies.png b/docs/qa/img37/200nodes_tm037/v037_200node_latencies.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_200node_latencies.png rename to docs/qa/img37/200nodes_tm037/v037_200node_latencies.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_latency_throughput.png b/docs/qa/img37/200nodes_tm037/v037_latency_throughput.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_latency_throughput.png rename to docs/qa/img37/200nodes_tm037/v037_latency_throughput.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_r200c2_heights.png b/docs/qa/img37/200nodes_tm037/v037_r200c2_heights.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_r200c2_heights.png rename to docs/qa/img37/200nodes_tm037/v037_r200c2_heights.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_r200c2_load1.png b/docs/qa/img37/200nodes_tm037/v037_r200c2_load1.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_r200c2_load1.png rename to docs/qa/img37/200nodes_tm037/v037_r200c2_load1.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_r200c2_mempool_size.png b/docs/qa/img37/200nodes_tm037/v037_r200c2_mempool_size.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_r200c2_mempool_size.png rename to docs/qa/img37/200nodes_tm037/v037_r200c2_mempool_size.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_r200c2_mempool_size_avg.png b/docs/qa/img37/200nodes_tm037/v037_r200c2_mempool_size_avg.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_r200c2_mempool_size_avg.png rename to docs/qa/img37/200nodes_tm037/v037_r200c2_mempool_size_avg.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_r200c2_peers.png b/docs/qa/img37/200nodes_tm037/v037_r200c2_peers.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_r200c2_peers.png rename to docs/qa/img37/200nodes_tm037/v037_r200c2_peers.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_r200c2_rounds.png b/docs/qa/img37/200nodes_tm037/v037_r200c2_rounds.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_r200c2_rounds.png rename to docs/qa/img37/200nodes_tm037/v037_r200c2_rounds.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_r200c2_rss.png b/docs/qa/img37/200nodes_tm037/v037_r200c2_rss.png similarity index 100% rename from 
docs/qa/v037/img/200nodes_tm037/v037_r200c2_rss.png rename to docs/qa/img37/200nodes_tm037/v037_r200c2_rss.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_r200c2_rss_avg.png b/docs/qa/img37/200nodes_tm037/v037_r200c2_rss_avg.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_r200c2_rss_avg.png rename to docs/qa/img37/200nodes_tm037/v037_r200c2_rss_avg.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_r200c2_total-txs.png b/docs/qa/img37/200nodes_tm037/v037_r200c2_total-txs.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_r200c2_total-txs.png rename to docs/qa/img37/200nodes_tm037/v037_r200c2_total-txs.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_report_tabbed.txt b/docs/qa/img37/200nodes_tm037/v037_report_tabbed.txt similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_report_tabbed.txt rename to docs/qa/img37/200nodes_tm037/v037_report_tabbed.txt diff --git a/docs/qa/v037/img/200nodes_tm037/v037_rotating_heights.png b/docs/qa/img37/200nodes_tm037/v037_rotating_heights.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_rotating_heights.png rename to docs/qa/img37/200nodes_tm037/v037_rotating_heights.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_rotating_heights_ephe.png b/docs/qa/img37/200nodes_tm037/v037_rotating_heights_ephe.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_rotating_heights_ephe.png rename to docs/qa/img37/200nodes_tm037/v037_rotating_heights_ephe.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_rotating_latencies.png b/docs/qa/img37/200nodes_tm037/v037_rotating_latencies.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_rotating_latencies.png rename to docs/qa/img37/200nodes_tm037/v037_rotating_latencies.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_rotating_load1.png b/docs/qa/img37/200nodes_tm037/v037_rotating_load1.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_rotating_load1.png rename to docs/qa/img37/200nodes_tm037/v037_rotating_load1.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_rotating_peers.png b/docs/qa/img37/200nodes_tm037/v037_rotating_peers.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_rotating_peers.png rename to docs/qa/img37/200nodes_tm037/v037_rotating_peers.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_rotating_rss_avg.png b/docs/qa/img37/200nodes_tm037/v037_rotating_rss_avg.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_rotating_rss_avg.png rename to docs/qa/img37/200nodes_tm037/v037_rotating_rss_avg.png diff --git a/docs/qa/v037/img/200nodes_tm037/v037_rotating_total-txs.png b/docs/qa/img37/200nodes_tm037/v037_rotating_total-txs.png similarity index 100% rename from docs/qa/v037/img/200nodes_tm037/v037_rotating_total-txs.png rename to docs/qa/img37/200nodes_tm037/v037_rotating_total-txs.png diff --git a/docs/qa/method.md b/docs/qa/method.md index 5326f935a1b..9a0f9119666 100644 --- a/docs/qa/method.md +++ b/docs/qa/method.md @@ -1,6 +1,8 @@ --- order: 1 -title: Method +parent: + title: QA Process + order: 1 --- # Method @@ -106,11 +108,11 @@ The CometBFT team should improve it at every iteration to increase the amount of 3. 
File `report.txt` contains an unordered list of experiments with varying concurrent connections and transaction rate * If you are looking for the saturation point * Create files `report01.txt`, `report02.txt`, `report04.txt` and, for each experiment in file `report.txt`, - copy its related lines to the filename that matches the number of connections, for example + copy its related lines to the filename that matches the number of connections, for example ```bash for cnum in 1 2 3 4; do echo "$cnum"; grep "Connections: $cnum" results/report.txt -B 2 -A 10 > results/report$cnum.txt; done ``` - + * Sort the experiments in `report01.txt` in ascending tx rate order. Likewise for `report02.txt` and `report04.txt`. * Otherwise just keep `report.txt`, and skip step 4. 4. Generate file `report_tabbed.txt` by showing the contents `report01.txt`, `report02.txt`, `report04.txt` side by side @@ -229,7 +231,7 @@ This section explains how the tests were carried out for reproducibility purpose 7. On a different shell, * run `make runload ROTATE_CONNECTIONS=X ROTATE_TX_RATE=Y` * `X` and `Y` should reflect a load below the saturation point (see, e.g., - [this paragraph](./v034/README.md#finding-the-saturation-point) for further info) + [this paragraph](CometBFT-QA-34.md#finding-the-saturation-point) for further info) 8. Run `make rotate` to start the script that creates the ephemeral nodes, and kills them when they are caught up. * WARNING: If you run this command from your laptop, the laptop needs to be up and connected for full length of the experiment. diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 97ebd7fb90b..5fa1f8e70fe 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -3,7 +3,7 @@ info: title: CometBFT RPC contact: name: CometBFT RPC - url: https://github.com/cometbft/cometbft/issues/new/choose + url: https://docs.cometbft.com/main/rpc description: | CometBFT supports the following RPC protocols: @@ -17,12 +17,22 @@ info: `$CMTHOME/config/config.toml` file or by using the `--rpc.X` command-line flags. - Default rpc listen address is `tcp://0.0.0.0:26657`. + Default rpc listen address is `tcp://127.0.0.1:26657`. To set another address, set the `laddr` config parameter to desired value. CORS (Cross-Origin Resource Sharing) can be enabled by setting `cors_allowed_origins`, `cors_allowed_methods`, `cors_allowed_headers` config parameters. + If testing using a local RPC node, under the `[rpc]` + section change the 'cors_allowed_origins' property, please add the URL of + the site where this OpenAPI document is running, for example: + + `cors_allowed_origins = ["http://localhost:8088"]` + + or if testing from the official documentation site: + + `cors_allowed_origins = ["https://docs.cometbft.com"]` + ## Arguments Arguments which expect strings or byte arrays may be passed as quoted @@ -47,22 +57,21 @@ info: Asynchronous RPC functions like event `subscribe` and `unsubscribe` are only available via websockets. 
- Example using https://github.com/hashrocket/ws: +      For example using the [websocat](https://github.com/vi/websocat) tool, you can subscribe to `NewBlock` events +      with the following command: +  +        echo '{ "jsonrpc": "2.0","method": "subscribe","id": 0,"params": {"query": "tm.event='"'NewBlock'"'"} }' | websocat -n -t ws://127.0.0.1:26657/websocket  -        ws ws://localhost:26657/websocket -        > { "jsonrpc": "2.0", "method": "subscribe", "params": ["tm.event='NewBlock'"], "id": 1 }  version: "main" license: name: Apache 2.0 url: https://github.com/cometbft/cometbft/blob/main/LICENSE servers: -  - url: https://rpc.cosmos.network -    description: Cosmos mainnet node to interact with the CometBFT RPC +  - url: https://rpc.cosmos.directory/cosmoshub +    description: Interact with the CometBFT RPC from a public node in the Cosmos registry  - url: http://localhost:26657 -    description: Interact with the CometBFT RPC locally on your device +    description: Interact with a CometBFT RPC node running locally tags: -  - name: Websocket -    description: Subscribe/unsubscribe are reserved for websocket events. - name: Info description: Informations about the node APIs - name: Tx @@ -94,9 +103,9 @@ paths: (https://github.com/tendermint/tendermint/issues/3322) - Please refer to - https://docs.cometbft.com/main/core/using-cometbft.html#formatting - for formatting/encoding rules. + Please refer to [formatting/encoding rules](https://docs.cometbft.com/main/core/using-cometbft.html#formatting) + for additional details. + parameters: - in: query name: tx @@ -138,9 +147,9 @@ paths: (https://github.com/tendermint/tendermint/issues/3322) 3. node can be offline - Please refer to - https://docs.cometbft.com/main/core/using-cometbft.html#formatting - for formatting/encoding rules. + Please refer to [formatting/encoding rules](https://docs.cometbft.com/main/core/using-cometbft.html#formatting) + for additional details. + parameters: - in: query name: tx @@ -180,9 +189,9 @@ paths: If CheckTx or DeliverTx fail, no error will be returned, but the returned result will contain a non-OK ABCI code. - Please refer to - https://docs.cometbft.com/main/core/using-cometbft.html#formatting - for formatting/encoding rules. + Please refer to [formatting/encoding rules](https://docs.cometbft.com/main/core/using-cometbft.html#formatting) + for additional details. + parameters: - in: query name: tx @@ -213,9 +222,8 @@ paths: description: | The transaction won't be added to the mempool. - Please refer to - https://docs.cometbft.com/main/core/using-cometbft.html#formatting - for formatting/encoding rules. + Please refer to [formatting/encoding rules](https://docs.cometbft.com/main/core/using-cometbft.html#formatting) + for additional details. Upon success, the `Cache-Control` header will be set with the default maximum age. @@ -240,225 +248,6 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" - /subscribe: - get: - summary: Subscribe for events via WebSocket. - tags: - - Websocket - operationId: subscribe - description: | - To tell which events you want, you need to provide a query. query is a - string, which has a form: "condition AND condition ..." (no OR at the - moment). condition has a form: "key operation operand". key is a string with - a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed). - operation can be "=", "<", "<=", ">", ">=", "CONTAINS" AND "EXISTS". operand - can be a string (escaped with single quotes), number, date or time.
- - Examples: - tm.event = 'NewBlock' # new blocks - tm.event = 'CompleteProposal' # node got a complete proposal - tm.event = 'Tx' AND tx.hash = 'XYZ' # single transaction - tm.event = 'Tx' AND tx.height = 5 # all txs of the fifth block - tx.height = 5 # all txs of the fifth block - - CometBFT provides a few predefined keys: tm.event, tx.hash and tx.height. - Note for transactions, you can define additional keys by providing events with - DeliverTx response. - - import ( - abci "github.com/cometbft/cometbft/abci/types" - "github.com/cometbft/cometbft/libs/pubsub/query" - ) - - abci.ResponseDeliverTx{ - Events: []abci.Event{ - { - Type: "rewards.withdraw", - Attributes: abci.EventAttribute{ - {Key: []byte("address"), Value: []byte("AddrA"), Index: true}, - {Key: []byte("source"), Value: []byte("SrcX"), Index: true}, - {Key: []byte("amount"), Value: []byte("..."), Index: true}, - {Key: []byte("balance"), Value: []byte("..."), Index: true}, - }, - }, - { - Type: "rewards.withdraw", - Attributes: abci.EventAttribute{ - {Key: []byte("address"), Value: []byte("AddrB"), Index: true}, - {Key: []byte("source"), Value: []byte("SrcY"), Index: true}, - {Key: []byte("amount"), Value: []byte("..."), Index: true}, - {Key: []byte("balance"), Value: []byte("..."), Index: true}, - }, - }, - { - Type: "transfer", - Attributes: abci.EventAttribute{ - {Key: []byte("sender"), Value: []byte("AddrC"), Index: true}, - {Key: []byte("recipient"), Value: []byte("AddrD"), Index: true}, - {Key: []byte("amount"), Value: []byte("..."), Index: true}, - }, - }, - }, - } - - All events are indexed by a composite key of the form {eventType}.{evenAttrKey}. - In the above examples, the following keys would be indexed: - - rewards.withdraw.address - - rewards.withdraw.source - - rewards.withdraw.amount - - rewards.withdraw.balance - - transfer.sender - - transfer.recipient - - transfer.amount - - Multiple event types with duplicate keys are allowed and are meant to - categorize unique and distinct events. In the above example, all events - indexed under the key `rewards.withdraw.address` will have the following - values stored and queryable: - - - AddrA - - AddrB - - To create a query for txs where address AddrA withdrew rewards: - query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA'") - - To create a query for txs where address AddrA withdrew rewards from source Y: - query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA' AND rewards.withdraw.source = 'Y'") - - To create a query for txs where AddrA transferred funds: - query.MustParse("tm.event = 'Tx' AND transfer.sender = 'AddrA'") - - The following queries would return no results: - query.MustParse("tm.event = 'Tx' AND transfer.sender = 'AddrZ'") - query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrZ'") - query.MustParse("tm.event = 'Tx' AND rewards.withdraw.source = 'W'") - - See list of all possible events here - https://godoc.org/github.com/cometbft/cometbft/types#pkg-constants - - For complete query syntax, check out - https://godoc.org/github.com/cometbft/cometbft/libs/pubsub/query. 
- - ```go - import rpchttp "github.com/cometbft/rpc/client/http" - import "github.com/cometbft/cometbft/types" - - client := rpchttp.New("tcp:0.0.0.0:26657", "/websocket") - err := client.Start() - if err != nil { - handle error - } - defer client.Stop() - ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) - defer cancel() - query := "tm.event = 'Tx' AND tx.height = 3" - txs, err := client.Subscribe(ctx, "test-client", query) - if err != nil { - handle error - } - - go func() { - for e := range txs { - fmt.Println("got ", e.Data.(types.EventDataTx)) - } - }() - ``` - - NOTE: if you're not reading events fast enough, CometBFT might - terminate the subscription. - parameters: - - in: query - name: query - required: true - schema: - type: string - example: tm.event = 'Tx' AND tx.height = 5 - description: | - query is a string, which has a form: "condition AND condition ..." (no OR at the - moment). condition has a form: "key operation operand". key is a string with - a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed). - operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand can be a - string (escaped with single quotes), number, date or time. - responses: - "200": - description: empty answer - content: - application/json: - schema: - $ref: "#/components/schemas/EmptyResponse" - "500": - description: empty error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorResponse" - /unsubscribe: - get: - summary: Unsubscribe from event on Websocket - tags: - - Websocket - operationId: unsubscribe - description: | - ```go - client := rpchttp.New("tcp:0.0.0.0:26657", "/websocket") - err := client.Start() - if err != nil { - handle error - } - defer client.Stop() - query := "tm.event = 'Tx' AND tx.height = 3" - err = client.Unsubscribe(context.Background(), "test-client", query) - if err != nil { - handle error - } - ``` - parameters: - - in: query - name: query - required: true - schema: - type: string - example: tm.event = 'Tx' AND tx.height = 5 - description: | - query is a string, which has a form: "condition AND condition ..." (no OR at the - moment). condition has a form: "key operation operand". key is a string with - a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed). - operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand can be a - string (escaped with single quotes), number, date or time. 
- responses: - "200": - description: Answer - content: - application/json: - schema: - $ref: "#/components/schemas/EmptyResponse" - "500": - description: Error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorResponse" - /unsubscribe_all: - get: - summary: Unsubscribe from all events via WebSocket - tags: - - Websocket - operationId: unsubscribe_all - description: | - Unsubscribe from all events via WebSocket - responses: - "200": - description: empty answer - content: - application/json: - schema: - $ref: "#/components/schemas/EmptyResponse" - "500": - description: empty error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorResponse" /health: get: summary: Node heartbeat From 74b7562d5432f6fd158eb5b9f99b8606523fa9ad Mon Sep 17 00:00:00 2001 From: Andy Nogueira Date: Mon, 20 Mar 2023 17:13:46 -0400 Subject: [PATCH 2/5] rename title Co-authored-by: Thane Thomson --- docs/qa/method.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/qa/method.md b/docs/qa/method.md index 9a0f9119666..6de0cbcf80c 100644 --- a/docs/qa/method.md +++ b/docs/qa/method.md @@ -1,7 +1,7 @@ --- order: 1 parent: - title: QA Process + title: Method order: 1 --- From 9a0d52a9839467d9961bd4d21c356075598fffb2 Mon Sep 17 00:00:00 2001 From: Andy Nogueira Date: Mon, 20 Mar 2023 17:15:30 -0400 Subject: [PATCH 3/5] fix text Co-authored-by: Thane Thomson --- rpc/openapi/openapi.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 5fa1f8e70fe..0bdacdc6aa4 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -17,7 +17,7 @@ info: `$CMTHOME/config/config.toml` file or by using the `--rpc.X` command-line flags. - Default rpc listen address is `tcp://127.0.0.1:26657`. + The default RPC listen address is `tcp://127.0.0.1:26657`. To set another address, set the `laddr` config parameter to desired value. CORS (Cross-Origin Resource Sharing) can be enabled by setting `cors_allowed_origins`, `cors_allowed_methods`, `cors_allowed_headers` From a3967b8adfcd3253d69d69aa991943f360da8f33 Mon Sep 17 00:00:00 2001 From: Andy Nogueira Date: Mon, 20 Mar 2023 17:16:01 -0400 Subject: [PATCH 4/5] add backquotes Co-authored-by: Thane Thomson --- rpc/openapi/openapi.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 0bdacdc6aa4..c55368ffbf5 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -24,7 +24,7 @@ info: config parameters. If testing using a local RPC node, under the `[rpc]` - section change the 'cors_allowed_origins' property, please add the URL of + section change the `cors_allowed_origins` property, please add the URL of the site where this OpenAPI document is running, for example: `cors_allowed_origins = ["http://localhost:8088"]` From e3e280124ef592b77fd47f87b60153fa166695da Mon Sep 17 00:00:00 2001 From: Andy Nogueira Date: Mon, 20 Mar 2023 17:17:33 -0400 Subject: [PATCH 5/5] update contact --- rpc/openapi/openapi.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index c55368ffbf5..b0705576857 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -2,8 +2,8 @@ openapi: 3.0.0 info: title: CometBFT RPC contact: - name: CometBFT RPC - url: https://docs.cometbft.com/main/rpc + name: CometBFT + url: https://cometbft.com/ description: | CometBFT supports the following RPC protocols: