Skip to content

Commit cf5a2b4

Browse files
authored
Merge pull request #2103 from IntersectMBO/kderme/integrate-node-10.7-fixes
Prepare 13.7.0.3
2 parents da66286 + 964831d commit cf5a2b4

12 files changed

Lines changed: 272 additions & 17 deletions

File tree

CHANGELOG.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,11 @@
11
# Revision history for cardano-db-sync
22

3+
## 13.7.0.3
4+
- Migration deletes legacy zero-amount `epoch_stake` rows
5+
- SMASH: validate `PoolMetadataHash` inputs, return 400 instead of 500
6+
- Ledger snapshots now use the consensus directory format (`<slot>/state`, `meta`, `utxoSize`). Old snapshots are not compatible.
7+
- Experimental: LSM-backed on-disk UTxO (opt-in via `ledger_backend` config, see [doc/configuration.md](doc/configuration.md#ledger-backend))
8+
39
## 13.7.0.2
410
- Fix slow rollbacks caused by suboptimal query plans on large tables [#2083]
511
- Update to cardano-node 10.7.0

cardano-db-sync/src/Cardano/DbSync/Era/Universal/Insert/LedgerEvent.hs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ insertNewEpochLedgerEvents syncEnv applyRes currentEpochNo@(EpochNo curEpoch) =
127127
LedgerRestrainedRewards e rwd creds ->
128128
adjustEpochRewards syncEnv ntw e rwd creds
129129
LedgerTotalRewards _e rwd ->
130-
validateEpochRewards tracer ntw (subFromCurrentEpoch 2) currentEpochNo rwd
130+
validateEpochRewards syncEnv ntw (subFromCurrentEpoch 2) currentEpochNo rwd
131131
LedgerAdaPots _ ->
132132
pure () -- These are handled separately by insertBlock
133133
LedgerGovInfo enacted dropped expired uncl -> do

cardano-db-sync/src/Cardano/DbSync/Era/Universal/Validate.hs

Lines changed: 17 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -70,13 +70,13 @@ validateEpochStake syncEnv applyRes firstCall = case apOldLedger applyRes of
7070
tracer = getTrace syncEnv
7171

7272
validateEpochRewards ::
73-
Trace IO Text ->
73+
SyncEnv ->
7474
Network ->
7575
EpochNo ->
7676
EpochNo ->
7777
Map StakeCred (Set Ledger.Reward) ->
7878
ExceptT SyncNodeError DB.DbM ()
79-
validateEpochRewards tracer network _earnedEpochNo spendableEpochNo rmap = do
79+
validateEpochRewards syncEnv network earnedEpochNo spendableEpochNo rmap = do
8080
actualCount <- lift $ DB.queryNormalEpochRewardCount (unEpochNo spendableEpochNo)
8181
if actualCount /= expectedCount
8282
then do
@@ -90,7 +90,19 @@ validateEpochRewards tracer network _earnedEpochNo spendableEpochNo rmap = do
9090
, " but got "
9191
, textShow actualCount
9292
]
93-
logFullRewardMap tracer spendableEpochNo network (convertPoolRewards rmap)
93+
let rewards = convertPoolRewards rmap
94+
logFullRewardMap tracer spendableEpochNo network rewards
95+
addRewardConstraintsIfNotExist syncEnv tracer
96+
insertRewards syncEnv network earnedEpochNo spendableEpochNo (Map.toList $ Generic.unRewards rewards)
97+
newCount <- lift $ DB.queryNormalEpochRewardCount (unEpochNo spendableEpochNo)
98+
liftIO . logInfo tracer $
99+
mconcat
100+
[ "validateEpochRewards: inserted missing rewards. Count now "
101+
, textShow newCount
102+
, " (expected "
103+
, textShow expectedCount
104+
, ")"
105+
]
94106
else do
95107
liftIO
96108
. logInfo tracer
@@ -101,6 +113,8 @@ validateEpochRewards tracer network _earnedEpochNo spendableEpochNo rmap = do
101113
, textShow actualCount
102114
]
103115
where
116+
tracer = getTrace syncEnv
117+
104118
expectedCount :: Word64
105119
expectedCount = fromIntegral . sum $ map Set.size (Map.elems rmap)
106120

cardano-smash-server/src/Cardano/SMASH/Server/PoolDataLayer.hs

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@ import Cardano.SMASH.Server.Types
1616
import qualified Data.ByteString.Base16 as Base16
1717
import qualified Data.Map.Strict as Map
1818
import Data.Pool (Pool)
19-
import qualified Data.Text as Text
2019
import qualified Data.Text.Encoding as Text
2120
import Data.Time.Clock (UTCTime)
2221
import Data.Time.Clock.POSIX (utcTimeToPOSIXSeconds)
@@ -217,19 +216,23 @@ isRegistered pid (mEpochNo, certs) = case Map.lookup pid certs of
217216
Just (Db.Retirement retEpochNo) -> Right $ Just retEpochNo > mEpochNo
218217
Just (Db.Register _) -> Right True
219218

219+
-- PoolId is validated at construction via parsePoolId in FromHttpApiData/FromJSON,
220+
-- and toDbPoolId always produces valid hex, so this decode cannot fail in practice.
220221
fromDbPoolId :: PoolId -> ByteString
221222
fromDbPoolId pid =
222223
case Base16.decode $ Text.encodeUtf8 $ getPoolId pid of
223-
Left err -> panic $ Text.pack err
224+
Left err -> error $ "fromDbPoolId: invariant violated, PoolId contains invalid hex: " <> err
224225
Right bs -> bs
225226

226227
toDbPoolId :: ByteString -> PoolId
227228
toDbPoolId bs = PoolId $ Text.decodeUtf8 $ Base16.encode bs
228229

230+
-- PoolMetadataHash is validated at construction via parsePoolMetaHash in FromHttpApiData/FromJSON,
231+
-- and toDbServantMetaHash always produces valid hex, so this decode cannot fail in practice.
229232
fromDbPoolMetaHash :: PoolMetadataHash -> ByteString
230233
fromDbPoolMetaHash pmh =
231234
case Base16.decode $ Text.encodeUtf8 $ getPoolMetadataHash pmh of
232-
Left err -> panic $ Text.pack err
235+
Left err -> error $ "fromDbPoolMetaHash: invariant violated, PoolMetadataHash contains invalid hex: " <> err
233236
Right bs -> bs
234237

235238
toDbServantMetaHash :: ByteString -> PoolMetadataHash

cardano-smash-server/src/Cardano/SMASH/Server/Types.hs

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -122,20 +122,28 @@ instance ToJSON PoolMetadataHash where
122122
[ "poolHash" .= poolHash
123123
]
124124

125-
-- The validation of @PoolMetadataHash@ is a bit more involved and would require
126-
-- an analysis with some bounds on the size.
127125
instance FromJSON PoolMetadataHash where
128126
parseJSON = withObject "PoolMetadataHash" $ \o -> do
129127
poolHash <- o .: "poolHash"
130-
pure $ PoolMetadataHash poolHash
128+
case parsePoolMetaHash poolHash of
129+
Left err -> fail $ toS err
130+
Right pmh -> pure pmh
131131

132132
instance ToSchema PoolMetadataHash
133133

134134
instance ToParamSchema PoolMetadataHash
135135

136-
-- TODO: Add sanity checks
137136
instance FromHttpApiData PoolMetadataHash where
138-
parseUrlPiece poolMetadataHash = Right $ PoolMetadataHash poolMetadataHash
137+
parseUrlPiece = parsePoolMetaHash
138+
139+
-- | Pool metadata hash is a SHA-256 hash (32 bytes), hex-encoded as 64 characters.
140+
parsePoolMetaHash :: Text -> Either Text PoolMetadataHash
141+
parsePoolMetaHash poolHash =
142+
case B16.decode (encodeUtf8 poolHash) of
143+
Left _ -> Left "Unable to parse pool metadata hash. Expected hex encoding."
144+
Right bs
145+
| BS.length bs == 32 -> Right $ PoolMetadataHash poolHash
146+
| otherwise -> Left "Unable to parse pool metadata hash. Expected 32 bytes (64 hex characters)."
139147

140148
-- Result wrapper.
141149
newtype ApiResult err a = ApiResult (Either err a)
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
-- Persistent migration: delete legacy zero-amount epoch_stake rows.
-- Runs only when it bumps schema_version.stage_two from 46 to 47, so
-- re-running the file on an already-migrated DB is a no-op.
CREATE FUNCTION migrate() RETURNS void AS $$
DECLARE
  target_version int ;
BEGIN
  SELECT stage_two + 1 INTO target_version FROM schema_version ;
  IF target_version != 47 THEN
    RETURN ;
  END IF ;

  -- Zero-amount rows carry no information and bloat the table.
  DELETE FROM "epoch_stake" WHERE "amount" = 0 ;

  UPDATE schema_version SET stage_two = target_version ;
  RAISE NOTICE 'DB has been migrated to stage_two version %', target_version ;
END ;
$$ LANGUAGE plpgsql ;

SELECT migrate() ;
DROP FUNCTION migrate() ;
Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
#!/usr/bin/env bash
#
# Validate the epoch table and optionally fix mismatches.
#
# Usage:
#   scripts/check-and-fix-epoch-table.sh [epoch_no]
#
#   epoch_no: specific epoch to check (default: -1 = all epochs)
#
# Requires PGPASSFILE to be set (or ~/.pgpass configured). The database name
# is taken from the PGDATABASE env var or, failing that, from the first line
# of the pgpass file.
#
# Examples:
#   PGPASSFILE=config/pgpass-mainnet scripts/check-and-fix-epoch-table.sh
#   PGPASSFILE=config/pgpass-mainnet scripts/check-and-fix-epoch-table.sh 620
set -euo pipefail

EPOCH_NO=${1:--1}
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
VALIDATE_SQL="$SCRIPT_DIR/validate-epoch-table.sql"
FIX_SQL="$SCRIPT_DIR/fix-epoch-table.sql"

if [[ ! -f "$VALIDATE_SQL" || ! -f "$FIX_SQL" ]]; then
  echo "Error: expected $VALIDATE_SQL and $FIX_SQL next to this script." >&2
  exit 1
fi

# Derive the DB name from PGDATABASE or the pgpass file. The pgpass line
# format is host:port:database:user:password, so the database name is
# field 3 of the first line (the original comment said field 5 — wrong).
DB_NAME=${PGDATABASE:-}
if [[ -z "$DB_NAME" && -n "${PGPASSFILE:-}" && -f "$PGPASSFILE" ]]; then
  DB_NAME=$(awk -F: 'NR==1{print $3}' "$PGPASSFILE")
fi
if [[ -z "$DB_NAME" ]]; then
  echo "Error: could not determine database name. Set PGDATABASE or PGPASSFILE." >&2
  exit 1
fi

# Run one SQL file against the DB and print psql's aligned output.
# Note: -X already means --no-psqlrc, so it is passed only once.
run_sql() {
  psql -X -v ON_ERROR_STOP=1 -v epoch_no="$EPOCH_NO" -f "$1" "$DB_NAME"
}

# Count data rows in aligned psql output: the 2-line header and the trailing
# "(N rows)" footer are skipped by keeping only lines that start with a digit
# (optionally preceded by whitespace).
count_rows() {
  awk '/^[[:space:]]*[0-9]/ {c++} END {print c+0}' <<<"$1"
}

echo "Validating epoch table for database '$DB_NAME' (epoch_no=$EPOCH_NO)..."
OUTPUT=$(run_sql "$VALIDATE_SQL")
MISMATCHES=$(count_rows "$OUTPUT")

if [[ "$MISMATCHES" -eq 0 ]]; then
  echo "No mismatches. Epoch table is consistent."
  exit 0
fi

echo
echo "Found $MISMATCHES mismatching epoch(s):"
echo "$OUTPUT"
echo

# Interactive confirmation before mutating the table.
read -r -p "Apply fix? [y/N] " REPLY
case "$REPLY" in
  [yY]|[yY][eE][sS])
    echo "Applying fix..."
    run_sql "$FIX_SQL"
    echo "Fix applied. Re-validating..."
    RECHECK=$(run_sql "$VALIDATE_SQL")
    REMAINING=$(count_rows "$RECHECK")
    if [[ "$REMAINING" -eq 0 ]]; then
      echo "Epoch table is now consistent."
    else
      echo "Warning: $REMAINING mismatch(es) remain:" >&2
      echo "$RECHECK" >&2
      exit 1
    fi
    ;;
  *)
    echo "No changes applied."
    exit 0
    ;;
esac

scripts/fix-epoch-table.sql

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
-- Recompute epoch table aggregates from the block/tx tables and upsert them.
--
-- Usage:
--   Fix a specific epoch:
--     psql -v epoch_no=620 -f scripts/fix-epoch-table.sql cexplorer
--
--   Fix all epochs:
--     psql -v epoch_no=-1 -f scripts/fix-epoch-table.sql cexplorer
--
-- Everything runs inside one transaction so a failure cannot leave the
-- table partially rewritten.

BEGIN;

WITH per_epoch AS (
    -- Aggregate per epoch straight from the source-of-truth tables.
    -- LEFT JOIN keeps epochs whose blocks contain no transactions.
    SELECT
        b.epoch_no                   AS epoch_no,
        COUNT(DISTINCT b.id)         AS blk_count,
        MIN(b.time)                  AS start_time,
        MAX(b.time)                  AS end_time,
        COALESCE(SUM(tx.out_sum), 0) AS out_sum,
        COALESCE(SUM(tx.fee), 0)     AS fee_sum,
        COUNT(tx.id)                 AS tx_count
    FROM block b
    LEFT JOIN tx ON tx.block_id = b.id
    WHERE b.epoch_no IS NOT NULL
      -- :epoch_no = -1 means "all epochs".
      AND (CAST(:epoch_no AS bigint) = -1 OR b.epoch_no = CAST(:epoch_no AS bigint))
    GROUP BY b.epoch_no
)
INSERT INTO epoch (no, out_sum, fees, tx_count, blk_count, start_time, end_time)
SELECT
    epoch_no,
    out_sum,
    fee_sum,
    tx_count,
    blk_count,
    start_time,
    end_time
FROM per_epoch
ON CONFLICT (no) DO UPDATE SET
    out_sum    = EXCLUDED.out_sum,
    fees       = EXCLUDED.fees,
    tx_count   = EXCLUDED.tx_count,
    blk_count  = EXCLUDED.blk_count,
    start_time = EXCLUDED.start_time,
    end_time   = EXCLUDED.end_time
-- Only rewrite rows whose values actually changed, to avoid needless
-- row churn (IS DISTINCT FROM is NULL-safe inequality).
WHERE
       epoch.out_sum    IS DISTINCT FROM EXCLUDED.out_sum
    OR epoch.fees       IS DISTINCT FROM EXCLUDED.fees
    OR epoch.tx_count   IS DISTINCT FROM EXCLUDED.tx_count
    OR epoch.blk_count  IS DISTINCT FROM EXCLUDED.blk_count
    OR epoch.start_time IS DISTINCT FROM EXCLUDED.start_time
    OR epoch.end_time   IS DISTINCT FROM EXCLUDED.end_time;

COMMIT;

scripts/run-schema-checks.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,11 @@
77
set -euo pipefail
88

99
DB="${1:?Usage: $0 <dbname>}"
10-
SCHEMA_DIR="$(cd "$(dirname "$0")/../schema" && pwd)"
10+
SCRIPTS_DIR="$(cd "$(dirname "$0")" && pwd)"
1111

1212
echo "Running referential integrity tests on ${DB}..."
13-
psql -d "$DB" -f "$SCHEMA_DIR/test-referential-integrity.sql"
13+
psql -d "$DB" -f "$SCRIPTS_DIR/test-referential-integrity.sql"
1414

1515
echo ""
1616
echo "Running uniqueness tests on ${DB}..."
17-
psql -d "$DB" -f "$SCHEMA_DIR/test-uniqueness.sql"
17+
psql -d "$DB" -f "$SCRIPTS_DIR/test-uniqueness.sql"
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
-- The constraint is immediately dropped after a successful check.
44
-- If it fails, a WARNING is raised with the violation details.
55
--
6-
-- Usage: psql -d <dbname> -f schema/test-referential-integrity.sql
6+
-- Usage: psql -d <dbname> -f scripts/test-referential-integrity.sql
77
-- Or run all tests: ./scripts/run-schema-checks.sh <dbname>
88
--
99
-- The entire script runs inside a transaction that is always rolled back,

0 commit comments

Comments
 (0)