-rw-r--r--  .github/workflows/github-action-checks.yml  19
-rw-r--r--  .travis.yml  7
-rw-r--r--  README.mediawiki  72
-rw-r--r--  bip-0002.mediawiki  2
-rw-r--r--  bip-0012.mediawiki  4
-rw-r--r--  bip-0039.mediawiki  4
-rw-r--r--  bip-0084.mediawiki  2
-rw-r--r--  bip-0085.mediawiki  80
-rw-r--r--  bip-0086.mediawiki  2
-rw-r--r--  bip-0093.mediawiki  599
-rw-r--r--  bip-0119.mediawiki  11
-rw-r--r--  bip-0127.mediawiki  1
-rw-r--r--  bip-0129.mediawiki  2
-rw-r--r--  bip-0137.mediawiki  2
-rw-r--r--  bip-0151.mediawiki  2
-rw-r--r--  bip-0158.mediawiki  6
-rw-r--r--  bip-0174.mediawiki  26
-rw-r--r--  bip-0300.mediawiki  299
-rw-r--r--  bip-0324.mediawiki  173
-rw-r--r--  bip-0327.mediawiki  829
-rw-r--r--  bip-0327/gen_vectors_helper.py  184
-rw-r--r--  bip-0327/reference.py  880
-rwxr-xr-x  bip-0327/tests.sh  8
-rw-r--r--  bip-0327/vectors/det_sign_vectors.json  144
-rw-r--r--  bip-0327/vectors/key_agg_vectors.json  88
-rw-r--r--  bip-0327/vectors/key_sort_vectors.json  18
-rw-r--r--  bip-0327/vectors/nonce_agg_vectors.json  51
-rw-r--r--  bip-0327/vectors/nonce_gen_vectors.json  44
-rw-r--r--  bip-0327/vectors/sig_agg_vectors.json  151
-rw-r--r--  bip-0327/vectors/sign_verify_vectors.json  212
-rw-r--r--  bip-0327/vectors/tweak_vectors.json  84
-rw-r--r--  bip-0329.mediawiki  22
-rw-r--r--  bip-0340.mediawiki  57
-rw-r--r--  bip-0340/reference.py  4
-rw-r--r--  bip-0340/test-vectors.csv  4
-rw-r--r--  bip-0340/test-vectors.py  20
-rw-r--r--  bip-0341.mediawiki  6
-rw-r--r--  bip-0342.mediawiki  2
-rw-r--r--  bip-0343.mediawiki  2
-rw-r--r--  bip-0350.mediawiki  2
-rw-r--r--  bip-0370.mediawiki  10
-rw-r--r--  bip-0371.mediawiki  6
-rw-r--r--  bip-0380.mediawiki  60
-rw-r--r--  bip-0381.mediawiki  48
-rw-r--r--  bip-0382.mediawiki  49
-rw-r--r--  bip-0383.mediawiki  34
-rw-r--r--  bip-0384.mediawiki  35
-rw-r--r--  bip-0385.mediawiki  22
-rw-r--r--  bip-0386.mediawiki  25
-rw-r--r--  bip-0389.mediawiki  109
-rwxr-xr-x  scripts/buildtable.pl  4
-rwxr-xr-x  scripts/diffcheck.sh  13
52 files changed, 4282 insertions, 258 deletions
diff --git a/.github/workflows/github-action-checks.yml b/.github/workflows/github-action-checks.yml
new file mode 100644
index 0000000..bfc014b
--- /dev/null
+++ b/.github/workflows/github-action-checks.yml
@@ -0,0 +1,19 @@
+name: GitHub Actions Check
+run-name: ${{ github.actor }} Checks 🚀
+on: [push, pull_request]
+jobs:
+  Link-Format-Checks:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - run: scripts/link-format-chk.sh
+  Build-Table-Checks:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - run: scripts/buildtable.pl >/tmp/table.mediawiki || exit 1
+  Diff-Checks:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - run: scripts/diffcheck.sh
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 70d339a..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-os: linux
-language: generic
-script:
-  - scripts/link-format-chk.sh
-  - scripts/buildtable.pl >/tmp/table.mediawiki || exit 1
-  - diff README.mediawiki /tmp/table.mediawiki | grep '^[<>] |' >/tmp/after.diff || true
-  - if git checkout HEAD^ && scripts/buildtable.pl >/tmp/table.mediawiki 2>/dev/null; then diff README.mediawiki /tmp/table.mediawiki | grep '^[<>] |' >/tmp/before.diff || true; newdiff=$(diff -s /tmp/before.diff /tmp/after.diff -u | grep '^+'); if [ -n "$newdiff" ]; then echo "$newdiff"; exit 1; fi; else echo 'Cannot build previous commit table for comparison'; fi
diff --git a/README.mediawiki b/README.mediawiki
index f4a9ac0..6dbd798 100644
--- a/README.mediawiki
+++ b/README.mediawiki
@@ -440,7 +440,7 @@ Those proposing changes should consider that ultimately consent may rest with th
| Derivation scheme for P2WPKH based accounts
| Pavol Rusnak
| Informational
-| Draft
+| Final
|-
| [[bip-0085.mediawiki|85]]
| Applications
@@ -452,7 +452,7 @@ Those proposing changes should consider that ultimately consent may rest with th
| [[bip-0086.mediawiki|86]]
| Applications
| Key Derivation for Single Key P2TR Outputs
-| Andrew Chow
+| Ava Chow
| Standard
| Draft
|- style="background-color: #ffffcf"
@@ -484,6 +484,13 @@ Those proposing changes should consider that ultimately consent may rest with th
| Standard
| Final
|-
+| [[bip-0093.mediawiki|93]]
+| Applications
+| codex32: Checksummed SSSS-aware BIP32 seeds
+| Leon Olsson Curr, Pearlwort Sneed
+| Informational
+| Draft
+|-
| [[bip-0098.mediawiki|98]]
| Consensus (soft fork)
| Fast Merkle Trees
@@ -893,7 +900,7 @@ Those proposing changes should consider that ultimately consent may rest with th
| [[bip-0174.mediawiki|174]]
| Applications
| Partially Signed Bitcoin Transaction Format
-| Andrew Chow
+| Ava Chow
| Standard
| Final
|- style="background-color: #ffcfcf"
@@ -997,11 +1004,25 @@ Those proposing changes should consider that ultimately consent may rest with th
|-
| [[bip-0326.mediawiki|326]]
| Applications
-| Anti-fee-sniping protection in taproot transactions
+| Anti-fee-sniping in taproot transactions
| Chris Belcher
| Informational
| Draft
|-
+| [[bip-0327.mediawiki|327]]
+|
+| MuSig2 for BIP340-compatible Multi-Signatures
+| Jonas Nick, Tim Ruffing, Elliott Jin
+| Informational
+| Draft
+|-
+| [[bip-0329.mediawiki|329]]
+| Applications
+| Wallet Labels Export Format
+| Craig Raw
+| Informational
+| Draft
+|-
| [[bip-0330.mediawiki|330]]
| Peer Services
| Transaction announcements reconciliation
@@ -1022,41 +1043,41 @@ Those proposing changes should consider that ultimately consent may rest with th
| Suhas Daftuar
| Standard
| Draft
-|-
+|- style="background-color: #cfffcf"
| [[bip-0340.mediawiki|340]]
|
| Schnorr Signatures for secp256k1
| Pieter Wuille, Jonas Nick, Tim Ruffing
| Standard
-| Draft
-|-
+| Final
+|- style="background-color: #cfffcf"
| [[bip-0341.mediawiki|341]]
| Consensus (soft fork)
| Taproot: SegWit version 1 spending rules
| Pieter Wuille, Jonas Nick, Anthony Towns
| Standard
-| Draft
-|-
+| Final
+|- style="background-color: #cfffcf"
| [[bip-0342.mediawiki|342]]
| Consensus (soft fork)
| Validation of Taproot Scripts
| Pieter Wuille, Jonas Nick, Anthony Towns
| Standard
-| Draft
-|- style="background-color: #ffffcf"
+| Final
+|- style="background-color: #cfffcf"
| [[bip-0343.mediawiki|343]]
| Consensus (soft fork)
| Mandatory activation of taproot deployment
| Shinobius, Michael Folkson
| Standard
-| Proposed
+| Final
|-
| [[bip-0350.mediawiki|350]]
| Applications
| Bech32m format for v1+ witness addresses
| Pieter Wuille
| Standard
-| Draft
+| Final
|-
| [[bip-0351.mediawiki|351]]
| Applications
@@ -1068,14 +1089,14 @@ Those proposing changes should consider that ultimately consent may rest with th
| [[bip-0370.mediawiki|370]]
| Applications
| PSBT Version 2
-| Andrew Chow
+| Ava Chow
| Standard
| Draft
|-
| [[bip-0371.mediawiki|371]]
| Applications
| Taproot Fields for PSBT
-| Andrew Chow
+| Ava Chow
| Standard
| Draft
|-
@@ -1089,49 +1110,56 @@ Those proposing changes should consider that ultimately consent may rest with th
| [[bip-0380.mediawiki|380]]
| Applications
| Output Script Descriptors General Operation
-| Pieter Wuille, Andrew Chow
+| Pieter Wuille, Ava Chow
| Informational
| Draft
|-
| [[bip-0381.mediawiki|381]]
| Applications
| Non-Segwit Output Script Descriptors
-| Pieter Wuille, Andrew Chow
+| Pieter Wuille, Ava Chow
| Informational
| Draft
|-
| [[bip-0382.mediawiki|382]]
| Applications
| Segwit Output Script Descriptors
-| Pieter Wuille, Andrew Chow
+| Pieter Wuille, Ava Chow
| Informational
| Draft
|-
| [[bip-0383.mediawiki|383]]
| Applications
| Multisig Output Script Descriptors
-| Pieter Wuille, Andrew Chow
+| Pieter Wuille, Ava Chow
| Informational
| Draft
|-
| [[bip-0384.mediawiki|384]]
| Applications
| combo() Output Script Descriptors
-| Pieter Wuille, Andrew Chow
+| Pieter Wuille, Ava Chow
| Informational
| Draft
|-
| [[bip-0385.mediawiki|385]]
| Applications
| raw() and addr() Output Script Descriptors
-| Pieter Wuille, Andrew Chow
+| Pieter Wuille, Ava Chow
| Informational
| Draft
|-
| [[bip-0386.mediawiki|386]]
| Applications
| tr() Output Script Descriptors
-| Pieter Wuille, Andrew Chow
+| Pieter Wuille, Ava Chow
+| Informational
+| Draft
+|-
+| [[bip-0389.mediawiki|389]]
+| Applications
+| Multipath Descriptor Key Expressions
+| Ava Chow
| Informational
| Draft
|}
diff --git a/bip-0002.mediawiki b/bip-0002.mediawiki
index c6eb950..af8bb30 100644
--- a/bip-0002.mediawiki
+++ b/bip-0002.mediawiki
@@ -102,7 +102,7 @@ The BIP editors are intended to fulfill administrative and editorial responsibil
===Specification===
-BIPs should be written in mediawiki format.
+BIPs should be written in mediawiki or markdown format.
Each BIP should have the following parts:
diff --git a/bip-0012.mediawiki b/bip-0012.mediawiki
index 70069d6..bd3d88c 100644
--- a/bip-0012.mediawiki
+++ b/bip-0012.mediawiki
@@ -43,11 +43,11 @@ OP_EVAL allows the receiver of bitcoins to specify how they can be spent when th
If ''serialized script'' is a large or complicated multi-signature script, then the burden of paying for it (in increased transaction fees due to more signature operations or transaction size) is shifted from the sender to the receiver.
-The main objection to OP_EVAL is that it adds complexity, and complexity is the enemy of security. Also, evaluating data as code has a long record of being a source of security vulnerabilties.
+The main objection to OP_EVAL is that it adds complexity, and complexity is the enemy of security. Also, evaluating data as code has a long record of being a source of security vulnerabilities.
That same argument can be applied to the existing Bitcoin 'scripting' system; scriptPubKeys are transmit as data across the network and are then interpreted by every bitcoin implementation. OP_EVAL just moves the data that will be interpreted. It is debatable whether or not the entire idea of putting a little interpreted expression evaluation language at the core of Bitcoin was brilliant or stupid, but the existence of OP_EVAL does not make the expression language less secure.
-There is a 1-confirmation attack on old clients that interepret OP_EVAL as a no-op, but it is expensive and difficult in practice. The attack is:
+There is a 1-confirmation attack on old clients that interpret OP_EVAL as a no-op, but it is expensive and difficult in practice. The attack is:
# Attacker creates an OP_EVAL transaction that is valid as seen by old clients, but invalid for new clients.
# Attacker also creates a standard transaction that spends the OP_EVAL transaction, and pays the victim.
diff --git a/bip-0039.mediawiki b/bip-0039.mediawiki
index d8a4d25..5170edd 100644
--- a/bip-0039.mediawiki
+++ b/bip-0039.mediawiki
@@ -39,7 +39,7 @@ security is improved but the sentence length increases. We refer to the
initial entropy length as ENT. The allowed size of ENT is 128-256 bits.
First, an initial entropy of ENT bits is generated. A checksum is generated by
-taking the first <pre>ENT / 32</pre> bits of its SHA256 hash. This checksum is
+taking the first <code>ENT / 32</code> bits of its SHA256 hash. This checksum is
appended to the end of the initial entropy. Next, these concatenated bits
are split into groups of 11 bits, each encoding a number from 0-2047, serving
as an index into a wordlist. Finally, we convert these numbers into words and
@@ -168,6 +168,7 @@ Haskell:
JavaScript:
* https://github.com/bitpay/bitcore/tree/master/packages/bitcore-mnemonic
* https://github.com/bitcoinjs/bip39 (used by [[https://github.com/blockchain/My-Wallet-V3/blob/v3.8.0/src/hd-wallet.js#L121-L146|blockchain.info]])
+* https://github.com/dashhive/DashPhrase.js
* https://github.com/hujiulong/web-bip39
Java:
@@ -190,6 +191,7 @@ Swift:
* https://github.com/matter-labs/web3swift/blob/develop/Sources/web3swift/KeystoreManager/BIP39.swift
* https://github.com/zcash-hackworks/MnemonicSwift
* https://github.com/ShenghaiWang/BIP39
+* https://github.com/anquii/BIP39
C++:
* https://github.com/libbitcoin/libbitcoin-system/blob/master/include/bitcoin/system/wallet/mnemonic.hpp
diff --git a/bip-0084.mediawiki b/bip-0084.mediawiki
index dc5a05d..7f20217 100644
--- a/bip-0084.mediawiki
+++ b/bip-0084.mediawiki
@@ -5,7 +5,7 @@
Author: Pavol Rusnak <stick@satoshilabs.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0084
- Status: Draft
+ Status: Final
Type: Informational
Created: 2017-12-28
License: CC0-1.0
diff --git a/bip-0085.mediawiki b/bip-0085.mediawiki
index 6e7dd0e..0627755 100644
--- a/bip-0085.mediawiki
+++ b/bip-0085.mediawiki
@@ -106,7 +106,7 @@ OUTPUT
* AirGap Vault: [https://github.com/airgap-it/airgap-vault/commit/d64332fc2f332be622a1229acb27f621e23774d6]
-btc_hd_wallet: [https://github.com/scgbckbone/btc-hd-wallet]
+* btc_hd_wallet: [https://github.com/scgbckbone/btc-hd-wallet]
==Applications==
@@ -244,7 +244,7 @@ INPUT:
OUTPUT
* DERIVED ENTROPY=ead0b33988a616cf6a497f1c169d9e92562604e38305ccd3fc96f2252c177682
-* DERIVED WIF=xprv9s21ZrQH143K2srSbCSg4m4kLvPMzcWydgmKEnMmoZUurYuBuYG46c6P71UGXMzmriLzCCBvKQWBUv3vPB3m1SATMhp3uEjXHJ42jFg7myX
+* DERIVED XPRV=xprv9s21ZrQH143K2srSbCSg4m4kLvPMzcWydgmKEnMmoZUurYuBuYG46c6P71UGXMzmriLzCCBvKQWBUv3vPB3m1SATMhp3uEjXHJ42jFg7myX
===HEX===
Application number: 128169'
@@ -262,6 +262,82 @@ INPUT:
OUTPUT
* DERIVED ENTROPY=492db4698cf3b73a5a24998aa3e9d7fa96275d85724a91e71aa2d645442f878555d078fd1f1f67e368976f04137b1f7a0d19232136ca50c44614af72b5582a5c
+===PWD BASE64===
+Application number: 707764'
+
+The derivation path format is: <code>m/83696968'/707764'/{pwd_len}'/{index}'</code>
+
+`20 <= pwd_len <= 86`
+
+[https://datatracker.ietf.org/doc/html/rfc4648 Base64] encode all 64 bytes of entropy.
+Remove any spaces or new lines inserted by the Base64 encoding process. Slice the Base64 result string
+from index 0 to `pwd_len`. This slice is the password. As `pwd_len` is limited to 86, passwords will not contain padding.
+
+Entropy calculation:<br>
+R = 64 (base64 - do not count padding)<br>
+L = pwd_len<br>
+Entropy = log2(R ** L)<br>
+
+{| class="wikitable" style="margin:auto"
+! pwd_length !! entropy (approx. bits)
+|-
+| 20 || 120.0
+|-
+| 24 || 144.0
+|-
+| 32 || 192.0
+|-
+| 64 || 384.0
+|-
+| 86 || 516.0
+|}
+
+INPUT:
+* MASTER BIP32 ROOT KEY: xprv9s21ZrQH143K2LBWUUQRFXhucrQqBpKdRRxNVq2zBqsx8HVqFk2uYo8kmbaLLHRdqtQpUm98uKfu3vca1LqdGhUtyoFnCNkfmXRyPXLjbKb
+* PATH: m/83696968'/707764'/21'/0'
+
+OUTPUT
+* DERIVED ENTROPY=d7ad61d4a76575c5bad773feeb40299490b224e8e5df6c8ad8fe3d0a6eed7b85ead9fef7bcca8160f0ee48dc6e92b311fc71f2146623cc6952c03ce82c7b63fe
+* DERIVED PWD=dKLoepugzdVJvdL56ogNV
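+
+The derivation above can be sketched in Python as follows. This is a minimal illustration, not a reference implementation: the <code>derive_base64_pwd</code> name is ours, and the 64 bytes of derived entropy are assumed to have already been obtained from the <code>m/83696968'/707764'/{pwd_len}'/{index}'</code> path.
+
+<source lang="python">
+import base64
+
+def derive_base64_pwd(entropy: bytes, pwd_len: int) -> str:
+    # entropy is the 64 bytes of BIP-85 derived entropy for this path.
+    assert len(entropy) == 64
+    assert 20 <= pwd_len <= 86
+    encoded = base64.b64encode(entropy).decode("ascii")
+    # Base64 of 64 bytes is 88 characters; the "==" padding only occupies the
+    # last two positions, so the first 86 characters are padding-free.
+    return encoded[:pwd_len]
+</source>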
+
+===PWD BASE85===
+Application number: 707785'
+
+The derivation path format is: <code>m/83696968'/707785'/{pwd_len}'/{index}'</code>
+
+`10 <= pwd_len <= 80`
+
+Base85 encode all 64 bytes of entropy.
+Remove any spaces or new lines inserted by the Base85 encoding process. Slice the Base85 result string
+from index 0 to `pwd_len`. This slice is the password. `pwd_len` is limited to 80 characters.
+
+Entropy calculation:<br>
+R = 85<br>
+L = pwd_len<br>
+Entropy = log2(R ** L)<br>
+
+{| class="wikitable" style="margin:auto"
+! pwd_length !! entropy (approx. bits)
+|-
+| 10 || 64.0
+|-
+| 15 || 96.0
+|-
+| 20 || 128.0
+|-
+| 30 || 192.0
+|-
+| 80 || 512.0
+|}
+
+INPUT:
+* MASTER BIP32 ROOT KEY: xprv9s21ZrQH143K2LBWUUQRFXhucrQqBpKdRRxNVq2zBqsx8HVqFk2uYo8kmbaLLHRdqtQpUm98uKfu3vca1LqdGhUtyoFnCNkfmXRyPXLjbKb
+* PATH: m/83696968'/707785'/12'/0'
+
+OUTPUT
+* DERIVED ENTROPY=f7cfe56f63dca2490f65fcbf9ee63dcd85d18f751b6b5e1c1b8733af6459c904a75e82b4a22efff9b9e69de2144b293aa8714319a054b6cb55826a8e51425209
+* DERIVED PWD=_s`{TW89)i4`
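+
+A corresponding sketch for the Base85 case, assuming Python's <code>base64.b85encode</code> alphabet matches the intended Base85 variant (the helper name is ours):
+
+<source lang="python">
+import base64
+
+def derive_base85_pwd(entropy: bytes, pwd_len: int) -> str:
+    assert len(entropy) == 64
+    assert 10 <= pwd_len <= 80
+    # Base85 of 64 bytes is exactly 80 characters, with no padding.
+    encoded = base64.b85encode(entropy).decode("ascii")
+    return encoded[:pwd_len]
+</source>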
+
===RSA===
Application number: 828365'
diff --git a/bip-0086.mediawiki b/bip-0086.mediawiki
index f724884..529f094 100644
--- a/bip-0086.mediawiki
+++ b/bip-0086.mediawiki
@@ -2,7 +2,7 @@
BIP: 86
Layer: Applications
Title: Key Derivation for Single Key P2TR Outputs
- Author: Andrew Chow <andrew@achow101.com>
+ Author: Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0086
Status: Draft
diff --git a/bip-0093.mediawiki b/bip-0093.mediawiki
new file mode 100644
index 0000000..da349fd
--- /dev/null
+++ b/bip-0093.mediawiki
@@ -0,0 +1,599 @@
+<pre>
+  BIP: 93
+  Layer: Applications
+  Title: codex32: Checksummed SSSS-aware BIP32 seeds
+  Author: Leon Olsson Curr and Pearlwort Sneed <pearlwort@wpsoftware.net>
+          Andrew Poelstra <andrew.poelstra@gmail.com>
+  Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0093
+  Status: Draft
+  Type: Informational
+  Created: 2023-02-13
+  License: BSD-3-Clause
+  Post-History: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2023-February/021469.html
+</pre>
+
+==Introduction==
+
+===Abstract===
+
+This document describes a standard for backing up and restoring the master seed of a
+[https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP-0032] hierarchical deterministic wallet, using Shamir's secret sharing.
+It includes an encoding format, a BCH error-correcting checksum, and algorithms for share generation and secret recovery.
+Secret data can be split into up to 31 shares.
+A minimum threshold of shares, which can be between 1 and 9, is needed to recover the secret; with fewer shares, no information about the secret is recoverable.
+
+===Copyright===
+
+This document is licensed under the 3-clause BSD license.
+
+===Motivation===
+
+BIP-0032 master seed data is the source entropy used to derive all private keys in an HD wallet.
+Safely storing this secret data is the hardest and most important part of self-custody.
+However, there is a tension between security, which demands limiting the number of backups, and resilience, which demands widely replicated backups.
+Encrypting the seed does not change this fundamental tradeoff, since it leaves essentially the same problem of how to back up the encryption key(s).
+
+To allow users freedom to make this tradeoff, we use Shamir's secret sharing, which guarantees that any number of shares less than the threshold leaks no information about the secret.
+This approach allows increasing safety by widely distributing the generated shares, while also providing security against the compromise of one or more shares (as long as fewer than the threshold have been compromised).
+
+[https://github.com/satoshilabs/slips/blob/master/slip-0039.md SLIP-0039] has essentially the same motivations as this standard.
+However, unlike SLIP-0039,
+
+* this standard aims to be simple enough for hand computation
+* we use the bech32 alphabet rather than a word list, resulting in fixed-length compact encodings
+* we do not support multi-level secret sharing (splitting of shares), although it is technically possible and may be added in a future BIP
+* because of the need to support hand computation, we '''do not''' support passphrases or key hardening
+
+Users who demand a higher level of security for particular secrets, or have a general distrust in digital electronic devices, have the option of using hand computation to backup and restore secret data in an interoperable manner.
+In particular, all computations can be done with simple lookup tables.
+'''It is therefore possible to compute and verify checksums, and to split and recover seeds, entirely using pen and paper.'''
+For long-lived rarely-used seeds, the ability to hand-verify checksums has a significant benefit even for users who do not care to do any other part of this process by hand.
+It means that they can verify the integrity (against non-malicious tampering) of their shares regularly, say, on an annual basis, without needing to continually expose secret data to new hardware.
+
+The ability to compute properties by hand comes from our choice of a small field and our use of linear error correcting codes.
+It does not come with any reduction in security, as long as users use high-quality randomness.
+Note that hand computation is optional, the particular details of hand computation are outside the scope of this standard, and implementers do not need to be concerned with this possibility.
+
+[https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki BIP-0039] serves the same purpose as this standard: encoding master seeds for storage by users.
+However, BIP-0039 has no error-correcting ability, cannot sensibly be extended to support secret sharing, has no support for versioning or other metadata, and has many technical design decisions that make implementation and interoperability difficult (for example, the use of SHA-512 to derive seeds, or the use of 11-bit words).
+
+==Specification==
+
+===codex32===
+
+A codex32 string is similar to a bech32 string defined in [https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki BIP-0173].
+It reuses the base-32 character set from BIP-0173, and consists of:
+
+* A human-readable part, which is the string "ms" (or "MS").
+* A separator, which is always "1".
+* A data part which is in turn subdivided into:
+** A threshold parameter, which MUST be a single digit between "2" and "9", or the digit "0".
+*** If the threshold parameter is "0" then the share index, defined below, MUST have a value of "s" (or "S").
+** An identifier consisting of 4 bech32 characters.
+** A share index, which is any bech32 character. Note that a share index value of "s" (or "S") is special and denotes the unshared secret (see section "Unshared Secret").
+** A payload which is a sequence of up to 74 bech32 characters. (However, see '''Long codex32 Strings''' below for an exception to this limit.)
+** A checksum which consists of 13 bech32 characters as described below.
+
+As with bech32 strings, a codex32 string MUST be entirely uppercase or entirely lowercase.
+For presentation, lowercase is usually preferable, but uppercase SHOULD be used for handwritten codex32 strings.
+If a codex32 string is encoded in a QR code, it SHOULD use the uppercase form, as this is encoded more compactly.
+
+===Checksum===
+
+The last thirteen characters of the data part form a checksum and contain no information.
+Valid strings MUST pass the criteria for validity specified by the Python 3 code snippet below.
+The function <code>ms32_verify_checksum</code> must return true when its argument is the data part as a list of integers representing the characters converted using the bech32 character table from BIP-0173.
+
+To construct a valid checksum given the data-part characters (excluding the checksum), the <code>ms32_create_checksum</code> function can be used.
+
+<source lang="python">
+MS32_CONST = 0x10ce0795c2fd1e62a
+
+def ms32_polymod(values):
+    GEN = [
+        0x19dc500ce73fde210,
+        0x1bfae00def77fe529,
+        0x1fbd920fffe7bee52,
+        0x1739640bdeee3fdad,
+        0x07729a039cfc75f5a,
+    ]
+    residue = 0x23181b3
+    for v in values:
+        b = (residue >> 60)
+        residue = (residue & 0x0fffffffffffffff) << 5 ^ v
+        for i in range(5):
+            residue ^= GEN[i] if ((b >> i) & 1) else 0
+    return residue
+
+def ms32_verify_checksum(data):
+    if len(data) >= 96:  # See Long codex32 Strings
+        return ms32_verify_long_checksum(data)
+    if len(data) <= 93:
+        return ms32_polymod(data) == MS32_CONST
+    return False
+
+def ms32_create_checksum(data):
+    if len(data) > 80:  # See Long codex32 Strings
+        return ms32_create_long_checksum(data)
+    values = data
+    polymod = ms32_polymod(values + [0] * 13) ^ MS32_CONST
+    return [(polymod >> 5 * (12 - i)) & 31 for i in range(13)]
+</source>
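+
+As a usage sketch (not part of the reference code above), a codex32 string can be checked by converting its data part to values with the BIP-0173 character table. The <code>ms32_decode_data</code> helper below is hypothetical:
+
+<source lang="python">
+CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"  # bech32 character table from BIP-0173
+
+def ms32_decode_data(codex32_str):
+    # Hypothetical helper: strip the "ms1" prefix and map each remaining
+    # character to its 5-bit value.
+    hrp, sep, data = codex32_str.lower().partition("1")
+    assert hrp == "ms" and sep == "1"
+    return [CHARSET.find(c) for c in data]
+
+# The codex32 secret from Test vector 1 below should verify successfully.
+assert ms32_verify_checksum(ms32_decode_data(
+    "ms10testsxxxxxxxxxxxxxxxxxxxxxxxxxx4nzvca9cmczlw"))
+</source>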
+
+===Error Correction===
+
+A codex32 string without a valid checksum MUST NOT be used.
+The checksum is designed to be an error correcting code that can correct up to 4 character substitutions, up to 8 unreadable characters (called erasures), or up to 13 consecutive erasures.
+Implementations SHOULD provide the user with a corrected valid codex32 string if possible.
+However, implementations SHOULD NOT automatically proceed with a corrected codex32 string without user confirmation of the corrected string, either by prompting the user, or returning a corrected string in an error message and allowing the user to repeat their action.
+We do not specify how an implementation should implement error correction. However, we recommend that:
+
+* Implementations make suggestions to substitute non-bech32 characters with bech32 characters in some situations, such as replacing "B" with "8", "O" with "0", "I" with "l", etc.
+* Implementations interpret "?" as an erasure.
+* Implementations optionally interpret other non-bech32 characters, or characters with incorrect case, as erasures.
+* If a string with 8 or fewer erasures can have those erasures filled in to make a valid codex32 string, then the implementation suggests such a string as a correction.
+* If a string consisting of valid bech32 characters in the proper case can be made valid by substituting 4 or fewer characters, then the implementation suggests such a string as a correction.
+
+===Unshared Secret===
+
+When the share index of a valid codex32 string (converted to lowercase) is the letter "s", we call the string a codex32 secret.
+The payload in a codex32 secret is a direct encoding of a BIP-0032 HD master seed.
+
+The master seed is decoded by converting the payload to bytes:
+
+* Translate the characters to 5 bits values using the bech32 character table from BIP-0173, most significant bit first.
+* Re-arrange those bits into groups of 8 bits. Any incomplete group at the end MUST be 4 bits or less, and is discarded.
+
+Note that unlike the decoding process in BIP-0173, we do NOT require that the incomplete group be all zeros.
+
+For an unshared secret, the threshold parameter (the first character of the data part) is ignored (beyond the fact it must be a digit for the codex32 string to be valid).
+We recommend using the digit "0" for the threshold parameter in this case.
+The 4 character identifier also has no effect beyond aiding users in distinguishing between multiple different master seeds in cases where they have more than one.
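+
+A minimal sketch of this regrouping (the helper name is ours; its input is the payload already converted to 5-bit values):
+
+<source lang="python">
+def ms32_payload_to_seed(payload_values):
+    # Pack the 5-bit values into bytes, most significant bit first, and
+    # discard the final incomplete group (4 bits or fewer for valid strings).
+    acc, bits, seed = 0, 0, bytearray()
+    for v in payload_values:
+        acc = (acc << 5) | v
+        bits += 5
+        while bits >= 8:
+            bits -= 8
+            seed.append((acc >> bits) & 0xFF)
+    return bytes(seed)
+
+# Test vector 1 below: a payload of 26 "x" characters (value 6 each).
+assert ms32_payload_to_seed([6] * 26).hex() == "318c6318c6318c6318c6318c6318c631"
+</source>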
+
+===Recovering Master Seed===
+
+When the share index of a valid codex32 string (converted to lowercase) is not the letter "s", we call the string a codex32 share.
+The first character of the data part indicates the threshold of the share, and it is required to be a non-"0" digit.
+
+In order to recover a master seed, one needs a set of valid codex32 shares such that:
+
+* All shares have the same threshold value, the same identifier, and the same length.
+* All of the share index values are distinct.
+* The number of codex32 shares is exactly equal to the (common) threshold value.
+
+If all the above conditions are satisfied, the <code>ms32_recover</code> function will return a codex32 secret when its argument is the list of codex32 shares with each share represented as a list of integers representing the characters converted using the bech32 character table from BIP-0173.
+
+<source lang="python">
+bech32_inv = [
+    0, 1, 20, 24, 10, 8, 12, 29, 5, 11, 4, 9, 6, 28, 26, 31,
+    22, 18, 17, 23, 2, 25, 16, 19, 3, 21, 14, 30, 13, 7, 27, 15,
+]
+
+def bech32_mul(a, b):
+    res = 0
+    for i in range(5):
+        res ^= a if ((b >> i) & 1) else 0
+        a *= 2
+        a ^= 41 if (32 <= a) else 0
+    return res
+
+def bech32_lagrange(l, x):
+    n = 1
+    c = []
+    for i in l:
+        n = bech32_mul(n, i ^ x)
+        m = 1
+        for j in l:
+            m = bech32_mul(m, (x if i == j else i) ^ j)
+        c.append(m)
+    return [bech32_mul(n, bech32_inv[i]) for i in c]
+
+def ms32_interpolate(l, x):
+    w = bech32_lagrange([s[5] for s in l], x)
+    res = []
+    for i in range(len(l[0])):
+        n = 0
+        for j in range(len(l)):
+            n ^= bech32_mul(w[j], l[j][i])
+        res.append(n)
+    return res
+
+def ms32_recover(l):
+    return ms32_interpolate(l, 16)
+</source>
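+
+As a usage sketch, the secret from Test vector 2 below can be recovered from its two shares, reusing the hypothetical <code>ms32_decode_data</code> helper from the Checksum section:
+
+<source lang="python">
+share_a = ms32_decode_data("MS12NAMEA320ZYXWVUTSRQPNMLKJHGFEDCAXRPP870HKKQRM")
+share_c = ms32_decode_data("MS12NAMECACDEFGHJKLMNPQRSTUVWXYZ023FTR2GDZMPY6PN")
+secret  = ms32_decode_data("MS12NAMES6XQGUZTTXKEQNJSJZV4JV3NZ5K3KWGSPHUH6EVW")
+
+# With threshold 2, any two distinct shares recover the codex32 secret.
+assert ms32_recover([share_a, share_c]) == secret
+</source>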
+
+===Generating Shares===
+
+If we already have ''t'' valid codex32 strings such that:
+
+* All strings have the same threshold value ''t'', the same identifier, and the same length
+* All of the share index values are distinct
+
+Then we can derive additional shares with the <code>ms32_interpolate</code> function by passing it a list of exactly ''t'' of these codex32 strings, together with a fresh share index distinct from all of the existing share indexes.
+The newly derived share will have the provided share index.
+
+Once a user has generated ''n'' codex32 shares, they may discard the codex32 secret (if it exists).
+The ''n'' shares form a ''t'' of ''n'' Shamir's secret sharing scheme of a codex32 secret.
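+
+For example, the additional share with index <code>d</code> from Test vector 2 below can be derived from shares <code>a</code> and <code>c</code>, continuing the recovery sketch above:
+
+<source lang="python">
+# 13 is the bech32 value of the fresh share index "d".
+share_d = ms32_interpolate([share_a, share_c], CHARSET.find("d"))
+assert share_d == ms32_decode_data("MS12NAMEDLL4F8JLH4E5VDVULDLFXU2JHDNLSM97XVENRXEG")
+</source>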
+
+There are two ways to create an initial set of ''t'' valid codex32 strings, depending on whether the user already has an existing master seed to split.
+
+====For a fresh master seed====
+
+In the case that the user wishes to generate a fresh master seed, the user generates random initial shares, as follows:
+
+# Choose a bitsize, between 128 and 512, which must be a multiple of 8.
+# Choose a threshold value ''t'' between 2 and 9, inclusive
+# Choose a 4 bech32 character identifier
+#* We do not define how to choose the identifier, beyond noting that it SHOULD be distinct for every master seed the user may need to disambiguate.
+# ''t'' many times, generate a random share by:
+## Take the next available letter from the bech32 alphabet, in alphabetical order, as <code>a</code>, <code>c</code>, <code>d</code>, ..., to be the share index
+## Set the first nine characters to be the prefix <code>ms1</code>, the threshold value ''t'', the 4-character identifier, and then the share index
+## Choose the next ceil(''bitlength / 5'') characters uniformly at random
+## Generate a valid checksum in accordance with the Checksum section, and append this to the resulting share
+
+The result will be ''t'' distinct shares, all with the same initial 8 characters, and a distinct share index as the 9th character.
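+
+A sketch of these generation steps (the <code>generate_fresh_shares</code> helper is ours; it relies on <code>ms32_create_checksum</code> from the Checksum section and the <code>CHARSET</code> constant defined earlier):
+
+<source lang="python">
+import secrets
+
+def generate_fresh_shares(t, identifier, bitlength):
+    assert 2 <= t <= 9 and len(identifier) == 4
+    assert 128 <= bitlength <= 512 and bitlength % 8 == 0
+    payload_len = -(-bitlength // 5)              # ceil(bitlength / 5)
+    header = [CHARSET.find(str(t))] + [CHARSET.find(c) for c in identifier]
+    shares = []
+    # "a", "c", "d", ... are the bech32 letters in alphabetical order.
+    for index_char in "acdefghjk"[:t]:
+        data = header + [CHARSET.find(index_char)]
+        data += [secrets.randbelow(32) for _ in range(payload_len)]
+        data += ms32_create_checksum(data)
+        shares.append("ms1" + "".join(CHARSET[v] for v in data))
+    return shares
+</source>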
+
+With this set of ''t'' codex32 shares, new shares can be derived as discussed above. This process generates a fresh master seed, whose value can be retrieved by running the recovery process on any ''t'' of these shares.
+
+====For an existing master seed====
+
+Before generating shares for an existing master seed, it first must be converted into a codex32 secret, as described above.
+The conversion process consists of:
+
+# Choose a threshold value ''t'' between 2 and 9, inclusive
+# Choose a 4 bech32 character identifier
+#* We do not define how to choose the identifier, beyond noting that it SHOULD be distinct for every master seed the user may need to disambiguate.
+# Set the share index to <code>s</code>
+# Set the payload to a bech32 encoding of the master seed, padded with arbitrary bits
+# Generate a valid checksum in accordance with the Checksum section
+
+Along with the codex32 secret, the user must generate ''t''-1 other codex32 shares, each with the same threshold value, the same identifier, and a distinct share index.
+These shares should be generated as described in the "fresh master seed" section.
+
+The codex32 secret and the ''t''-1 codex32 shares form a set of ''t'' valid codex32 strings from which additional shares can be derived as described above.
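+
+A sketch of this conversion (helper names are ours; the padding bits are chosen as zeros here, although the specification allows arbitrary padding):
+
+<source lang="python">
+def seed_to_payload_values(seed):
+    # Split the seed bytes into 5-bit values, most significant bit first,
+    # padding the final value with zero bits.
+    acc, bits, values = 0, 0, []
+    for byte in seed:
+        acc = (acc << 8) | byte
+        bits += 8
+        while bits >= 5:
+            bits -= 5
+            values.append((acc >> bits) & 31)
+    if bits:
+        values.append((acc << (5 - bits)) & 31)
+    return values
+
+def seed_to_codex32_secret(seed, t, identifier):
+    data = [CHARSET.find(str(t))] + [CHARSET.find(c) for c in identifier]
+    data += [CHARSET.find("s")] + seed_to_payload_values(seed)
+    data += ms32_create_checksum(data)
+    return "ms1" + "".join(CHARSET[v] for v in data)
+
+# Test vector 3 below: the zero-padded choice of codex32 secret.
+assert seed_to_codex32_secret(
+    bytes.fromhex("ffeeddccbbaa99887766554433221100"), 3, "cash"
+) == "ms13cashsllhdmn9m42vcsamx24zrxgs3qqjzqud4m0d6nln"
+</source>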
+
+===Long codex32 Strings===
+
+The 13 character checksum design only supports up to 80 data characters.
+Excluding the threshold, identifier and index characters, this limits the payload to 74 characters or 46 bytes.
+While this is enough to support the 32-byte advised size of BIP-0032 master seeds, BIP-0032 allows seeds to be up to 64 bytes in size.
+We define a long codex32 string format to support these longer seeds by defining an alternative checksum.
+
+<source lang="python">
+MS32_LONG_CONST = 0x43381e570bf4798ab26
+
+def ms32_long_polymod(values):
+    GEN = [
+        0x3d59d273535ea62d897,
+        0x7a9becb6361c6c51507,
+        0x543f9b7e6c38d8a2a0e,
+        0x0c577eaeccf1990d13c,
+        0x1887f74f8dc71b10651,
+    ]
+    residue = 0x23181b3
+    for v in values:
+        b = (residue >> 70)
+        residue = (residue & 0x3fffffffffffffffff) << 5 ^ v
+        for i in range(5):
+            residue ^= GEN[i] if ((b >> i) & 1) else 0
+    return residue
+
+def ms32_verify_long_checksum(data):
+    return ms32_long_polymod(data) == MS32_LONG_CONST
+
+def ms32_create_long_checksum(data):
+    values = data
+    polymod = ms32_long_polymod(values + [0] * 15) ^ MS32_LONG_CONST
+    return [(polymod >> 5 * (14 - i)) & 31 for i in range(15)]
+</source>
+
+A long codex32 string follows the same specification as a regular codex32 string with the following changes.
+
+* The payload is a sequence of between 75 and 103 bech32 characters.
+* The checksum consists of 15 bech32 characters as defined above.
+
+A codex32 string with a data part of 94 or 95 characters is never legal as a regular codex32 string is limited to 93 data characters and a long codex32 string is at least 96 characters.
+
+Generation of long shares and recovery of the master seed from long shares proceeds in exactly the same way as for regular shares with the <code>ms32_interpolate</code> function.
+
+The long checksum is designed to be an error correcting code that can correct up to 4 character substitutions, up to 8 unreadable characters (called erasures), or up to 15 consecutive erasures.
+As with regular checksums we do not specify how an implementation should implement error correction, and all our recommendations for error correction of regular codex32 strings also apply to long codex32 strings.
+
+==Rationale==
+
+This scheme is based on the observation that the Lagrange interpolation of valid codewords in a BCH code will always be a valid codeword.
+This means that derived shares will always have a valid checksum, and a sufficient threshold of shares with valid checksums will derive a secret with a valid checksum.
+
+The header system is also compatible with Lagrange interpolation, meaning all derived shares will have the same identifier and will have the appropriate share index.
+This fact allows the header data to be covered by the checksum.
+
+The checksum size and identifier size have been chosen so that the encoding of 128-bit seeds and shares fits within 48 characters.
+This is a standard size for many common seed storage formats, which has been popularized by the 12 four-letter word format of the BIP-0039 mnemonic.
+
+The 13 character checksum is adequate to correct 4 errors in up to 93 characters (80 characters of data and 13 characters of the checksum).
+We can correct up to 8 erasures (errors with known locations), and up to 13 consecutive errors (burst errors).
+Beyond that, our code is guaranteed to detect up to 8 errors.
+More generally, any number of random errors will be detected with overwhelming probability (1 - 2^-65). However, the checksum does not protect against maliciously constructed errors.
+These parameters are slightly better than those of the checksum used in SLIP-0039.
+
+For 256-bit seeds and shares our strings are 74 characters, which fits within the 96-character size of the BIP-0039 24-word format (24 four-letter words), with plenty of room to spare.
+
+A longer checksum is needed to support up to 512-bit seeds, the longest seed length specified in BIP-0032, as the 13 character checksum isn't adequate for more than 80 data characters.
+While we could use the 15 character checksum for both cases, we prefer to keep the strings as short as possible for the more common cases of 128-bit and 256-bit master seeds.
+We only guarantee to correct 4 characters no matter how long the string is.
+Longer strings mean more chances for transcription errors, so shorter strings are better.
+
+The longest data part using the regular 13 character checksum is 93 characters and corresponds to a 400-bit secret.
+At this length, the prefix <code>MS1</code> is not covered by the checksum.
+This is acceptable because the checksum scheme itself requires you to know that the <code>MS1</code> prefix is being used in the first place.
+If the prefix is damaged and a user is guessing that the data might be using this scheme, then the user can enter the available data explicitly using the suspected <code>MS1</code> prefix.
+
+===Not BIP-0039 Entropy===
+
+Instead of encoding a BIP-0032 master seed, an alternative would be to encode BIP-0039 entropy.
+However this alternative approach is fraught with difficulties.
+
+One approach would be to encode the BIP-0039 entropy along with the BIP-0039 checksum data.
+This data can be recovered directly from the BIP-0039 mnemonic, and the process can be reversed if one knows the target language.
+However, for a 128-bit seed, there is a 4-bit checksum, yielding 132 bits of data that need to be encoded.
+This exceeds the 130 bits of room that we have for storing 128-bit seeds.
+We would have to compromise on the 48-character size, or the size of the headers, or the size of the checksum, in order to add room for an additional character of data.
+
+This approach would also eliminate our shortcut of generating a fresh master secret directly from random shares.
+One would be required to first generate BIP-0039 entropy, then add a BIP-0039 checksum, before adding a Codex32 checksum and then generating the other shares.
+In particular, this process could no longer be performed by hand, since it is effectively impossible to hand-compute a BIP-0039 checksum.
+
+An alternative approach is to discard the BIP-0039 checksum, since it is inadequate for error correction anyway, and rely on the Codex32 checksum.
+However, this approach ends up eliminating the benefits of BIP-0039 compatibility.
+While it is now possible to hand generate fresh shares, it is impossible to recover compatible BIP-0039 words by hand because, again, the BIP-0039 checksum is not hand computable.
+The only way of generating the compatible BIP-0039 mnemonic is to use wallet software.
+But if wallet software is needed to support this approach to decoding entropy, we may as well bypass all of the overhead of BIP-0039 and directly encode the entropy of a BIP-0032 master seed, which is what we do in our Codex32 proposal.
+
+Beyond the problems above, BIP-0039 does not define a single transformation from entropy to BIP-0032 master seed.
+Instead, every language has its own word list (or word lists), and each choice of word list yields a different transformation from entropy to master seed.
+We would need to encode the choice of word list in our share's meta-data, which takes up even more room, and is difficult to specify due to the ever-evolving choice of word lists.
+
+Alternatively, we could standardize on the choice of the English word list, something that is nearly a de facto standard, and simply be incompatible with BIP-0039 wallets of other languages.
+Such a choice also risks users of BIP-0039 recovering their entropy from their language's word list, encoding it in Codex32, and then failing to recover their wallet because the English word list has replaced their language's word list.
+
+The main advantage of this alternative approach would be that wallets could give users an option to switch between backing up their entropy as a BIP-0039 mnemonic and in Codex32 format, but again, only if their language choice happens to be the English word list.
+In practice, we expect users not to switch back and forth between backup formats, but instead to simply generate a fresh master seed using Codex32.
+
+Seeing little value in (English-only) BIP-0039 compatibility, given all the difficulties with BIP-0039 language choice, not to mention the PBKDF2 overhead of using BIP-0039, we think it is best to abandon BIP-0039 and encode BIP-0032 master seeds directly.
+Our approach is semi-convertible with BIP-0039's 512-bit master seeds (in all languages, see Backwards Compatibility) and fully interconvertible with SLIP-39 encoded master seeds or any other encoding of BIP-0032 master seeds.
+
+==Backwards Compatibility==
+
+codex32 is an alternative to BIP-0039 and SLIP-0039.
+It is technically possible to derive the BIP32 master seed from seed words encoded in one of these schemes, and then to encode this seed in codex32.
+For BIP-0039 this process is irreversible, since it involves hashing the original words.
+Furthermore, the resulting seed will be 512 bits long, which may be too large to be safely and conveniently handled.
+
+SLIP-0039 seed words can be reversibly converted to master seeds, so it is possible to interconvert between SLIP-0039 and codex32.
+However, SLIP-0039 '''shares''' cannot be converted to codex32 shares because the two schemes use a different underlying field.
+
+The authors of this BIP do not recommend interconversion.
+Instead, users who wish to switch to codex32 should generate a fresh seed and sweep their coins.
+
+==Reference Implementation==
+
+Our [https://github.com/BlockstreamResearch/codex32 reference implementation repository] contains implementations in Rust and PostScript.
+The inline code in this BIP text can be used as a Python reference.
+
+==Test Vectors==
+
+===Test vector 1===
+
+This example shows the codex32 format, when used without splitting the secret into any shares.
+The payload contains 26 bech32 characters, which corresponds to 130 bits. We truncate the last two bits in order to obtain a 128-bit master seed.
+
+codex32 secret (bech32): <code>ms10testsxxxxxxxxxxxxxxxxxxxxxxxxxx4nzvca9cmczlw</code>
+
+Master secret (hex): <code>318c6318c6318c6318c6318c6318c631</code>
+
+* human-readable part: <code>ms</code>
+* separator: <code>1</code>
+* k value: <code>0</code> (no secret splitting)
+* identifier: <code>test</code>
+* share index: <code>s</code> (the secret)
+* payload: <code>xxxxxxxxxxxxxxxxxxxxxxxxxx</code>
+* checksum: <code>4nzvca9cmczlw</code>
+* master node xprv: <code>xprv9s21ZrQH143K3taPNekMd9oV5K6szJ8ND7vVh6fxicRUMDcChr3bFFzuxY8qP3xFFBL6DWc2uEYCfBFZ2nFWbAqKPhtCLRjgv78EZJDEfpL</code>
+
+===Test vector 2===
+
+This example shows generating a new master seed using "random" codex32 shares, as well as deriving an additional codex32 share, using ''k''=2 and an identifier of <code>NAME</code>.
+Although codex32 strings are canonically all lowercase, it's also valid to use all uppercase.
+
+Share with index <code>A</code>: <code>MS12NAMEA320ZYXWVUTSRQPNMLKJHGFEDCAXRPP870HKKQRM</code>
+
+Share with index <code>C</code>: <code>MS12NAMECACDEFGHJKLMNPQRSTUVWXYZ023FTR2GDZMPY6PN</code>
+
+* Derived share with index <code>D</code>: <code>MS12NAMEDLL4F8JLH4E5VDVULDLFXU2JHDNLSM97XVENRXEG</code>
+* Secret share with index <code>S</code>: <code>MS12NAMES6XQGUZTTXKEQNJSJZV4JV3NZ5K3KWGSPHUH6EVW</code>
+* Master secret (hex): <code>d1808e096b35b209ca12132b264662a5</code>
+* master node xprv: <code>xprv9s21ZrQH143K2NkobdHxXeyFDqE44nJYvzLFtsriatJNWMNKznGoGgW5UMTL4fyWtajnMYb5gEc2CgaKhmsKeskoi9eTimpRv2N11THhPTU</code>
+
+Note that per BIP-0173, the lowercase form is used when determining a character's value for checksum purposes.
+In particular, given an all uppercase codex32 string, we still use lowercase <code>ms</code> as the human-readable part during checksum construction.
+
+===Test vector 3===
+
+This example shows splitting an existing 128-bit master seed into "random" codex32 shares, using ''k''=3 and an identifier of <code>cash</code>.
+We appended two zero bits in order to obtain 26 bech32 characters (130 bits of data) from the 128-bit master seed.
+
+Master secret (hex): <code>ffeeddccbbaa99887766554433221100</code>
+
+Secret share with index <code>s</code>: <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qqjzqud4m0d6nln</code>
+
+Share with index <code>a</code>: <code>ms13casha320zyxwvutsrqpnmlkjhgfedca2a8d0zehn8a0t</code>
+
+Share with index <code>c</code>: <code>ms13cashcacdefghjklmnpqrstuvwxyz023949xq35my48dr</code>
+
+* Derived share with index <code>d</code>: <code>ms13cashd0wsedstcdcts64cd7wvy4m90lm28w4ffupqs7rm</code>
+* Derived share with index <code>e</code>: <code>ms13casheekgpemxzshcrmqhaydlp6yhms3ws7320xyxsar9</code>
+* Derived share with index <code>f</code>: <code>ms13cashf8jh6sdrkpyrsp5ut94pj8ktehhw2hfvyrj48704</code>
+* master node xprv: <code>xprv9s21ZrQH143K266qUcrDyYJrSG7KA3A7sE5UHndYRkFzsPQ6xwUhEGK1rNuyyA57Vkc1Ma6a8boVqcKqGNximmAe9L65WsYNcNitKRPnABd</code>
+
+Any three of the five shares among <code>acdef</code> can be used to recover the secret.
+
+Note that the choice to append two zero bits was arbitrary, and any of the following four secret shares would have been valid choices.
+However, each choice would have resulted in a different set of derived shares.
+
+* <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qqjzqud4m0d6nln</code>
+* <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qpte35dvzkjpt0r</code>
+* <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qzfatvdwq5692k6</code>
+* <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qrsx6ydhed97jx2</code>
+
+===Test vector 4===
+
+This example shows converting a 256-bit secret into a codex32 secret, without splitting the secret into any shares.
+We appended four zero bits in order to obtain 52 bech32 characters (260 bits of data) from the 256-bit secret.
+
+256-bit secret (hex): <code>ffeeddccbbaa99887766554433221100ffeeddccbbaa99887766554433221100</code>
+
+* codex32 secret: <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqqtum9pgv99ycma</code>
+* master node xprv: <code>xprv9s21ZrQH143K3s41UCWxXTsU4TRrhkpD1t21QJETan3hjo8DP5LFdFcB5eaFtV8x6Y9aZotQyP8KByUjgLTbXCUjfu2iosTbMv98g8EQoqr</code>
+
+Note that the choice to append four zero bits was arbitrary, and any of the following sixteen codex32 secrets would have been valid:
+
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqqtum9pgv99ycma</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqpj82dp34u6lqtd</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqzsrs4pnh7jmpj5</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqrfcpap2w8dqezy</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqy5tdvphn6znrf0</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyq9dsuypw2ragmel</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqx05xupvgp4v6qx</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyq8k0h5p43c2hzsk</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqgum7hplmjtr8ks</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqf9q0lpxzt5clxq</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyq28y48pyqfuu7le</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqt7ly0paesr8x0f</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqvrvg7pqydv5uyz</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqd6hekpea5n0y5j</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqwcnrwpmlkmt9dt</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyq0pgjxpzx0ysaam</code>
+
+===Test vector 5===
+
+This example shows generating a new 512-bit master seed using "random" codex32 characters and appending a checksum.
+The payload contains 103 bech32 characters, which corresponds to 515 bits. The last three bits are discarded when converting to a 512-bit master seed.
+
+This is an example of a '''Long codex32 String'''.
+
+* Secret share with index <code>S</code>: <code>MS100C8VSM32ZXFGUHPCHTLUPZRY9X8GF2TVDW0S3JN54KHCE6MUA7LQPZYGSFJD6AN074RXVCEMLH8WU3TK925ACDEFGHJKLMNPQRSTUVWXY06FHPV80UNDVARHRAK</code>
+* Master secret (hex): <code>dc5423251cb87175ff8110c8531d0952d8d73e1194e95b5f19d6f9df7c01111104c9baecdfea8cccc677fb9ddc8aec5553b86e528bcadfdcc201c17c638c47e9</code>
+* master node xprv: <code>xprv9s21ZrQH143K4UYT4rP3TZVKKbmRVmfRqTx9mG2xCy2JYipZbkLV8rwvBXsUbEv9KQiUD7oED1Wyi9evZzUn2rqK9skRgPkNaAzyw3YrpJN</code>
+
+===Invalid test vectors===
+
+These examples have incorrect checksums.
+
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxve740yyge2ghq</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxve740yyge2ghp</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxlk3yepcstwr</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxx6pgnv7jnpcsp</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxx0cpvr7n4geq</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxm5252y7d3lr</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxrd9sukzl05ej</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxc55srw5jrm0</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxgc7rwhtudwc</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxx4gy22afwghvs</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe8yfm0</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxvm597d</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxme084q0vpht7pe0</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxme084q0vpht7pew</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxqyadsp3nywm8a</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxzvg7ar4hgaejk</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxcznau0advgxqe</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxch3jrc6j5040j</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx52gxl6ppv40mcv</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx7g4g2nhhle8fk</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx63m45uj8ss4x8</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxy4r708q7kg65x</code>
+
+These examples use the wrong checksum for their given data sizes.
+
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxurfvwmdcmymdufv</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxcsyppjkd8lz4hx3</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxu6hwvl5p0l9xf3c</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxwqey9rfs6smenxa</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxv70wkzrjr4ntqet</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx3hmlrmpa4zl0v</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrfggf88znkaup</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxpt7l4aycv9qzj</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxus27z9xtyxyw3</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxcwm4re8fs78vn</code>
+
+These examples have improper lengths.
+They are either too short, too long, or would decode to a byte sequence with an incomplete group greater than 4 bits.
+
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxw0a4c70rfefn4</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxk4pavy5n46nea</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxx9lrwar5zwng4w</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxr335l5tv88js3</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxvu7q9nz8p7dj68v</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxpq6k542scdxndq3</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxkmfw6jm270mz6ej</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxzhddxw99w7xws</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxx42cux6um92rz</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxarja5kqukdhy9</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxky0ua3ha84qk8</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx9eheesxadh2n2n9</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx9llwmgesfulcj2z</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx02ev7caq6n9fgkf</code>
+
+This example uses a "0" threshold with a non-"s" index.
+
+* <code>ms10fauxxxxxxxxxxxxxxxxxxxxxxxxxxxx0z26tfn0ulw3p</code>
+
+This example has a threshold that is not a digit.
+
+* <code>ms1fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxda3kr3s0s2swg</code>
+
+These examples do not begin with the required "ms" or "MS" prefix and/or are missing the "1" separator.
+
+* <code>0fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>ms0fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>m10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>s10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>0fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxhkd4f70m8lgws</code>
+* <code>10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxhkd4f70m8lgws</code>
+* <code>m10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxx8t28z74x8hs4l</code>
+* <code>s10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxh9d0fhnvfyx3x</code>
+
+These examples all incorrectly mix upper and lower case characters.
+
+* <code>Ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>mS10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>MS10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>ms10FAUXsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>ms10fauxSxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>ms10fauxsXXXXXXXXXXXXXXXXXXXXXXXXXXuqxkk05lyf3x2</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxUQXKK05LYF3X2</code>
+
+==Appendix==
+
+===Mathematical Companion===
+
+Below we use the bech32 character set to denote values in GF[32].
+In bech32, the letter <code>Q</code> denotes zero and the letter <code>P</code> denotes one.
+The digits <code>0</code> and <code>2</code> through <code>9</code> do ''not'' denote their numeric values.
+They are simply elements of GF[32].
+
+The generating polynomial for our BCH code is as follows.
+
+We extend GF[32] to GF[1024] by adjoining a primitive cube root of unity, <code>ζ</code>, satisfying <code>ζ^2 = ζ + P</code>.
+
+We select <code>β := G ζ</code> which has order 93, and construct the product <code>(x - β^i)</code> for <code>i</code> in <code>{17, 20, 46, 49, 52, 77, 78, 79, 80, 81, 82, 83, 84}</code>.
+The resulting polynomial is our generating polynomial for our 13 character checksum:
+
+ x^13 + E x^12 + M x^11 + 3 x^10 + G x^9 + Q x^8 + E x^7 + E x^6 + E x^5 + L x^4 + M x^3 + C x^2 + S x + S
+
+For our long checksum, we select <code>γ := E + X ζ</code>, which has order 1023, and construct the product <code>(x - γ^i)</code> for <code>i</code> in <code>{32, 64, 96, 895, 927, 959, 991, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026}</code>.
+The resulting polynomial is our generating polynomial for our 15 character checksum for long strings:
+
+ x^15 + 0 x^14 + 2 x^13 + E x^12 + 6 x^11 + F x^10 + E x^9 + 4 x^8 + X x^7 + H x^6 + 4 x^5 + X x^4 + 9 x^3 + K x^2 + Y x^1 + H
+
+(Reminder: the character <code>0</code> does ''not'' denote the zero of the field.)
diff --git a/bip-0119.mediawiki b/bip-0119.mediawiki
index aa226d0..acbe97c 100644
--- a/bip-0119.mediawiki
+++ b/bip-0119.mediawiki
@@ -3,6 +3,7 @@
Layer: Consensus (soft fork)
Title: CHECKTEMPLATEVERIFY
Author: Jeremy Rubin <j@rubin.io>
+ James O'Beirne <vaults@au92.org>
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0119
Status: Draft
Type: Standards Track
@@ -61,7 +62,7 @@ references.
==Detailed Specification==
The below code is the main logic for verifying CHECKTEMPLATEVERIFY, described
-in pythonic pseduocode. The canonical specification for the semantics of
+in pythonic pseudocode. The canonical specification for the semantics of
OP_CHECKTEMPLATEVERIFY as implemented in C++ in the context of Bitcoin Core can
be seen in the reference implementation.
@@ -225,12 +226,12 @@ A recent commit hash in that PR including tests and vectors can be found here ht
Once the PR is merged, this BIP should be updated to point to the specific code released.
Test vectors are available in [/bip-0119/vectors the bip-0119/vectors
-directory] for checking compatibility with the refrence implementation and BIP.
+directory] for checking compatibility with the reference implementation and BIP.
==Rationale==
The goal of CHECKTEMPLATEVERIFY is to be minimal impact on the existing codebase -- in the
-future, as we become aware of more complex but shown to be safe use cases new template types can be added.
+future, as we become aware of more complex but shown to be safe use cases, new template types can be added.
Below we'll discuss the rules one-by-one:
@@ -250,7 +251,7 @@ Were these values not committed, it would be possible to delay the spending of
an output arbitrarily as well as possible to change the TXID.
Committing these values, rather than restricting them to specific values, is
-more flexible as it permits users of CHECKTEMPLATEVERIFY the set the version and
+more flexible as it permits users of CHECKTEMPLATEVERIFY to set the version and
locktime as they please.
=====Committing to the ScriptSigs Hash=====
@@ -258,7 +259,7 @@ locktime as they please.
The scriptsig in a segwit transaction must be exactly empty, unless it is a P2SH
segwit transaction in which case it must be only the exact redeemscript. P2SH is incompatible
(unless the P2SH hash is broken) with CHECKTEMPLATEVERIFY because the template hash must commit
-to the ScriptSig, which must contain the redeemscript, which is a hash cycle.
+to the ScriptSig, which must contain the redeemscript, which is a hash cycle.
To prevent malleability when not using a segwit input, we also commit to the
scriptsig. This makes it possible to use a 2 input CHECKTEMPLATEVERIFY with a legacy pre-signed
diff --git a/bip-0127.mediawiki b/bip-0127.mediawiki
index 15c7755..44a90d7 100644
--- a/bip-0127.mediawiki
+++ b/bip-0127.mediawiki
@@ -219,6 +219,7 @@ A work-in-progress implementation of a tool that produces and verifies proofs
in the described format can be found here:
https://github.com/stevenroose/reserves
+An implementation of the custom proof PSBTs is part of the [https://bitcoindevkit.org/ BDK], and can be found here: https://crates.io/crates/bdk-reserves
== Footnotes ==
diff --git a/bip-0129.mediawiki b/bip-0129.mediawiki
index 8719fe4..608c724 100644
--- a/bip-0129.mediawiki
+++ b/bip-0129.mediawiki
@@ -452,7 +452,7 @@ sh(wsh(multi(2,[793cc70b/48'/0'/0'/1']xpub6ErVmcYYHmavsMgxEcTZyzN5sqth1ZyRpFNJC2
==Acknowledgement==
-Special thanks to Pavol Rusnak, Dmitry Petukhov, Christopher Allen, Craig Raw, Robert Spigler, Gregory Sanders, Ta Tat Tai, Michael Flaxman, Pieter Wuille, Salvatore Ingala, Andrew Chow and others for their feedback on the specification.
+Special thanks to Pavol Rusnak, Dmitry Petukhov, Christopher Allen, Craig Raw, Robert Spigler, Gregory Sanders, Ta Tat Tai, Michael Flaxman, Pieter Wuille, Salvatore Ingala, Ava Chow and others for their feedback on the specification.
==References==
diff --git a/bip-0137.mediawiki b/bip-0137.mediawiki
index 19dd536..43addba 100644
--- a/bip-0137.mediawiki
+++ b/bip-0137.mediawiki
@@ -25,7 +25,7 @@ This BIP is licensed under the 2-clause BSD license.
==Motivation==
-Since Bitcoin private keys can not only be used to sign Bitcoin transactions, but also any other message, it has become customary to use them to sign various messages for differing purposes. Some applications of signing messages with a Bitcoin private key are as follows: proof of funds for collateral, credit worthiness, enterence to events, airdrops, audits as well as other applications. While there was no BIP written for how to digitally sign messages with Bitcoin private keys with P2PKH addresses it is a fairly well understood process, however with the introduction of Segwit (both in the form of P2SH and bech32) addresses, it is unclear how to distinguish a P2PKH, P2SH, or bech32 address from one another. This BIP proposes a standard signature format that will allow clients to distinguish between the different address formats.
+Since Bitcoin private keys can not only be used to sign Bitcoin transactions, but also any other message, it has become customary to use them to sign various messages for differing purposes. Some applications of signing messages with a Bitcoin private key are as follows: proof of funds for collateral, credit worthiness, entrance to events, airdrops, audits as well as other applications. While there was no BIP written for how to digitally sign messages with Bitcoin private keys with P2PKH addresses, it is a fairly well understood process; however, with the introduction of Segwit (both in the form of P2SH and bech32) addresses, it is unclear how to distinguish a P2PKH, P2SH, or bech32 address from one another. This BIP proposes a standard signature format that will allow clients to distinguish between the different address formats.
==Specification==
diff --git a/bip-0151.mediawiki b/bip-0151.mediawiki
index 9b91365..793c244 100644
--- a/bip-0151.mediawiki
+++ b/bip-0151.mediawiki
@@ -5,7 +5,7 @@
Author: Jonas Schnelli <dev@jonasschnelli.ch>
Comments-Summary: Controversial; some recommendation, and some discouragement
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0151
- Status: Withdrawn
+ Status: Replaced
Type: Standards Track
Created: 2016-03-23
License: PD
diff --git a/bip-0158.mediawiki b/bip-0158.mediawiki
index 484e674..8887d32 100644
--- a/bip-0158.mediawiki
+++ b/bip-0158.mediawiki
@@ -273,10 +273,8 @@ This BIP defines one initial filter type:
The basic filter is designed to contain everything that a light client needs to
sync a regular Bitcoin wallet. A basic filter MUST contain exactly the
following items for each transaction in a block:
-* The previous output script (the script being spent) for each input, except
- for the coinbase transaction.
-* The scriptPubKey of each output, aside from all <code>OP_RETURN</code> output
- scripts.
+* The previous output script (the script being spent) for each input, except for the coinbase transaction.
+* The scriptPubKey of each output, aside from all <code>OP_RETURN</code> output scripts.
Any "nil" items MUST NOT be included into the final set of filter elements.
diff --git a/bip-0174.mediawiki b/bip-0174.mediawiki
index a8f9ac8..5e70a11 100644
--- a/bip-0174.mediawiki
+++ b/bip-0174.mediawiki
@@ -2,7 +2,7 @@
BIP: 174
Layer: Applications
Title: Partially Signed Bitcoin Transaction Format
- Author: Andrew Chow <achow101@gmail.com>
+ Author: Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0174
Status: Final
@@ -120,7 +120,7 @@ The currently defined global types are as follows:
| <tt>PSBT_GLOBAL_TX_VERSION = 0x02</tt>
| None
| No key data
-| <tt><32-bit little endian uint version></tt>
+| <tt><32-bit little endian int version></tt>
| The 32-bit little endian signed integer representing the version number of the transaction being created. Note that this is not the same as the PSBT version number specified by the PSBT_GLOBAL_VERSION field.
| 2
| 0
@@ -453,7 +453,7 @@ The currently defined per-input types are defined as follows:
| Taproot Key BIP 32 Derivation Path
| <tt>PSBT_IN_TAP_BIP32_DERIVATION = 0x16</tt>
| <tt><32 byte xonlypubkey></tt>
-| A 32 byte X-only public key involved in this input. It may be the internal key, or a key present in a leaf script.
+| A 32 byte X-only public key involved in this input. It may be the output key, the internal key, or a key present in a leaf script.
| <tt><compact size uint number of hashes> <32 byte leaf hash>* <4 byte fingerprint> <32-bit little endian uint path element>*</tt>
| A compact size unsigned integer representing the number of leaf hashes, followed by a list of leaf hashes, followed by the 4 byte master key fingerprint concatenated with the derivation path of the public key. The derivation path is represented as 32-bit little endian unsigned integer indexes concatenated with each other. Public keys are those needed to spend this output. The leaf hashes are of the leaves which involve this public key. The internal key does not have leaf hashes, so can be indicated with a <tt>hashes len</tt> of 0. Finalizers should remove this field after <tt>PSBT_IN_FINAL_SCRIPTWITNESS</tt> is constructed.
|
@@ -591,7 +591,7 @@ determine which outputs are change outputs and verify that the change is returni
| Taproot Key BIP 32 Derivation Path
| <tt>PSBT_OUT_TAP_BIP32_DERIVATION = 0x07</tt>
| <tt><32 byte xonlypubkey></tt>
-| A 32 byte X-only public key involved in this output. It may be the internal key, or a key present in a leaf script.
+| A 32 byte X-only public key involved in this output. It may be the output key, the internal key, or a key present in a leaf script.
| <tt><compact size uint number of hashes> <32 byte leaf hash>* <4 byte fingerprint> <32-bit little endian uint path element>*</tt>
| A compact size unsigned integer representing the number of leaf hashes, followed by a list of leaf hashes, followed by the 4 byte master key fingerprint concatenated with the derivation path of the public key. The derivation path is represented as 32-bit little endian unsigned integer indexes concatenated with each other. Public keys are those needed to spend this output. The leaf hashes are of the leaves which involve this public key. The internal key does not have leaf hashes, so can be indicated with a <tt>hashes len</tt> of 0. Finalizers should remove this field after <tt>PSBT_IN_FINAL_SCRIPTWITNESS</tt> is constructed.
|
@@ -718,15 +718,8 @@ sign_non_witness(script_code, i):
if IsMine(key) and IsAcceptable(sighash_type):
sign(non_witness_sighash(script_code, i, input))
-for input,i in enumerate(psbt.inputs):
- if non_witness_utxo.exists:
- assert(sha256d(non_witness_utxo) == psbt.tx.input[i].prevout.hash)
- if redeemScript.exists:
- assert(non_witness_utxo.vout[psbt.tx.input[i].prevout.n].scriptPubKey == P2SH(redeemScript))
- sign_non_witness(redeemScript, i)
- else:
- sign_non_witness(non_witness_utxo.vout[psbt.tx.input[i].prevout.n].scriptPubKey, i)
- else if witness_utxo.exists:
+for input, i in enumerate(psbt.inputs):
+ if witness_utxo.exists:
if redeemScript.exists:
assert(witness_utxo.scriptPubKey == P2SH(redeemScript))
script = redeemScript
@@ -737,6 +730,13 @@ for input,i in enumerate(psbt.inputs):
else if IsP2WSH(script):
assert(script == P2WSH(witnessScript))
sign_witness(witnessScript, i)
+ else if non_witness_utxo.exists:
+ assert(sha256d(non_witness_utxo) == psbt.tx.input[i].prevout.hash)
+ if redeemScript.exists:
+ assert(non_witness_utxo.vout[psbt.tx.input[i].prevout.n].scriptPubKey == P2SH(redeemScript))
+ sign_non_witness(redeemScript, i)
+ else:
+ sign_non_witness(non_witness_utxo.vout[psbt.tx.input[i].prevout.n].scriptPubKey, i)
else:
assert False
</pre>
diff --git a/bip-0300.mediawiki b/bip-0300.mediawiki
index 20d1936..ab81c32 100644
--- a/bip-0300.mediawiki
+++ b/bip-0300.mediawiki
@@ -75,54 +75,44 @@ D1 is a list of active sidechains. D1 is updated via M1 and M2.
| Version number.
|-
| 3
-| String KeyID
-| string
-| Used to derive all sidechain deposit addresses.
-|-
-| 4<br />
-| Sidechain Private Key
-| string
-| The private key of the sidechain deposit script.
-|- style="vertical-align:middle;"
-| 5<br />
-| ScriptPubKey
-| CScript
-| Where the sidechain coins go. This always stays the same, even though the CTIP (UTXO) containing the coins is always changing.
-|- style="vertical-align:middle;"
-| 6
| Sidechain Name
| string
| A human-readable name of the sidechain.
|- style="vertical-align:middle;"
-| 7
+| 4
| Sidechain Description
| string
| A human-readable name description of the sidechain.
|- style="vertical-align:middle;"
-| 8
+| 5
| Hash1 - tarball hash
| uint256
| Intended as the sha256 hash of the tar.gz of the canonical sidechain software. (This is not enforced anywhere by Bip300, and is for human purposes only.)
|- style="vertical-align:middle;"
-| 9
+| 6
| Hash2 - git commit hash
| uint160
| Intended as the git commit hash of the canonical sidechain node software. (This is not enforced anywhere by Bip300, and is for human purposes only.)
|-
-| 10
+| 7
| Active
| bool
| Does this sidechain slot contain an active sidechain?<br />
|- style="vertical-align:middle;"
-| 11
-| "CTIP" -- Part 1 "TxID"
+| 8
+| Activation Status
+| int, int
+| The age of the proposal (in blocks) and the number of "fails" (a "fail" is a block that does NOT ack the sidechain). This is discarded after the sidechain activates.
+|- style="vertical-align:middle;"
+| 9
+| "CTIP" -- "TxID"
| uint256
-| The CTIP, or "Critical (TxID, Index) Pair" is a variable for keeping track of where the sidechain's money is (ie, which member of the UTXO set).
+| A UTXO that holds the sidechain's money. (Part 1 of 2).
|- style="vertical-align:middle;"
-| 12
-| "CTIP" -- Part 2 "Index"
+| 10
+| "CTIP" -- "vout"
| int32_t
-| Of the CTIP, the second element of the pair: the Index. See #11 above.
+| A UTXO that holds the sidechain's money. (Part 2 of 2).
|}
@@ -138,7 +128,7 @@ D2 is driven by M3, M4, M5, and M6. Those messages enforce the following princip
# From one block to the next, the value in "ACKs" may either increase or decrease, by a maximum of 1 (see M4).
# If a Bundle's "ACKs" reach 13150 or greater, it "succeeds" and its corresponding M6 message can be included in a block.
# If the M6 of a Bundle is paid out, it is also removed.
-# If a Bundle cannot possibly succeed ( 13500 - "ACKs" > "Blocks Remaining" ), it is removed immediately.
+# If a Bundle cannot possibly succeed ( 13150 - "ACKs" > "Blocks Remaining" ), it is removed immediately.
{| class="wikitable"
@@ -158,19 +148,21 @@ D2 is driven by M3, M4, M5, and M6. Those messages enforce the following princip
| A withdrawal attempt. Specifically, it is a "blinded transaction id" (ie, the double-Sha256 of a txn that has had two fields zeroed out, see M6) of a txn which could withdraw funds from a sidechain.
|-
| 3
-| ACKs (Work Score)
+| Work Score (ACKs)
| uint16_t
-| The current ACK-counter, which is the total number of ACKs (the PoW that has been used to validate the Bundle).
+| How many miner upvotes a withdrawal has. Starts at 0. Fastest possible rate of increase is 1 per block.
|-
| 4
-| Blocks Remaining (Age)
+| Blocks Remaining
| uint16_t
-| The number of blocks which this Bundle has remaining to accumulate ACKs
+| How long this bundle has left to live (measured in blocks). Starts at 26,300 and counts down.
|}
+D1, with all 256 slots active, reaches a maximum size of: 256 * ( 1 (map index) + 36 (outpoint) + 8 (amount) ) = 11,520 bytes.
+D2, under normal conditions, would reach a size of: (38 bytes per withdrawal * 256 sidechains) = 9,728 bytes.
-
+It is possible to spam D2. A miner can add the max M3s (256) every block, forever. This costs 9,728 on-chain bytes per block, an opportunity cost of about 43 txns. It results in no benefit to the miner whatsoever. D2 will eventually hit a ceiling at 124.5568 MB. (By comparison, the Bitcoin UTXO set is about 7,000 MB.) When the attacker stops, D2 will eventually shrink back down to 9,728 bytes.
=== The Six New Bip300 Messages ===
@@ -188,19 +180,22 @@ M1 is a coinbase OP Return output containing the following:
N-byte - The serialization of the sidechain.
1-byte nSidechain
4-byte nVersion
- x-byte strKeyID
- x-byte strPrivKey
- x-byte scriptPubKey
x-byte title
x-byte description
32-byte hashID1
20-byte hashID2
-===== Examples =====
-<img src="bip-0300/m1-gui.jpg?raw=true" align="middle"></img>
+M1 is invalid if:
+
+* It would add a duplicate entry to D1.
+* There is already an M1 in this block.
+* The sidechain serialization does not parse.
+
+Otherwise:
+
+* A new entry is added to D1, whose initial Activation Status is (age=0, fails=0).
-<img src="bip-0300/m1-cli.png?raw=true" align="middle"></img>
==== M2 -- ACK Sidechain Proposal ====
@@ -208,14 +203,30 @@ M2 is a coinbase OP Return output containing the following:
1-byte - OP_RETURN (0x6a)
4-byte - Message header (0xD6E1C5BF)
- 32-byte - sha256D hash of sidechain's serialization
+ 32-byte - the sha256D hash of the sidechain's serialization
+
-===== Notes =====
+M2 is ignored if it doesn't parse, or if it is for a sidechain that doesn't exist.
-The new M1/M2 validation rules are:
+M2 is invalid if:
-# Any miner can propose a new sidechain (M1) at any time. This procedure resembles BIP 9 soft fork activation: the network must see a properly-formatted M1, followed by "acknowledgment" of the sidechain (M2) in 90% of the following 2016 blocks.
-# Bip300 comes with only 256 sidechain-slots. If all are used, it is possible to "overwrite" a sidechain. This requires vastly more M2 ACKs -- 50% of the following 26300 blocks must contain an M2. The possibility of overwrite, does not change the Bip300 security assumptions (because we already assume that the sidechain is vulnerable to miners, at a rate of 1 catastrophe per 13150 blocks).
+* An M2 is already in this block.
+* It tries to ACK two different M1s for the same slot.
+
+Otherwise:
+
+* The sidechain is "ACK"ed and does NOT get a "fail" for this block. (As it otherwise would.)
+
+A sidechain fails to activate if:
+
+* If the slot is unused: during the next 2016 blocks, it accumulates 201 fails. (Ie, 90% threshold).
+* If the slot is in use: during the next 26,300 blocks, it accumulates 13,150 fails. (Ie, 50% threshold).
+
+( Thus we can overwrite a used sidechain slot. Bip300 sidechains are already vulnerable to one catastrophe per 13150 blocks (the invalid withdrawal) so this slot-overwrite option does not change the security assumptions. )
+
+Otherwise, the sidechain activates (Active is set to TRUE).
+
+In the block in which the sidechain activates, the coinbase MUST include at least one 0-valued OP_DRIVECHAIN output. This output becomes the initial CTIP for the sidechain.
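+
+The following minimal Python sketch is illustrative only; the constant and function names are ours, not part of this BIP. It summarizes the bookkeeping above: every block either ACKs a pending proposal or adds a "fail", and the proposal is rejected once it reaches the fail threshold for its window.
+
+ # Illustrative sketch only -- names and structure are hypothetical.
+ UNUSED_SLOT = (2016, 201)       # (window in blocks, max fails): ~90% ack threshold
+ USED_SLOT   = (26300, 13150)    # (window in blocks, max fails): ~50% ack threshold
+ 
+ def update_activation_status(age, fails, acked_this_block, slot_in_use):
+     # Apply one block's outcome to a pending proposal's Activation Status (D1).
+     window, max_fails = USED_SLOT if slot_in_use else UNUSED_SLOT
+     age += 1
+     if not acked_this_block:
+         fails += 1
+     if fails >= max_fails:
+         return "failed", age, fails
+     if age >= window:
+         return "active", age, fails   # Activation Status is discarded once active
+     return "pending", age, fails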
@@ -227,7 +238,7 @@ For an M6 to be valid, it must be first "prepped" by one M3 and then 13,150+ M4s
===== What are Bundles? =====
-Sidechain withdrawals take the form of “Bundles” -- named because they "bundle up" many individual withdrawal-requests into a single rare layer1 transaction.
+Sidechain withdrawals take the form of "Bundles" -- named because they "bundle up" many individual withdrawal-requests into a single rare layer1 transaction.
Sidechain full nodes aggregate the withdrawal-requests into a big set. The sidechain calculates what M6 would have to look like, to pay all of these withdrawal-requests out. Finally, the sidechain calculates what the hash of this M6 would be. This 32-byte hash identifies the Bundle.
@@ -248,11 +259,18 @@ M3 is a coinbase OP Return output containing the following:
1-byte - OP_RETURN (0x6a)
4-byte - Commitment header (0xD45AA943)
32-byte - The Bundle hash, to populate a new D2 entry
+ 1-byte - nSidechain (the slot number)
+
+M3 is ignored if it does not parse, or if it is for a sidechain that doesn't exist.
+
+M3 is invalid if:
-The new validation rules pertaining to M3 are:
+* This block already has an M3 for that nSidechain.
+* A bundle with this hash is already in D2.
+* A bundle with this hash already paid out.
+* A bundle with this hash was rejected in the past.
-# If the network detects a properly-formatted M3, it must add an entry to D2 in the very next block. The starting "Blocks Remaining" value is 26,299. The starting ACKs count is 1.
-# Each block can only contain one M3 per sidechain.
+Otherwise: M3 adds an entry to D2, with initial ACK score = 1 and initial Blocks Remaining = 26,299. (Merely being added to D2 does count as your first upvote.)
Once a Bundle is in D2, how can we give it enough ACKs to make it valid?
@@ -263,44 +281,199 @@ M4 is a coinbase OP Return output containing the following:
1-byte - OP_RETURN (0x6a)
4-byte - Commitment header (0xD77D1776)
1-byte - Version
- n-byte - The vector describing the "upvoted" bundle-choice, for each sidechain.
+ n-byte - The "upvote vector" -- describes which bundle-choice is "upvoted", for each sidechain.
-Version 0x01 uses one byte per sidechain, and applies in most cases. Version 0x02 uses two bytes per sidechain and applies in unusual situations where at least one sidechain has more than 256 distinct withdrawal-bundles in progress at one time. Other interesting versions are possible: 0x03 might say "do exactly what was done in the previous block" (which could consume a fixed 6 bytes total, regardless of how many sidechains). 0x04 might say "upvote everyone who is clearly in the lead" (which also would require a mere 6 bytes), and so forth.
+The upvote vector will code "abstain" as 0xFF (or 0xFFFF); it will code "alarm" as 0xFE (or 0xFFFE). Otherwise it simply indicates which withdrawal-bundle in the list is the one to be "upvoted".
+
+For example: if there are two sidechains, and we wish to upvote the 7th bundle on sidechain #1 plus the 4th bundle on sidechain #2, then the upvote vector would be { 07, 04 }. And M4 would be [0x6A,D77D1776,01,06,03] (version 0x01; the encoded indexes are 0-based).
+
+The version number allows us to shrink the upvote vector in many cases.
+Version 0x00 omits the upvote vector entirely (ie, 6 bytes for the whole M4) and sets this block's M4 equal to the previous block's M4.
+Version 0x01 uses one byte per sidechain, and can be used while all ACKed withdrawals have an index under 256 (ie, 99.99%+ of the time).
+Version 0x02 uses a full two bytes per sidechain (each encoded in little endian), but it always works no matter how many withdrawal proposals exist.
+Version 0x03 omits the upvote vector, and instead upvotes only those withdrawals that are leading their rivals by at least 50 votes.
If a sidechain has no pending bundles, then it is skipped over when M4 is created and parsed.
-The upvote vector will code "abstain" as 0xFF (or 0xFFFF); it will code "alarm" as 0xFE (or 0xFFFE). Otherwise it simply indicates which withdrawal-bundle in the list, is the one to be "upvoted". For example, if there are two sidechains, and we wish to upvote the 7th bundle on sidechain #1 plus the 4th bundle on sidechain #2, then the vector would be 0x0704.
+For example, an upvote vector of { 2, N/A, 1 } would be represented as [0x6A,D77D1776,01,01,00]. It means: "upvote the second bundle in sidechain #1, and the first bundle in sidechain #3" (sidechain #2 is skipped because it has no bundles proposed).
-The M4 message will be invalid (and invalidate the block), if it tries to upvote a Bundle that doesn't exist (for example, trying to upvote the 7th bundle on sidechain #2, when sidechain #2 has only three bundles). If there are no Bundles at all (no one is trying to withdraw from any sidechain), then *any* M4 message present in the coinbase will be invalid. If M4 is NOT present in a block, then it is treated as "abstain".
+An upvote vector of { N/A, N/A, 4 } would be [0x6A,D77D1776,01,03].
-The ACKed withdrawal will gain one point for its ACK field. Therefore, the ACK-counter of any Bundle can only change by (-1,0,+1).
-Within a sidechain-group, upvoting one Bundle ("+1") requires you to downvote all other Bundles in that group. However, the minimum ACK-counter is zero. While only one Bundle can be upvoted at once; the whole group can all be unchanged at once ("abstain"), and they can all be downvoted at once ("alarm").
+The M4 message will be invalid (and invalidate the block), if:
+
+* It tries to upvote a Bundle that doesn't exist. (For example, trying to upvote the 7th bundle on sidechain #2, when sidechain #2 has only three bundles.)
+* There are no Bundles at all, from any sidechain.
+
+If M4 is NOT present in a block, then it is treated as "abstain".
+
+If M4 is present and valid: each withdrawal-bundle that is ACKed, will gain one upvote.
+
+Important: Within a sidechain-group, upvoting one Bundle ("+1") automatically downvotes ("-1") all other Bundles in that group. However, the minimum ACK-counter is zero. While only one Bundle can be upvoted at a time, the whole group can be left unchanged at once ("abstain"), or all downvoted at once ("alarm").
+
+For example:
+
+{| class="wikitable"
+|-
+! SC#
+! Bundle Hash
+! ACKs
+! Blocks Remaining
+|-
+| 1
+| h1
+| 45
+| 22,109
+|-
+| 1
+| h2
+| 12
+| 22,008
+|-
+| 2
+| h3
+| 13
+| 22,999
+|-
+| 2
+| h4
+| 8
+| 23,550
+|-
+| 2
+| h5
+| 2
+| 22,560
+|}
+
+
+...in block 900,000 could become...
+
+
+{| class="wikitable"
+|-
+! SC#
+! Bundle Hash
+! ACKs
+! Blocks Remaining
+|-
+| 1
+| h1
+| 46
+| 22,108
+|-
+| 1
+| h2
+| 11
+| 22,007
+|-
+| 2
+| h3
+| 12
+| 22,998
+|-
+| 2
+| h4
+| 9
+| 23,549
+|-
+| 2
+| h5
+| 1
+| 22,559
+|}
+
+...if M4 were [0x6A,D77D1776,01,00,01].
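+
+As an illustrative aid only (the constant and function names below are ours, not normative), the version-0x01 byte layout used in the examples above can be produced as follows.
+
+ # Illustrative sketch of the version-0x01 M4 byte layout used in the examples above.
+ M4_HEADER = bytes.fromhex("D77D1776")
+ ABSTAIN, ALARM = 0xFF, 0xFE
+ 
+ def encode_m4_v1(upvotes):
+     # upvotes: one entry per sidechain that has pending bundles, in slot order.
+     # Each entry is a 0-based bundle index, ABSTAIN, or ALARM.
+     assert all(0 <= u <= 0xFF for u in upvotes)
+     return bytes([0x6A]) + M4_HEADER + bytes([0x01]) + bytes(upvotes)
+ 
+ # Upvote the first bundle on sidechain #1 and the second bundle on sidechain #2:
+ assert encode_m4_v1([0x00, 0x01]).hex() == "6ad77d1776010001"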
-Finally, we describe Deposits and Withdrawals.
+Finally, we describe Deposits and Withdrawals.
==== M5 -- Deposit BTC to Sidechain ====
-Both M5 and M6 are regular Bitcoin txns. They are distinguished from regular txns (non-M5 non-M6 txns), when they select one of the special Bip300 CTIP UTXOs as one of their inputs (see D1).
+Each sidechain stores all its BTC in one UTXO, called the "CTIP".
+
+By definition, an M5 is a transaction which spends the CTIP and '''increases''' the quantity of coins. An M6 is a transaction which spends the CTIP and '''decreases''' the quantity of coins in the CTIP. See [https://github.com/LayerTwo-Labs/mainchain/blob/391ab390adaa19f92871d769f8e120ca62c1cf14/src/validation.cpp#L688-L801 here].
+
+Every time a deposit/withdrawal is made, the old CTIP is spent and a new one is created. (Deposits/Withdrawals never cause UTXO bloat.) At all times, the CTIP of each sidechain is cached in D1 (above).
-All of a sidechain’s coins, are stored in one UTXO, called the "CTIP". Every time a deposit or withdrawal is made, the CTIP changes. Each deposit/withdrawal will select the sidechains CTIP, and generate a new CTIP. (Deposits/Withdrawals never cause UTXO bloat.) The current CTIP is cached in D1 (above).
+Every M5 is valid, as long as:
-If the '''quantity of coins''', in the from-CTIP-to-CTIP transaction, goes '''up''', (ie, if the user is adding coins), then the txn is treated as a Deposit (M5). Else it is treated as a Withdrawal (M6). See [https://github.com/drivechain-project/mainchain/blob/e37b008fafe0701b8313993c8b02ba41dc0f8a29/src/validation.cpp#L667-L780 here].
+* It has exactly one OP_DRIVECHAIN output -- this becomes the new CTIP.
+* The new CTIP has '''more''' coins in it than before.
-As far as mainchain consensus is concerned, all deposits to a sidechain are always valid.
==== M6 -- Withdraw BTC from a Sidechain ====
We come, finally, to the critical matter: where users can take their money *out* of the sidechain.
-First, M6 must obey the same CTIP rules of M5 (see immediately above).
+M6 is invalid if:
+
+* The blinded hash of M6 does NOT match one of the approved Bundle-hashes. (In other words: M6 must first be approved by 13,150 upvotes.)
+* The first output of M6 is NOT an OP_DRIVECHAIN. (This OP_DRIVECHAIN becomes the new CTIP. In other words: all non-withdrawn coins are paid back to the sidechain.)
+* The second output is NOT a zero-value OP_RETURN script of exactly 10 bytes, of which 8 bytes are a serialized Bitcoin amount.
+* The txn fee of M6 is NOT exactly equal to the amount of the previous bullet point.
+* There are additional OP_DRIVECHAIN outputs after the first one.
+
+Else, M6 is valid.
+
+(The point of the third and fourth bullet points is to allow the bundle hash to cover the L1 transaction fee.)
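+
+A rough Python sketch of these accounting checks follows. It is illustrative only: the transaction model is hypothetical, the amount is assumed to use the usual little-endian serialization, and <code>is_op_drivechain</code> is a simplified placeholder for the script pattern described in the OP_DRIVECHAIN section below.
+
+ # Illustrative sketch only: hypothetical transaction model, not Bitcoin Core code.
+ def is_op_drivechain(script):
+     # Simplified placeholder; see the OP_DRIVECHAIN section below for the full pattern.
+     return len(script) == 4 and script[0] == 0xB4
+ 
+ def check_m6_accounting(outputs, tx_fee):
+     # outputs: list of (script_bytes, value_sats). Checks only the output/fee rules
+     # above; the bundle-hash approval check is out of scope here.
+     if len(outputs) < 2:
+         return False
+     first_script, _ = outputs[0]
+     if not is_op_drivechain(first_script):             # the new CTIP must come first
+         return False
+     second_script, second_value = outputs[1]
+     # Zero-value OP_RETURN of exactly 10 bytes: OP_RETURN, 8-byte push, 8-byte amount.
+     if second_value != 0 or len(second_script) != 10 or second_script[:2] != b"\x6a\x08":
+         return False
+     declared_fee = int.from_bytes(second_script[2:], "little")
+     if tx_fee != declared_fee:                         # the fee must equal that amount
+         return False
+     if any(is_op_drivechain(s) for s, _ in outputs[2:]):  # no extra OP_DRIVECHAIN outputs
+         return False
+     return True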
-Second, an M6 is only valid for inclusion in a block, if its blinded TxID matches an "approved" Bundle hash (ie, one with an ACK score of 13150+). In other words, an M6 can only be included in a block, after the 3+ month (13150 block) ceremony.
+===OP_DRIVECHAIN===
+
+This proposal adds a single new opcode, OP_DRIVECHAIN, which has strict semantics for usage.
+OP_NOP5 (0xb4) is redefined as OP_DRIVECHAIN if and only if the entire script is OP_DRIVECHAIN followed by a single-byte push and OP_TRUE (exactly 4 bytes).
+The single-byte push contains the sidechain number.
+Note that this is not a "script number", and cannot be OP_1..OP_16 or any other kind of push; it is also unsigned, and must not be padded even if over sidechain number 127.
+The final OP_TRUE is to ensure this change remains a softfork:
+without it, sidechain numbers 0 and 128 would cause the legacy script interpreter to fail.
+
+If an OP_DRIVECHAIN input is spent, the additional rules for M5 or M6 (see above) must be enforced.
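+
+A minimal sketch of this pattern check (illustrative only; the byte values are the standard encodings of OP_NOP5, a 1-byte push, and OP_TRUE):
+
+ # Illustrative sketch: recognize the exact 4-byte OP_DRIVECHAIN script described above.
+ OP_DRIVECHAIN = 0xB4   # formerly OP_NOP5
+ OP_TRUE = 0x51
+ 
+ def is_op_drivechain(script):
+     # True iff script is exactly: OP_DRIVECHAIN, a 1-byte push of the sidechain
+     # number (0x01 <n>), and OP_TRUE -- 4 bytes in total.
+     return (len(script) == 4
+             and script[0] == OP_DRIVECHAIN
+             and script[1] == 0x01          # raw 1-byte push, never OP_1..OP_16
+             and script[3] == OP_TRUE)
+ 
+ def sidechain_number(script):
+     assert is_op_drivechain(script)
+     return script[2]                       # unsigned, unpadded single byte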
+
+====Weight adjustments====
+
+To account for the additional drivechain checks, each message adds to the block's weight:
+
+{|class="wikitable"
+! Message !! Additional weight
+|-
+| M1 || 840
+|-
+| M2 || 336
+|-
+| M3 || 848
+|-
+| M4 || ?
+|-
+| M5 || 340
+|-
+| M6 || 352
+|}
-Third, M6 must meet two accounting criteria, lest it be invalid:
-# "Give change back to Escrow" -- The first output, TxOut0, must be paid back to the sidechain's Bip300 script. In other words, all non-withdrawn coins must be paid back into the sidechain.
-# "No traditional txn fee" -- For this txn, the sum of all inputs must equal the sum of all outputs. No traditional tx fee is possible. (Of course, there is still a txn fee for miners: it is paid via an OP TRUE output in the Bundle.) We want the withdraw-ers to set the fee "inside" the Bundle, and ACK it over 3 months like everything else.
+<!--
+get: 168 WU for 1 byte
+delete: free?
+create: 168 WU for 33 bytes
+hash: 4 WU??
+search outputs: ?
+permanent "proposal rejected" lookup: infinite??
+read prev block: a lot?? maybe store...
+comparison: 4 WU?
+encode script: ?
+
+M1: 3 get, 2 create
+M2: 1 get, 1 delete, 1 create
+M3: 3 get, 1 delete, 2 create, 2 hash
+ for each coinbase output: search for prior M3 for this sidechain
+ lookup if M3 was ever rejected or paid in the past
+ for each prior proposed withdrawl: (included in 1 get+delete+create)
+M4: 1 get
+ + for every proposed withdraw, 1 get, 1 delete, 1 create, 1 add
+ v0 needs to read and parse previous block
+M5/M6 OP_DRIVECHAIN spends require 2 additional input lookups
+ for each output: check for duplicate OP_DRIVECHAINs
+ amount comparison
+ M6: encode & compare fee amount, 2 hash, counter compare
+-->
==Backward compatibility==
@@ -331,7 +504,7 @@ See http://www.drivechain.info/literature/index.html
==Credits==
-Thanks to everyone who contributed to the discussion, especially: ZmnSCPxj, Adam Back, Peter Todd, Dan Anderson, Sergio Demian Lerner, Chris Stewart, Matt Corallo, Sjors Provoost, Tier Nolan, Erik Aronesty, Jason Dreyzehner, Joe Miyamoto, Ben Goldhaber.
+Thanks to everyone who contributed to the discussion, especially: Luke Dashjr, ZmnSCPxj, Adam Back, Peter Todd, Dan Anderson, Sergio Demian Lerner, Chris Stewart, Matt Corallo, Sjors Provoost, Tier Nolan, Erik Aronesty, Jason Dreyzehner, Joe Miyamoto, Ben Goldhaber.
==Copyright==
diff --git a/bip-0324.mediawiki b/bip-0324.mediawiki
index 47032dd..8050b15 100644
--- a/bip-0324.mediawiki
+++ b/bip-0324.mediawiki
@@ -31,7 +31,7 @@ Bitcoin is a permissionless network whose purpose is to reach consensus over pub
This proposal for a new P2P protocol version (v2) aims to improve upon this by raising the costs for performing these attacks substantially, primarily through the use of unauthenticated, opportunistic transport encryption. In addition, the bytestream on the wire is made pseudorandom (i.e., indistinguishable from uniformly random bytes) to a passive eavesdropper.
-* Encryption, even when it is unauthenticated and only used when both endpoints support v2, impedes eavesdropping by forcing the attacker to become active: either by performing a persistent man-in-the-middle (MitM) attack, by downgrading connections to v1, or by spinning up their own nodes and getting honest nodes to make connections to them. Active attacks at scale are more resource intensive in general, but in case of manual, deliberate connections (as opposed to automatic, random ones) they are also in principle detectable: even very basic checks, e.g., operators manually comparing protocol versions and session IDs (as supported by the proposed protocol), will expose the attacker.
+* Encryption, even when it is unauthenticated and only used when both endpoints support v2, impedes eavesdropping by forcing the attacker to become active: either by performing a persistent man-in-the-middle (MitM) attack, by downgrading connections to v1, or by spinning up their own nodes and getting honest nodes to make connections to them. Active attacks at scale are more resource intensive in general, but in the case of manual, deliberate connections (as opposed to automatic, random ones), they are also in principle detectable: even very basic checks, e.g., operators manually comparing protocol versions and session IDs (as supported by the proposed protocol), will expose the attacker.
* Tampering, while already an inherently active attack, is costlier if the attacker is forced to maintain the state necessary for a full MitM interception.
* A pseudorandom bytestream excludes identification techniques based on pattern matching, and makes it easier to shape the bytestream in order to mimic other protocols used on the Internet. This raises the cost of a connection censoring firewall, forcing them to either resort to a full MitM attack, or operate on a more obvious allowlist basis, rather than a blocklist basis.
@@ -39,7 +39,7 @@ This proposal for a new P2P protocol version (v2) aims to improve upon this by r
As we have argued above, unauthenticated encryption<ref name="what_does_auth_mean">'''What does ''authentication'' mean in this context?''' Unfortunately, the term authentication in the context of secure channel protocols is ambiguous. It can refer to:
* The encryption scheme guaranteeing that a message obtained via successful decryption was encrypted by someone having access to the (symmetric) encryption key, and not modified after encryption by a third party. The proposal in this document achieves that property through the use of an AEAD.
-* The communication protocol establishing that the communication partner's identity matches who we expect them to be, through some public key mechanism. The proposal in this document does '''not''' include such a mechanism.</ref> provides strictly better security than no encryption. Thus all connections should use encryption, even if they are unauthenticated.
+* The communication protocol establishing that the communication partner's identity matches who we expect them to be, through some public key mechanism. The proposal in this document does '''not''' include such a mechanism.</ref> provides strictly better security than no encryption. Thus, all connections should use encryption, even if they are unauthenticated.
When it comes to authentication, the situation is not as clear as for encryption. Due to Bitcoin's permissionless nature, authentication will always be restricted to specific scenarios (e.g., connections between peers belonging to the same operator), and whether some form of (possibly partially anonymous) authentication is desired depends on the specific requirements of the involved peers. As a consequence, we believe that authentication should be addressed separately (if desired), and this proposal aims to provide a solid technical basis for future protocol upgrades, including the addition of optional authentication (see [https://github.com/sipa/writeups/tree/main/private-authentication-protocols Private authentication protocols]).
@@ -51,9 +51,9 @@ A pseudorandom bytestream is not self-identifying. Moreover, it is unopinionated
''' Why not use a secure tunnel protocol? '''
-Our goal includes making opportunistic encryption ubiquitously available, as that provides the best defense against large-scale attacks. That implies protecting both the manual, deliberate connections node operators instruct their software to make, as well as the the automatic connections Bitcoin nodes make with each other based on IP addresses obtained via gossip. While encryption per se is already possible with proxy networks or VPN networks, these are not desirable or applicable for automatic connections at scale:
-* Proxy networks like Tor or I2P introduce a separate address space, independent from network topology, with a very low cost per address making eclipse attacks cheaper. In comparison, clearnet IPv4 and IPv6 networks make obtaining multiple network identities in distinct, well-known network partitions carry a non-trivial cost. Thus, it is not desirable to have a substantial portion of nodes be exclusively connected this way, as this would significantly reduce Eclipse attack costs.<ref name="pure_tor_attack">'''Why is it a bad idea to have nodes exclusively connected over Tor?''' See the [https://arxiv.org/abs/1410.6079 Bitcoin over Tor isn't a Good Idea] paper</ref> Additionally, Tor connections come with significant bandwidth and latency costs that may not be desirable for all network users.
-* VPN networks like WireGuard or OpenVPN inherently define a private network, which requires manual configuration and therefore is not a realistic avenue for automatic connections.
+Our goal includes making opportunistic encryption ubiquitously available, as that provides the best defense against large-scale attacks. That implies protecting both the manual, deliberate connections node operators instruct their software to make, and the automatic connections Bitcoin nodes make with each other based on IP addresses obtained via gossip. While encryption per se is already possible with proxy networks or VPN protocols, these are not desirable or applicable for automatic connections at scale:
+* Proxy networks like Tor or I2P introduce a separate address space, independent of network topology, with a very low cost per address making eclipse attacks cheaper. In comparison, clearnet IPv4 and IPv6 networks make obtaining multiple network identities in distinct, well-known network partitions carry a non-trivial cost. Thus, it is not desirable to have a substantial portion of nodes be exclusively connected this way, as this would significantly reduce Eclipse attack costs.<ref name="pure_tor_attack">'''Why is it a bad idea to have nodes exclusively connected over Tor?''' See the [https://arxiv.org/abs/1410.6079 Bitcoin over Tor isn't a Good Idea] paper</ref> Additionally, Tor connections come with significant bandwidth and latency costs that may not be desirable for all network users.
+* VPN protocols like WireGuard or OpenVPN inherently define a private network, which requires manual configuration and therefore is not a realistic avenue for automatic connections.
Thus, to achieve our goal, we need a solution that has minimal costs, works without configuration, and is always enabled – on top of any network layer rather than be part of the network layer.
@@ -61,16 +61,16 @@ Thus, to achieve our goal, we need a solution that has minimal costs, works with
While it would be possible to rely on an off-the-shelf transport encryption protocol such as TLS or Noise, the specific requirements of the Bitcoin P2P network laid out above make these protocols an unsuitable choice.
-The primary requirement which existing protocols fail to meet is a sufficiently modular treatment of encryption and authentication. As we argue above, whether and which form of authentication is desired in the Bitcoin P2P network will depend on the specific requirements of the involved peers (resulting in a mix of authenticated and unauthenticated connections), and thus the question of authentication should be decoupled from encryption. However, native support for a handful of standard authentication scenarios (e.g., using digital signatures and certificates) is at core of the design of existing general-purpose transport encryption protocols. This focus on authentication would not provide clear benefits for the Bitcoin P2P network but would come with a large amount of additional complexity.
+The primary requirement which existing protocols fail to meet is a sufficiently modular treatment of encryption and authentication. As we argue above, whether and which form of authentication is desired in the Bitcoin P2P network will depend on the specific requirements of the involved peers (resulting in a mix of authenticated and unauthenticated connections), and thus the question of authentication should be decoupled from encryption. However, native support for a handful of standard authentication scenarios (e.g., using digital signatures and certificates) is at the core of the design of existing general-purpose transport encryption protocols. This focus on authentication would not provide clear benefits for the Bitcoin P2P network but would come with a large amount of additional complexity.
-In contrast, our proposal instead aims for simple modular design that makes it possible to address authentication separately. Our proposal provides a foundation for authentication by exporting a ''session ID'' that uniquely identifies the encrypted channel. After an encrypted channel has been established, the two endpoints are able to use any authentication protocol to confirm that they have the same session ID. (This is sometimes called ''channel binding'' because the session ID binds the encrypted channel to the authentication protocol.) Since in our proposal, any authentication needs to run after an encrypted connection has been established, the price we pay for this modularity is a possibly higher number of roundtrips as opposed to other protocols that perform authentication alongside with the Diffie-Hellman key exchange.<ref name="channel_binding_noise_tls">'''Do other protocols not support exporting a session ID?''' While [https://noiseprotocol.org/noise.html#channel-binding Noise] and [https://datatracker.ietf.org/doc/draft-ietf-kitten-tls-channel-bindings-for-tls13/ TLS (as a draft)] offer similar protocol extensions for exporting session IDs, using channel binding for authentication is not at the focus of their design and would not avoid the bulk of additional complexity due to the native support of authentication methods. </ref> However, the resulting increase in connection establishment latency is a not a concern for Bitcoin's long-lived connections, [https://www.dsn.kastel.kit.edu/bitcoin/ which typically live for hours or even weeks].
+In contrast, our proposal instead aims for a simple modular design that makes it possible to address authentication separately. Our proposal provides a foundation for authentication by exporting a ''session ID'' that uniquely identifies the encrypted channel. After an encrypted channel has been established, the two endpoints are able to use any authentication protocol to confirm that they have the same session ID. (This is sometimes called ''channel binding'' because the session ID binds the encrypted channel to the authentication protocol.) Since in our proposal, any authentication needs to run after an encrypted connection has been established, the price we pay for this modularity is a possibly higher number of roundtrips as opposed to other protocols that perform authentication alongside the Diffie-Hellman key exchange.<ref name="channel_binding_noise_tls">'''Do other protocols not support exporting a session ID?''' While [https://noiseprotocol.org/noise.html#channel-binding Noise] and [https://datatracker.ietf.org/doc/draft-ietf-kitten-tls-channel-bindings-for-tls13/ TLS (as a draft)] offer similar protocol extensions for exporting session IDs, using channel binding for authentication is not at the focus of their design and would not avoid the bulk of additional complexity due to the native support of authentication methods. </ref> However, the resulting increase in connection establishment latency is not a concern for Bitcoin's long-lived connections, [https://www.dsn.kastel.kit.edu/bitcoin/ which typically live for hours or even weeks].
Besides this fundamentally different treatment of authentication, further technical issues arise when applying TLS or Noise to our desired use case:
* Neither offers a pseudorandom bytestream.
* Neither offers native support for elliptic curve cryptography on the curve secp256k1 as otherwise used in Bitcoin. While using secp256k1 is not strictly necessary, it is the obvious choice is for any new asymmetric cryptography in Bitcoin because it minimizes the cryptographic hardness assumptions as well as the dependencies that Bitcoin software will need.
* Neither offers shapability of the bytestream.
-* Both provide a stream-based interface to the application layer whereas Bitcoin requires a packet-based interface, resulting in the need for an additional thin layer to perform packet serialization and deserialization.
+* Both provide a stream-based interface to the application layer, whereas Bitcoin requires a packet-based interface, resulting in the need for an additional thin layer to perform packet serialization and deserialization.
While existing protocols could be amended to address all of the aforementioned issues, this would negate the benefits of using them as off-the-shelf solution, e.g., the possibility to re-use existing implementations and security analyses.
@@ -97,7 +97,7 @@ The specification consists of three parts:
=== Transport layer specification ===
-In this section we define the encryption protocol for messages between peers.
+In this section, we define the encryption protocol for messages between peers.
==== Overview and design ====
@@ -105,24 +105,26 @@ We first give an informal overview of the entire protocol flow and packet encryp
'''Protocol flow overview'''
-Given a newly-established connection (typically TCP/IP) between two v2 P2P nodes, there are 3 phases the connection goes through. The first starts immediately, i.e. there are no v1 messages or any other bytes exchanged on the link beforehand. The two parties are called the '''initiator''' (who established the connection) and the '''responder''' (who accepted the connection).
+Given a newly established connection (typically TCP/IP) between two v2 P2P nodes, there are 3 phases the connection goes through. The first starts immediately, i.e. there are no v1 messages or any other bytes exchanged on the link beforehand. The two parties are called the '''initiator''' (who established the connection) and the '''responder''' (who accepted the connection).
# The '''Key exchange phase''', where nodes exchange data to establish shared secrets.
#* The initiator:
#** Generates a random ephemeral secp256k1 private key and sends a corresponding 64-byte ElligatorSwift<ref name="ellswift_paper">'''What is ElligatorSwift and why use it?''' The [https://eprint.iacr.org/2022/759.pdf SwiftEC paper] describes a method called ElligatorSwift which allows encoding elliptic curve points in a way that is indistinguishable from a uniformly distributed bitstream. While a random 256-bit string has about 50% chance of being a valid X coordinate on the secp256k1 curve, every 512-bit string is a valid ElligatorSwift encoding of a curve point, making the encoded point indistinguishable from random when using an encoder that can sample uniformly.</ref><ref name="ellswift_perf">'''How fast is ElligatorSwift?''' Our benchmarks show that ElligatorSwift encoded ECDH is about 50% more expensive than unencoded ECDH. Given the fast performance of ECDH and the low frequency of new connections, we found the performance trade-off acceptable for the pseudorandom bytestream and future censorship resistance it can enable.</ref>-encoded public key to the responder.
#** May send up to 4095<ref name="why_4095_garbage">'''How was the limit of 4095 bytes garbage chosen?''' It is a balance between having sufficient freedom to hide information, and allowing it to be large enough so that the necessary 64 bytes of public key is small compared to it on the one hand, and bandwidth waste on the other hand.</ref> bytes of arbitrary data after their public key, called '''garbage''', providing a form of shapability and avoiding a recognizable pattern of exactly 64 bytes.<ref name="why_garbage">'''Why does the affordance for garbage exist in the protocol?''' The garbage strings after the public keys are needed for shapability of the handshake. Neither peer can send decoy packets before having received at least the other peer's public key, i.e., neither peer can send more than 64 bytes before having received 64 bytes.</ref>
#* The responder:
-#** Waits until one byte is received which does not match the 12 bytes consisting of the network magic followed by "version\x00". If the first 12 bytes do match, the connection is treated as using the v1 protocol instead.<ref name="why_no_prefix_check">'''What if a v2 initiator's public key starts accidentally with these 12 bytes?''' This is so unlikely (probability of ''2<sup>-96</sup>'') to happen randomly in the v2 protocol that the initiator does not need to specifically avoid it.</ref><ref>Bitcoin Core versions <=0.4.0 and >=22.0 ignore valid P2P messages that are received prior to a VERSION message. Bitcoin Core versions between 0.4.0 and 22.0 assign a misbehavior score to the peer upon receiving such messages. v2 clients implementing this proposal will interpret any message other than VERSION received as the first message to be the initiation of a v2 connection, and will result in disconnection for v1 initiators that send any message type other than VERSION as the first message. We are not aware of any implementations where this could pose a problem.</ref>
+#** Waits until one byte is received which does not match the 16 bytes consisting of the network magic followed by "version\x00\x00\x00\x00\x00". If the first 16 bytes do match, the connection is treated as using the v1 protocol instead.<ref name="why_no_prefix_check">'''What if a v2 initiator's public key starts accidentally with these 16 bytes?''' This is so unlikely (probability of ''2<sup>-128</sup>'') to happen randomly in the v2 protocol that the initiator does not need to specifically avoid it. The optional detection of wrong-network v1 peers has a probability of ''2<sup>-96</sup>'', which is still negligible compared to random network failures.</ref><ref>Bitcoin Core versions <=0.4.0 and >=22.0 ignore valid P2P messages that are received prior to a VERSION message. Bitcoin Core versions between 0.4.0 and 22.0 assign a misbehavior score to the peer upon receiving such messages. v2 clients implementing this proposal will interpret any message other than VERSION received as the first message to be the initiation of a v2 connection, and will result in disconnection for v1 initiators that send any message type other than VERSION as the first message. We are not aware of any implementations where this could pose a problem.</ref>
+#** If the first 4 received bytes do not match the network magic, but the 12 bytes after that do match the version message encoding above, implementations may interpret this as a v1 peer of a different network, and disconnect them.
#** Similarly generates a random ephemeral private key and sends a corresponding 64-byte ElligatorSwift-encoded public key to the initiator.
#** Similarly may send up to 4095 bytes of garbage data after their public key.
#* Both parties:
#** Receive (the remainder of) the full 64-byte public key from the other side.
#** Use X-only<ref name="xonly_ecdh">'''Why use X-only ECDH?''' Using only the X coordinate provides the same security as using a full encoding of the secret curve point but allows for more efficient implementation by avoiding the need for square roots to compute Y coordinates.</ref> ECDH to compute a shared secret from their private key and the exchanged public keys<ref name="why_ecdh_pubkeys">'''Why is the shared secret computation a function of the exact 64-byte public encodings sent?''' This makes sure that an attacker cannot modify the public key encoding used without modifying the rest of the stream. If a third party wants the ability to modify stream bytes, they need to perform a full MitM attack on the connection.</ref>, and deterministically derive from the secret 4 '''encryption keys''' (two in each direction: one for packet lengths, one for content encryption), a '''session id''', and two 16-byte '''garbage terminators'''<ref>'''What length is sufficient for garbage terminators?''' The length of the garbage terminators determines the probability of accidental termination of a legitimate v2 connection due to garbage bytes (sent prior to ECDH) inadvertently including the terminator. 16 byte terminators with 4095 bytes of garbage yield a negligible probability of such collision which is likely orders of magnitude lower than random connection failure on the Internet.</ref><ref>'''What does a garbage terminator in the wild look like?''' <div>[[File:bip-0324/garbage_terminator.png|none|256px|A garbage terminator model TX-v2 in the wild... sent by the responder]]</div>
</ref> (one in each direction) using HKDF-SHA256.
-#** Send their 16-byte garbage terminator<ref name="why_garbage_term">'''Why does the protocol need a garbage terminator?''' While it is in principle possible to use the garbage authentication packet directly as a terminator (scan until a valid authentication packet follows), this would be significantly slower than just scanning for a fixed byte sequence, as it would require recomputing a Poly1305 tag after every received byte.</ref> followed by a '''garbage authentication packet'''<ref name="why_garbage_auth">'''Why does the protocol require a garbage authentication packet?''' Otherwise the garbage would be modifiable by a third party without consequences. We want to force any active attacker to have to maintain a full protocol state. In addition, such malleability without the consequence of connection termination could enable protocol fingerprinting.</ref>, an '''encrypted packet''' (see further) with arbitrary '''contents''', and '''associated data''' equal to the garbage.
+#** Send their 16-byte garbage terminator.<ref name="why_garbage_term">'''Why does the protocol need a garbage terminator?''' While it is in principle possible to use the first packet after the garbage directly as a terminator (scan until a valid packet follows), this would be significantly slower than just scanning for a fixed byte sequence, as it would require recomputing a Poly1305 tag after every received byte.</ref>
#** Receive up to 4111 bytes, stopping when encountering the garbage terminator.
-#** Receive an encrypted packet, verify that it decrypts correctly with associated data set to the garbage received, and then ignore its contents.
-#* At this point, both parties have the same keys, and all further communication proceeds in the form of encrypted packets. Packets have an '''ignore bit''', which makes them '''decoy packets''' if set. Decoy packets are to be ignored by the receiver apart from verifying they decrypt correctly. Either peer may send such decoy packets at any point after this. These form the primary shapability mechanism in the protocol. How and when to use them is out of scope for this document.
+#* At this point, both parties have the same keys, and all further communication proceeds in the form of '''encrypted packets'''.
+#** Encrypted packets have an '''ignore bit''', which makes them '''decoy packets''' if set. Decoy packets are to be ignored by the receiver apart from verifying they decrypt correctly. Either peer may send such decoy packets at any point from here on. These form the primary shapability mechanism in the protocol. How and when to use them is out of scope for this document.
+#** For each of the two directions, the first encrypted packet that will be sent in that direction (regardless of it being a decoy packet or not) will make use of the associated authenticated data (AAD) feature of the AEAD to authenticate the garbage that has been sent in that direction.<ref name="why_garbage_auth">'''Why does the protocol authenticate the garbage?''' Without garbage authentication, the garbage would be modifiable by a third party without consequences. We want to force any active attacker to have to maintain a full protocol state. In addition, such malleability without the consequence of connection termination could enable protocol fingerprinting.</ref>
# The '''Version negotiation phase''', where parties negotiate what transport version they will use, as well as data defined by that version.<ref name="example_versions">'''What features could be added in future protocol versions?''' Examples of features that could be added in future versions include post-quantum cryptography upgrades to the handshake, and optional authentication.</ref>
#* The responder:
#** Sends a '''version packet''' with empty content, to indicate support for the v2 P2P protocol proposed by this document. Any other value for content is reserved for future versions.
@@ -134,21 +136,21 @@ Given a newly-established connection (typically TCP/IP) between two v2 P2P nodes
# The '''Application phase''', where the packets exchanged have contents to be interpreted as application data.
#* Whenever either peer has a message to send, it sends a packet with that application message as '''contents'''.
-In order to provide a means of avoiding the recognizable pattern of first messages being at least 64 bytes, a future backwards-compatible upgrade to this protocol may allow both peers to send their public key + garbage + garbage terminator in multiple rounds, slicing those bytes up into messages arbitrarily, as long as progress is guaranteed.<ref name="handshake_progress">'''How can progress be guaranteed in a backwards-compatible way?''' In order to guarantee progress, it must be ensured that no deadlock occurs, i.e., no state is reached in which each party waits for the other party indefinitely. For example, any upgrade that adheres to the following conditions will guarantee progress:
+To avoid the recognizable pattern of first messages being at least 64 bytes, a future backwards-compatible upgrade to this protocol may allow both peers to send their public key + garbage + garbage terminator in multiple rounds, slicing those bytes up into messages arbitrarily, as long as progress is guaranteed.<ref name="handshake_progress">'''How can progress be guaranteed in a backwards-compatible way?''' In order to guarantee progress, it must be ensured that no deadlock occurs, i.e., no state is reached in which each party waits for the other party indefinitely. For example, any upgrade that adheres to the following conditions will guarantee progress:
-* The initiator must start by sending at least as many bytes as necessary to mismatch the magic/version 12 bytes prefix.
-* The responder must start sending after having received at least one byte that mismatches that 12-byte prefix.
+* The initiator must start by sending at least as many bytes as necessary to mismatch the magic/version 16 bytes prefix.
+* The responder must start sending after having received at least one byte that mismatches that 16-byte prefix.
* As soon as either party has received the other peer's garbage terminator, or has received 4095 bytes of garbage, they must send their own garbage terminator. (When either of these conditions is met, the other party has nothing to respond with anymore that would be needed to guarantee progress otherwise.)
* Whenever either party receives any nonzero number of bytes, while not having sent their garbage terminator completely yet, they must send at least one byte in response without waiting for more bytes.
-* After either party has sent their garbage terminator, they must also send the garbage authentication packet without waiting for more bytes, and transition to the version negotiation phase.
+* After either party has sent their garbage terminator, they must transition to the version negotiation phase without waiting for more bytes.
Since the protocol as specified here adheres to these conditions, any upgrade which also adheres to these conditions will be backwards-compatible.</ref>
-Note that the version negotiation phase does not need to wait for the key exchange phase to complete; version packets can be sent immediately after sending the garbage authentication packet. So the first two phases together, jointly called '''the handshake''', comprise just 1.5 roundtrips:
+Note that the version negotiation phase does not need to wait for the key exchange phase to complete; version packets can be sent immediately after sending the garbage terminator. So the first two phases together, jointly called '''the handshake''', comprise just 1.5 roundtrips:
* the initiator sends public key + garbage
-* the responder sends public key + garbage + garbage terminator + garbage authentication packet + version packet
-* the initiator sends garbage terminator + garbage authentication packet + version packet
+* the responder sends public key + garbage + garbage terminator + decoy packets (optional) + version packet
+* the initiator sends garbage terminator + decoy packets (optional) + version packet
'''Packet encryption overview'''
@@ -157,19 +159,19 @@ All data on the wire after the garbage terminators takes the form of encrypted p
Each packet consists of:
* A 3-byte encrypted '''length''' field, encoding the length of the '''contents''' (between ''0'' and ''2<sup>24</sup>-1''<ref name="max_packet_length">'''Is ''2<sup>24</sup>-1'' bytes sufficient as maximum content size?''' The current Bitcoin P2P protocol has no messages which support more than 4000000 bytes of application payload. By supporting up to ''2<sup>24</sup>-1'' we can accommodate future evolutions needing more than 4 times that value. Hypothetical protocol changes that have even more data to exchange than that should probably use multiple separate messages anyway, because of the per-peer receive buffer sizes involved, and the inability to start processing a message before it is fully received. Of course, future versions of the transport protocol could change the size of the length field, if this were really needed.</ref>, inclusive).
* An authenticated encryption of the '''plaintext''', which consists of:
-** A 1-byte '''header''' which consists of transport layer protocol flags. Currently only the highest bit is defined as the '''ignore bit'''. The other bits are ignored, but this may change in future versions<ref>'''Why is the header a part of the plaintext and not included alongside the length field?''' The packet length field is the minimum information that must be available before we can leverage the standard RFC8439 AEAD. Any other data, including metadata like the header being in the content encryption makes it easier to reason about the protocol security w.r.t. data being used before it is authenticated. If the ignore bit was not part of the content, another mechanism would be needed to authenticate it; for example, it could be fed as AAD to the AEAD cipher. We feel the complexity of such an approach outweighs the benefit of saving one byte per message.</ref>.
+** A 1-byte '''header''' which consists of transport layer protocol flags. Currently, only the highest bit is defined as the '''ignore bit'''. The other bits are ignored, but this may change in future versions<ref>'''Why is the header a part of the plaintext and not included alongside the length field?''' The packet length field is the minimum information that must be available before we can leverage the standard RFC8439 AEAD. Any other data, including metadata like the header being in the content encryption makes it easier to reason about the protocol security w.r.t. data being used before it is authenticated. If the ignore bit was not part of the content, another mechanism would be needed to authenticate it; for example, it could be fed as AAD to the AEAD cipher. We feel the complexity of such an approach outweighs the benefit of saving one byte per message.</ref>.
** The variable-length '''contents'''.
-The encryption of the plaintext uses '''[https://en.wikipedia.org/wiki/ChaCha20-Poly1305 ChaCha20Poly1305]'''<ref name="why_chacha20">'''Why is ChaCha20Poly1305 chosen as basis for packet encryption?''' It is a very widely used authenticated encryption cipher (used amongst others in SSH, TLS 1.2, TLS 1.3, [https://en.wikipedia.org/wiki/QUIC QUIC], Noise, and [https://www.wireguard.com/protocol/ WireGuard]; in the latter it is currently even the only supported cipher), with very good performance in general purpose software implementations. While AES-based ciphers (including the winners in the [https://competitions.cr.yp.to/caesar.html CAESAR] competition in non-lightweight categories) perform significantly better on systems with AES hardware acceleration, they are also significantly slower in pure software implementations. We choose to optimize for the weakest hardware.</ref>, an [https://en.wikipedia.org/wiki/Authenticated_encryption authenticated encryption with associated data] (AEAD) cipher specified in [https://datatracker.ietf.org/doc/html/rfc8439 RFC 8439]. Every packet's plaintext is treated as a separate AEAD message, with a different nonce for each.
+The encryption of the plaintext uses '''[https://en.wikipedia.org/wiki/ChaCha20-Poly1305 ChaCha20Poly1305]'''<ref name="why_chacha20">'''Why is ChaCha20Poly1305 chosen as the basis for packet encryption?''' It is a very widely used authenticated encryption cipher (used among others in SSH, TLS 1.2, TLS 1.3, [https://en.wikipedia.org/wiki/QUIC QUIC], Noise, and [https://www.wireguard.com/protocol/ WireGuard]; in the latter it is currently even the only supported cipher), with very good performance in general purpose software implementations. While AES-based ciphers (including the winners in the [https://competitions.cr.yp.to/caesar.html CAESAR] competition in non-lightweight categories) perform significantly better on systems with AES hardware acceleration, they are also significantly slower in pure software implementations. We choose to optimize for the weakest hardware.</ref>, an [https://en.wikipedia.org/wiki/Authenticated_encryption authenticated encryption with associated data] (AEAD) cipher specified in [https://datatracker.ietf.org/doc/html/rfc8439 RFC 8439]. Every packet's plaintext is treated as a separate AEAD message, with a different nonce for each.
The length must be dealt with specially, as it is needed to determine packet boundaries before the whole packet is received and authenticated. As we want a stream that is pseudorandom to a passive attacker, it still needs encryption. We use unauthenticated<ref name="why_no_len_auth">'''Why is the length encryption not separately authenticated?''' Informally, the relevant security goal we aim for is to hide the number of packets and their lengths (i.e., the packet boundaries) against a passive attacker that receives the bytestream without timing or fragmentation information. (A formal definition can be found for example in [https://himsen.github.io/pdf/thesis.pdf Hansen 2016 (Definition 22)] under the name "boundary hiding against chosen-plaintext attacks (BH-CPA)".) However, we do not aim to hide packet boundaries against active attackers because active attackers can always exploit the fact that the Bitcoin P2P protocol is largely query-response based: they can trickle the bytes on the stream one-by-one unmodified and observe when a response comes (see [https://himsen.github.io/pdf/thesis.pdf Hansen 2016 (Section 3.9)] for an in-depth discussion). With that in mind, we accept that an active (non-MitM) attacker is able to figure out some information about packet boundaries by flipping certain bits in the unauthenticated length field, and observing the other side disconnecting immediately or later. Thus, we choose to use unauthenticated encryption for the length data, which is sufficient to achieve boundary hiding against passive attackers, and saves 16 bytes of bandwidth per packet.</ref> '''ChaCha20''' encryption for this, with an independent key. Note that the plaintext length is still implicitly authenticated by the encryption of the plaintext, but this can only be verified after receiving the whole packet. This design is inspired by that of the ChaCha20Poly1305 cipher suite in [http://bxr.su/OpenBSD/usr.bin/ssh/PROTOCOL.chacha20poly1305 OpenSSH].<ref name="openssl_changes">'''How does packet encryption differ from the OpenSSH design?''' The differences are:
* The length field is only 3 bytes instead of 4, as that is sufficient for our purposes.
* Length encryption keeps drawing pseudorandom bytes from the same ChaCha20 cipher for multiple packets, rather than incrementing the nonce for every packet.
* The Poly1305 authentication tag only covers the encrypted plaintext, and not the encrypted length field. This means that plaintext encryption uses the standard ChaCha20Poly1305 construction without any modifications, maximizing applicability of analysis and review of that cipher. The length encryption can be seen as a separate layer, using a separate key, and thus cannot affect any of the confidentiality or integrity guarantees of the plaintext encryption. On the other hand, this change w.r.t. OpenSSH also does not worsen any properties, as incorrect lengths will still trigger authentication failure for the overall packet (the plaintext length is implicitly authenticated by ChaCha20Poly1305).
-* A hash step is performed every 224<ref name="rekey_interval">'''How was the rekeying interval 224 chosen?''' Assuming a node sends only ping messages every 20 minutes (the timeout interval for post-[https://github.com/bitcoin/bips/blob/master/bip-0031.mediawiki BIP31] connections) on a connection, the node will transmit 224 packets in about 3.11 days. This means ''soft rekeying'' after a fixed number of packets automatically translates to an upper-bound of time interval for rekeying, while being much simpler to coordinate than an actual time-based rekeying regime. At the same time, doing it once every 224 messages is sufficiently infrequent that it has only negligible impact on performance. Furthermore, 224 times 3 bytes (the number of bytes consumed by each length encryption) is 672, which is a multiple of 64 minus 32. This means that at the end of 224 length encryptions, exactly 32 bytes of keystream data remain that can be used as next key.</ref> messages to rekey the the encryption ciphers, in order to provide forward security.
+* A hash step is performed every 224<ref name="rekey_interval">'''How was the rekeying interval 224 chosen?''' Assuming a node sends only ping messages every 20 minutes (the timeout interval for post-[https://github.com/bitcoin/bips/blob/master/bip-0031.mediawiki BIP31] connections) on a connection, the node will transmit 224 packets in about 3.11 days. This means ''soft rekeying'' after a fixed number of packets automatically translates to an upper-bound of time interval for rekeying, while being much simpler to coordinate than an actual time-based rekeying regime. At the same time, doing it once every 224 messages is sufficiently infrequent that it has only negligible impact on performance. Furthermore, 224 times 3 bytes (the number of bytes consumed by each length encryption) is 672, which is a multiple of 64 minus 32. This means that at the end of 224 length encryptions, exactly 32 bytes of keystream data remain that can be used as next key.</ref> messages to rekey the encryption ciphers, in order to provide forward security.
</ref> Because only fixed-length chunks (3-byte length fields) are encrypted, we do not need to treat all length chunks as separate messages. Instead, a single cipher (with the same nonce) is used for multiple consecutive length fields. This avoids wasting 61 pseudorandom bytes per packet, and makes the cost of having a separate cipher for length encryption negligible.<ref name="ok_to_batch">'''Is it acceptable to use a less standard construction for length encryption?''' The fact that multiple (non-overlapping) bytes generated by a single ChaCha20 cipher are used for the encryption of multiple consecutive length fields is uncommon. We feel the performance cost gained by this deviation is worth it (especially for small packets, which are very common in Bitcoin's P2P protocol), given the low guarantees that are feasible for length encryption in the first place, and the result is still sufficient to provide pseudorandomness from the view of passive attackers. For plaintext encryption, we independently use a very standard construction, as the stakes for confidentiality and integrity there are much higher.</ref>
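+The keystream accounting above can be double-checked with a few lines of Python (illustrative only, not part of the specification):
+<pre>
+REKEY_INTERVAL = 224     # length fields encrypted per key
+LENGTH_FIELD_LEN = 3     # bytes of keystream consumed per length field
+keystream_used = REKEY_INTERVAL * LENGTH_FIELD_LEN
+assert keystream_used == 672 == 10 * 64 + 32   # ten full ChaCha20 blocks plus half of an eleventh
+assert 11 * 64 - keystream_used == 32          # exactly 32 bytes of that block remain as the next key
+</pre>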
-In order to provide forward security<ref name="rekey">'''What value does forward security provide?''' Re-keying ensures [https://eprint.iacr.org/2001/035.pdf forward secrecy within a session], i.e., an attacker compromising the current session secrets cannot derive past encryption keys in the same session.</ref><ref>'''Why have a cipher with forward secrecy but no periodical refresh of the ECDH key exchange?''' Our cipher ratchets encryption keys forward in order to protect messages encrypted under ''past'' encryption keys. In contrast, re-performing ECDH key exchange would protect messages encrypted under ''future'' encryption keys, i.e., it would re-establish security after the attacker had compromised one of the peers ''temporarily'' (e.g., the attacker obtains a memory dump). We do not believe protecting against that is a priority: an attacker that, for whatever reason, is capable of an attack that reveals encryption keys (or other session secrets) of a peer once is likely capable of performing the same attack again after peers have re-performed the ECDH key exchange. Thus, we do not believe the benefits of re-performing key exchange outweigh the additional complexity that comes with the necessary coordination between the peers. We note that the initiator could choose to close and re-open the entire connection in order to force a refresh of the ECDH key exchange, but that introduces other issues: a connection slot needs to be kept open at the responder side, it is not cryptographically guaranteed that really the same initiator will use it, and the observable TCP reset and handshake may create a detectable pattern.</ref>, the encryption keys for both plaintext and length encryption are cycled every 224 messages, by switching to a new key that is generated by the key stream using the old key.
+In order to provide forward security<ref name="rekey">'''What value does forward security provide?''' Re-keying ensures [https://eprint.iacr.org/2001/035.pdf forward secrecy within a session], i.e., an attacker compromising the current session secrets cannot derive past encryption keys in the same session.</ref><ref>'''Why have a cipher with forward secrecy but no periodical refresh of the ECDH key exchange?''' Our cipher ratchets encryption keys forward in order to protect messages encrypted under ''past'' encryption keys. In contrast, re-performing ECDH key exchange would protect messages encrypted under ''future'' encryption keys, i.e., it would re-establish security after the attacker had compromised one of the peers ''temporarily'' (e.g., the attacker obtains a memory dump). We do not believe protecting against that is a priority: an attacker that, for whatever reason, is capable of an attack that reveals encryption keys (or other session secrets) of a peer once is likely capable of performing the same attack again after peers have re-performed the ECDH key exchange. Thus, we do not believe the benefits of re-performing key exchange outweigh the additional complexity that comes with the necessary coordination between the peers. We note that the initiator could choose to close and re-open the entire connection to force a refresh of the ECDH key exchange, but that introduces other issues: a connection slot needs to be kept open at the responder side, it is not cryptographically guaranteed that really the same initiator will use it, and the observable TCP reset and handshake may create a detectable pattern.</ref>, the encryption keys for both plaintext and length encryption are cycled every 224 messages, by switching to a new key that is generated by the key stream using the old key.
==== Handshake: key exchange and version negotiation ====
@@ -181,26 +183,24 @@ As explained before, these messages are sent to set up the connection:
----------------------------------------------------------------------------------------------------
| Initiator Responder |
| |
- | x, ellswift_X = ellswift_create(initiating=True) |
+ | x, ellswift_X = ellswift_create() |
| |
- | --- ellswift_X + initiator_garbage (initiator_garbage_len bytes; max 4095) ---> |
+ | ---- ellswift_X + initiator_garbage (initiator_garbage_len bytes; max 4095) ---> |
| |
- | y, ellswift_Y = ellswift_create(initiating=False) |
+ | y, ellswift_Y = ellswift_create() |
| ecdh_secret = v2_ecdh( |
| y, ellswift_X, ellswift_Y, initiating=False) |
| v2_initialize(initiator, ecdh_secret, initiating=False) |
| |
- | <-- ellswift_Y + responder_garbage (responder_garbage_len bytes; max 4095) + |
- | responder_garbage_terminator (16 bytes) + |
- | v2_enc_packet(initiator, b'', aad=responder_garbage) + |
- | v2_enc_packet(initiator, RESPONDER_TRANSPORT_VERSION) --- |
+ | <--- ellswift_Y + responder_garbage (responder_garbage_len bytes; max 4095) + |
+ | responder_garbage_terminator (16 bytes) + |
+ | v2_enc_packet(initiator, RESPONDER_TRANSPORT_VERSION, aad=responder_garbage) ---- |
| |
| ecdh_secret = v2_ecdh(x, ellswift_Y, ellswift_X, initiating=True) |
| v2_initialize(responder, ecdh_secret, initiating=True) |
| |
- | --- initiator_garbage_terminator (16 bytes) + |
- | v2_enc_packet(responder, b'', aad=initiator_garbage) + |
- | v2_enc_packet(responder, INITIATOR_TRANSPORT_VERSION) ---> |
+ | ---- initiator_garbage_terminator (16 bytes) + |
+ | v2_enc_packet(responder, INITIATOR_TRANSPORT_VERSION, aad=initiator_garbage) ---> |
| |
----------------------------------------------------------------------------------------------------
</pre>
@@ -242,19 +242,19 @@ To define the needed functions, we first introduce a helper function, matching t
** For every ''x'' in ''{u + 4Y<sup>2</sup>, (-X/Y - u)/2, (X/Y - u)/2}'' (all ''mod p''; the order matters):
*** If ''lift_x(x)'' succeeds, return ''x''. There is at least one such ''x''.
-To find encodings of a given X coordinate ''x'', we first need the inverse of ''XSwiftEC''. The function ''XSwiftECInv(x, u, case)'' either returns ''t'' such that ''XSwiftEC(u, t) = x'', or ''None''. The ''case'' variable is an integer in range 0 to 7 inclusive, which selects which of the up to 8 valid such ''t'' values to return:
+To find encodings of a given X coordinate ''x'', we first need the inverse of ''XSwiftEC''. The function ''XSwiftECInv(x, u, case)'' either returns ''t'' such that ''XSwiftEC(u, t) = x'', or ''None''. The ''case'' variable is an integer in range ''0..7'', which selects which of the up to 8 valid such ''t'' values to return:
* ''XSwiftECInv(x, u, case)'':
** If ''case & 2 = 0'':
*** If ''lift_x(-x - u)'' succeeds, return ''None''.
*** Let ''v = x''.
*** Let ''s = -(u<sup>3</sup> + 7)/(u<sup>2</sup> + uv + v<sup>2</sup>) (mod p)''.
-** If ''case & 2 = 2'':
+** Else (''case & 2 = 2''):
*** Let ''s = x - u (mod p)''.
*** If ''s = 0'', return ''None''.
*** Let ''r'' be the square root of ''-s(4(u<sup>3</sup> + 7) + 3u<sup>2</sup>s) (mod p).''<ref name="modsqrt">'''How to compute a square root mod ''p''?''' Due to the structure of ''p'', a candidate for the square root of ''a'' mod ''p'' can be computed as ''x = a<sup>(p+1)/4</sup> mod p''. If ''a'' is not a square mod ''p'', this formula returns the square root of ''-a mod p'' instead, so it is necessary to verify that ''x<sup>2</sup> mod p = a''. If that is the case ''-x mod p'' is a solution too, but we define "the" square root to be equal to that expression (the square root will therefore always be a square itself, as ''(p+1)/4'' is even). This algorithm is a specialization of the [https://en.wikipedia.org/wiki/Tonelli%E2%80%93Shanks_algorithm Tonelli-Shanks algorithm].</ref> Return ''None'' if it does not exist.
-** If ''case & 1 = 1'' and ''r = 0'', return ''None''.
-*** Let ''v = (-u + r/s)/2''.
+*** If ''case & 1 = 1'' and ''r = 0'', return ''None''.
+*** Let ''v = (r/s - u)/2''.
** Let ''w'' be the square root of ''s (mod p)''. Return ''None'' if it does not exist.
** If ''case & 5 = 0'', return ''-w(u(1 - c)/2 + v)''.
** If ''case & 5 = 1'', return ''w(u(1 + c)/2 + v)''.
@@ -333,51 +333,58 @@ To establish a v2 encrypted connection, the initiator generates an ephemeral sec
<pre>
def initiate_v2_handshake(peer, garbage_len):
-    peer.privkey_ours, peer.ellswift_ours = ellswift_create(initiating=True)
+    peer.privkey_ours, peer.ellswift_ours = ellswift_create()
    peer.sent_garbage = rand_bytes(garbage_len)
    send(peer, peer.ellswift_ours + peer.sent_garbage)
</pre>
-The responder generates an ephemeral keypair for itself and derives the shared ECDH secret (using the first 64 received bytes) which enables it to instantiate the encrypted transport. It then sends 64 bytes of the unencrypted ElligatorSwift encoding of its own public key and its own <code>responder_garbage</code> also of length <code>garbage_len < 4096</code>. If the first 12 bytes received match the v1 prefix, the v1 protocol is used instead.
+The responder generates an ephemeral keypair for itself and derives the shared ECDH secret (using the first 64 received bytes) which enables it to instantiate the encrypted transport. It then sends 64 bytes of the unencrypted ElligatorSwift encoding of its own public key and its own <code>responder_garbage</code> also of length <code>garbage_len < 4096</code>. If the first 16 bytes received match the v1 prefix, the v1 protocol is used instead.
<pre>
TRANSPORT_VERSION = b''
NETWORK_MAGIC = b'\xf9\xbe\xb4\xd9' # Mainnet network magic; differs on other networks.
-V1_PREFIX = NETWORK_MAGIC + b'version\x00'
+V1_PREFIX = NETWORK_MAGIC + b'version\x00\x00\x00\x00\x00'
def respond_v2_handshake(peer, garbage_len):
    peer.received_prefix = b""
-    while len(peer.received_prefix) < 12:
+    while len(peer.received_prefix) < len(V1_PREFIX):
        peer.received_prefix += receive(peer, 1)
        if peer.received_prefix[-1] != V1_PREFIX[len(peer.received_prefix) - 1]:
-            peer.privkey_ours, peer.ellswift_ours = ellswift_create(initiating=False)
+            peer.privkey_ours, peer.ellswift_ours = ellswift_create()
            peer.sent_garbage = rand_bytes(garbage_len)
            send(peer, peer.ellswift_ours + peer.sent_garbage)
            return
    use_v1_protocol()
</pre>
-Upon receiving the encoded responder public key, the initiator derives the shared ECDH secret and instantiates the encrypted transport. It then sends the derived 16-byte <code>initiator_garbage_terminator</code> followed by an authenticated, encrypted packet with empty contents<ref name="send_empty_garbauth">'''Does the content of the garbage authentication packet need to be empty?''' The receiver ignores the content of the garbage authentication packet, so its content can be anything, and it can in principle be used as a shaping mechanism too. There is however no need for that, as immediately afterwards the initiator can start using decoy packets as (much more flexible) shaping mechanism instead.</ref> to authenticate the garbage, and its own version packet. It then receives the responder's garbage and garbage authentication packet (delimited by the garbage terminator), and checks if the garbage is authenticated correctly. The responder performs very similar steps, but includes the earlier received prefix bytes in the public key. As mentioned before, the encrypted packets for the '''version negotiation phase''' can be piggybacked with the garbage authentication packet to minimize roundtrips.
+Upon receiving the encoded responder public key, the initiator derives the shared ECDH secret and instantiates the encrypted transport. It then sends the derived 16-byte <code>initiator_garbage_terminator</code>, optionally followed by an arbitrary number of decoy packets. Afterwards, it receives the responder's garbage (delimited by the garbage terminator). The responder performs very similar steps but includes the earlier received prefix bytes in the public key. Both the initiator and the responder set the AAD of the first encrypted packet they send after the garbage terminator (i.e., either an optional decoy packet or the version packet) to the garbage they have just sent, not including the garbage terminator.
<pre>
-def complete_handshake(peer, initiating):
+def complete_handshake(peer, initiating, decoy_content_lengths=[]):
    received_prefix = b'' if initiating else peer.received_prefix
    ellswift_theirs = received_prefix + receive(peer, 64 - len(received_prefix))
+    if not initiating and ellswift_theirs[4:16] == V1_PREFIX[4:16]:
+        # Looks like a v1 peer from the wrong network.
+        disconnect(peer)
+        return
    ecdh_secret = v2_ecdh(peer.privkey_ours, ellswift_theirs, peer.ellswift_ours,
                          initiating=initiating)
    initialize_v2_transport(peer, ecdh_secret, initiating=initiating)
-    # Send garbage terminator + garbage authentication packet + version packet.
-    send(peer, peer.send_garbage_terminator +
-         v2_enc_packet(peer, b'', aad=peer.sent_garbage) +
-         v2_enc_packet(peer, TRANSPORT_VERSION))
+    # Send garbage terminator.
+    send(peer, peer.send_garbage_terminator)
+    # Optionally send decoy packets after garbage terminator.
+    aad = peer.sent_garbage
+    for decoy_content_len in decoy_content_lengths:
+        send(peer, v2_enc_packet(peer, decoy_content_len * b'\x00', aad=aad, ignore=True))
+        aad = b''
+    # Send version packet.
+    send(peer, v2_enc_packet(peer, TRANSPORT_VERSION, aad=aad))
    # Skip garbage, until encountering garbage terminator.
    received_garbage = receive(peer, 16)
    for i in range(4096):
        if received_garbage[-16:] == peer.recv_garbage_terminator:
-            # Receive, decode, and ignore garbage authentication packet (decoy or not)
-            v2_receive_packet(peer, aad=received_garbage, skip_decoy=False)
-            # Receive, decode, and ignore version packet, skipping decoys
-            v2_receive_packet(peer)
+            # Receive, decode, and ignore version packet.
+            # This includes skipping decoys and authenticating the received garbage.
+            v2_receive_packet(peer, aad=received_garbage[:-16])
            return
        else:
            received_garbage += receive(peer, 1)
@@ -396,7 +403,7 @@ Packet encryption is built on two existing primitives:
* '''ChaCha20Poly1305''' is specified as <code>AEAD_CHACHA20_POLY1305</code> in [https://datatracker.ietf.org/doc/html/rfc8439#section-2.8 RFC 8439 section 2.8]. It is an authenticated encryption protocol with associated data (AEAD), taking a 256-bit key, 96-bit nonce, and an arbitrary-length byte array of associated authenticated data (AAD). Due to the built-in authentication tag, ciphertexts are 16 bytes longer than the corresponding plaintext. In what follows:
** <code>aead_chacha20_poly1305_encrypt(key, nonce, aad, plaintext)</code> refers to a function that takes as input a 32-byte array ''key'', a 12-byte array ''nonce'', an arbitrary-length byte array ''aad'', and an arbitrary-length byte array ''plaintext'', and returns a byte array ''ciphertext'', 16 bytes longer than the plaintext.
** <code>aead_chacha20_poly1305_decrypt(key, nonce, aad, ciphertext)</code> refers to a function that takes as input a 32-byte array ''key'', a 12-byte array ''nonce'', an arbitrary-length byte array ''aad'', and an arbitrary-length byte array ''ciphertext'', and returns either a byte array ''plaintext'' (16 bytes shorter than the ciphertext), or ''None'' in case the ciphertext was not a valid ChaCha20Poly1305 encryption of any plaintext with the specified ''key'', ''nonce'', and ''aad''.
-* The '''ChaCha20 Block Function''' is specified in [https://datatracker.ietf.org/doc/html/rfc8439#section-2.8 RFC 8439 section 2.3]. It is a pseudorandom function (PRF) taking a 256-bit key, 96-bit nonce, and 32-bit counter, and outputs 64 pseudorandom bytes. It is the underlying building block on which ChaCha20 (and ultimately, ChaCha20Poly1305) is built. In what follows:
+* The '''ChaCha20 Block Function''' is specified in [https://datatracker.ietf.org/doc/html/rfc8439#section-2.3 RFC 8439 section 2.3]. It is a pseudorandom function (PRF) taking a 256-bit key, 96-bit nonce, and 32-bit counter, and outputs 64 pseudorandom bytes. It is the underlying building block on which ChaCha20 (and ultimately, ChaCha20Poly1305) is built. In what follows:
** <code>chacha20_block(key, nonce, count)</code> refers to a function that takes as input a 32-byte array ''key'', a 12-byte array ''nonce'', and an integer ''count'' in range ''0..2<sup>32</sup>-1'', and returns a byte array of length 64.
These will be used for plaintext encryption and length encryption, respectively.
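+As a non-normative illustration, both primitives can be instantiated with a general-purpose library. The sketch below assumes the PyCA ''cryptography'' package, whose ChaCha20 object takes a 16-byte value consisting of the 4-byte little-endian initial block counter followed by the 12-byte nonce; any RFC 8439 implementation can be used instead.
+<pre>
+# Illustrative sketch only (assumes the PyCA "cryptography" package).
+from cryptography.exceptions import InvalidTag
+from cryptography.hazmat.primitives.ciphers import Cipher, algorithms
+from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305
+
+def aead_chacha20_poly1305_encrypt(key, nonce, aad, plaintext):
+    return ChaCha20Poly1305(key).encrypt(nonce, plaintext, aad)
+
+def aead_chacha20_poly1305_decrypt(key, nonce, aad, ciphertext):
+    try:
+        return ChaCha20Poly1305(key).decrypt(nonce, ciphertext, aad)
+    except InvalidTag:
+        return None
+
+def chacha20_block(key, nonce, count):
+    # Encrypting 64 zero bytes returns the raw keystream block for (key, nonce, count).
+    full_nonce = count.to_bytes(4, 'little') + nonce
+    encryptor = Cipher(algorithms.ChaCha20(key, full_nonce), mode=None).encryptor()
+    return encryptor.update(bytes(64))
+</pre>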
@@ -405,7 +412,7 @@ These will be used for plaintext encryption and length encryption, respectively.
To provide re-keying every 224 packets, we specify two wrappers.
-The first is '''FSChaCha20Poly1305''', which represents a ChaCha20Poly1305 AEAD, which automatically changes the nonce after every message, and rekeys every 224 messages by encrypting 32 zero bytes<ref name="rekey_why_aead">'''Why is rekeying implemented in terms of an invocation of the AEAD?''' This means the FSChaCha20Poly1305 wrapper can be thought of as a pure layer around the ChaCha20Poly1305 AEAD. Actual implementations can take advantage of the fact that this formulation is equivalent to using byte 64 through 95 of the keystream output of the underlying ChaCha20 cipher as new key, avoiding the need for Poly1305 in the process.</ref>, and using the first 32 bytes of the result. Each message will be used for one packet. Note that in our protocol, any FSChaCha20Poly1305 instance is always either exclusively encryption or exclusively decryption, as separate instances are used for each direction of the protocol. The nonce used for a message is composed of the 32-bit little endian encoding of the number of messages with the current key, followed by the 64-bit little endian encoding of the number of rekeyings performed. For rekeying, the first 32-bit integer is set to ''0xffffffff''.
+The first is '''FSChaCha20Poly1305''', which represents a ChaCha20Poly1305 AEAD, which automatically changes the nonce after every message, and rekeys every 224 messages by encrypting 32 zero bytes<ref name="rekey_why_aead">'''Why is rekeying implemented in terms of an invocation of the AEAD?''' This means the FSChaCha20Poly1305 wrapper can be thought of as a pure layer around the ChaCha20Poly1305 AEAD. Actual implementations can take advantage of the fact that this formulation is equivalent to using byte 64 through 95 of the keystream output of the underlying ChaCha20 cipher as new key, avoiding the need for Poly1305 in the process.</ref>, and using the first 32 bytes of the result. Each message will be used for one packet. Note that in our protocol, any FSChaCha20Poly1305 instance is always either exclusively encryption or exclusively decryption, as separate instances are used for each direction of the protocol. The nonce used for a message is composed of the 32-bit little-endian encoding of the number of messages with the current key, followed by the 64-bit little-endian encoding of the number of rekeyings performed. For rekeying, the first 32-bit integer is set to ''0xffffffff''.
<pre>
REKEY_INTERVAL = 224
@@ -437,7 +444,7 @@ class FSChaCha20Poly1305:
        return self.crypt(aad, plaintext, False)
</pre>
-The second is '''FSChaCha20''', a (single) stream cipher which is used for the lengths of all packets. Encryption and decryption are identical here, so a single function <code>crypt</code> is exposed. It XORs the input with bytes generated using the ChaCha20 block function, rekeying every 224 chunks using the next 32 bytes of the block function output as new key. A ''chunk'' refers here to a single invocation of <code>crypt</code>. As explained before, the same cipher is used for 224 consecutive chunks, to avoid wasting cipher output. The nonce used for these batches of 224 chunks is composed of 4 zero bytes followed by the 64-bit little endian encoding of the number of rekeyings performed. The block counter is reset to 0 after every rekeying.
+The second is '''FSChaCha20''', a (single) stream cipher which is used for the lengths of all packets. Encryption and decryption are identical here, so a single function <code>crypt</code> is exposed. It XORs the input with bytes generated using the ChaCha20 block function, rekeying every 224 chunks using the next 32 bytes of the block function output as new key. A ''chunk'' refers here to a single invocation of <code>crypt</code>. As explained before, the same cipher is used for 224 consecutive chunks, to avoid wasting cipher output. The nonce used for these batches of 224 chunks is composed of 4 zero bytes followed by the 64-bit little-endian encoding of the number of rekeyings performed. The block counter is reset to 0 after every rekeying.
<pre>
class FSChaCha20:
@@ -490,17 +497,19 @@ def v2_enc_packet(peer, contents, aad=b'', ignore=False):
<pre>
CHACHA20POLY1305_EXPANSION = 16
-def v2_receive_packet(peer, aad=b'', skip_decoy=True):
+def v2_receive_packet(peer, aad=b''):
    while True:
        enc_contents_len = receive(peer, LENGTH_FIELD_LEN)
        contents_len = int.from_bytes(peer.recv_L.crypt(enc_contents_len), 'little')
        aead_ciphertext = receive(peer, HEADER_LEN + contents_len + CHACHA20POLY1305_EXPANSION)
-        plaintext = peer.recv_P.decrypt(aead_ciphertext)
+        plaintext = peer.recv_P.decrypt(aad, aead_ciphertext)
        if plaintext is None:
            disconnect(peer)
            break
+        # Only the first packet is expected to have non-empty AAD.
+        aad = b''
        header = plaintext[:HEADER_LEN]
-        if not (skip_decoy and header[0] & (1 << IGNORE_BIT_POS)):
+        if not (header[0] & (1 << IGNORE_BIT_POS)):
            return plaintext[HEADER_LEN:]
</pre>
@@ -515,12 +524,12 @@ v2 Bitcoin P2P transport layer packets use the encrypted message structure shown
{|class="wikitable"
! Field !! Size in bytes !! Comments
|-
-| <code>message_type</code> || ''1..13'' || either a one byte ID or an ASCII string prefixed with a length byte
+| <code>message_type</code> || 1 or 13 || either a one byte ID in range ''1..255'' or <code>b'\x00'</code> followed by a 12-byte ASCII message type (as in the v1 P2P protocol)
|-
| <code>message_payload</code> || <code>message_length</code> || message payload
|}
-If the first byte of <code>message_type</code> is in the range ''1..12'', it is interpreted as the number of ASCII bytes that follow for the message type. If it is in the range ''13..255'', it is interpreted as a message type ID. This structure results in smaller messages than the v1 protocol as most messages sent/received will have a message type ID.<ref name="smaller_messages">'''How do the length between v1 and v2 compare?''' For messages that use the 1-byte short message type ID, v2 packets use 3 bytes less per message than v1.</ref>
+If the first byte of <code>message_type</code> is <code>b'\x00'</code>, the following 12 bytes are interpreted as an ASCII message type (as in the v1 P2P protocol), right-padded with <code>b'\x00'</code> as necessary. If the first byte of <code>message_type</code> is in the range ''1..255'', it is interpreted as a message type ID. This structure results in smaller messages than the v1 protocol, as most messages sent/received will have a message type ID. We recommend reserving 1-byte type IDs for message types that are sent more than once per direction per connection.<ref name="smaller_messages">'''How do the lengths between v1 and v2 compare?''' For messages that use the 1-byte short message type ID, v2 packets use 3 bytes less per message than v1.</ref><ref name="fixed_length_long_ids">'''Why not allow variable length long message type IDs?''' Allowing for variable length long IDs reduces the available 1-byte ID space by 12 (to encode the length itself) and incentivizes less descriptive message types. In addition, limiting message types to fixed lengths of 1 or 13 hampers traffic analysis.</ref>
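+As a non-normative illustration of this encoding, the sketch below assumes a hypothetical <code>short_ids</code> mapping from message type names to the 1-byte IDs listed in the table that follows:
+<pre>
+# Illustrative only; short_ids is a hypothetical dict such as {'ADDR': 1, 'BLOCK': 2, ...},
+# and short_ids_by_value is its inverse.
+def encode_message_type(name, short_ids):
+    if name in short_ids:
+        return bytes([short_ids[name]])                            # 1 byte, ID in range 1..255
+    assert 1 <= len(name) <= 12
+    return b'\x00' + name.encode('ascii').ljust(12, b'\x00')       # 13 bytes, v1-style type
+
+def decode_message_type(contents, short_ids_by_value):
+    if contents[0] != 0:
+        return short_ids_by_value[contents[0]], contents[1:]
+    return contents[1:13].rstrip(b'\x00').decode('ascii'), contents[13:]
+</pre>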
The following table lists currently defined message type IDs:
@@ -533,50 +542,35 @@ The following table lists currently defined message type IDs:
!3
|-
!+0
-|(undefined)||(1 byte string)||(2 byte string)||(3 byte string)
+|(12 bytes follow)||<code>ADDR</code>||<code>BLOCK</code>||<code>BLOCKTXN</code>
|-
!+4
-|(4 byte string)||(5 byte string)||(6 byte string)||(7 byte string)
+|<code>CMPCTBLOCK</code>||<code>FEEFILTER</code>||<code>FILTERADD</code>||<code>FILTERCLEAR</code>
|-
!+8
-|(8 byte string)||(9 byte string)||(10 byte string)||(11 byte string)
+|<code>FILTERLOAD</code>||<code>GETBLOCKS</code>||<code>GETBLOCKTXN</code>||<code>GETDATA</code>
|-
!+12
-|(12 byte string)||<code>ADDR</code>||<code>BLOCK</code>||<code>BLOCKTXN</code>
+|<code>GETHEADERS</code>||<code>HEADERS</code>||<code>INV</code>||<code>MEMPOOL</code>
|-
!+16
-|<code>CMPCTBLOCK</code>||<code>FEEFILTER</code>||<code>FILTERADD</code>||<code>FILTERCLEAR</code>
+|<code>MERKLEBLOCK</code>||<code>NOTFOUND</code>||<code>PING</code>||<code>PONG</code>
|-
!+20
-|<code>FILTERLOAD</code>||<code>GETADDR</code>||<code>GETBLOCKS</code>||<code>GETBLOCKTXN</code>
+|<code>SENDCMPCT</code>||<code>TX</code>||<code>GETCFILTERS</code>||<code>CFILTER</code>
|-
!+24
-|<code>GETDATA</code>||<code>GETHEADERS</code>||<code>HEADERS</code>||<code>INV</code>
-|-
-!+28
-|<code>MEMPOOL</code>||<code>MERKLEBLOCK</code>||<code>NOTFOUND</code>||<code>PING</code>
-|-
-!+32
-|<code>PONG</code>||<code>SENDCMPCT</code>||<code>SENDHEADERS</code>||<code>TX</code>
-|-
-!+36
-|<code>VERACK</code>||<code>VERSION</code>||<code>GETCFILTERS</code>||<code>CFILTER</code>
-|-
-!+40
|<code>GETCFHEADERS</code>||<code>CFHEADERS</code>||<code>GETCFCHECKPT</code>||<code>CFCHECKPT</code>
|-
-!+44
-|<code>WTXIDRELAY</code>||<code>ADDRV2</code>||<code>SENDADDRV2</code>||<code>SENDTXRCNCL</code>
-|-
-!+48
-|<code>REQRECON</code>||<code>SKETCH</code>||<code>REQSKETCHEXT</code>||<code>RECONCILDIFF</code>
+!+28
+|<code>ADDRV2</code>
|-
-!&geq;52
+!&geq;29
|| colspan="4" | (undefined)
|}
-The message types may be updated separately after BIP finalization.
+Additional message types may be added separately after BIP finalization.
=== Signaling specification ===
==== Signaling v2 support ====
@@ -586,8 +580,8 @@ Peers supporting the v2 transport protocol signal support by advertising the <co
== Test Vectors ==
For development and testing purposes, we provide a collection of test vectors in CSV format, and a naive, highly inefficient, [[bip-0324/reference.py|reference implementation]] of the relevant algorithms. This code is for demonstration purposes only:
-* [[bip-0324/ellswift_decode_test_vectors.csv|XElligatorSwift decoding vectors]] give examples of ElligatorSwift-encoded public keys, and the X coordinate they map to.
-* [[bip-0324/xswiftec_inv_test_vectors.csv|XSwiftECInv vectors]] give examples of ''(u, x)'' pairs, and the various ''t'' values that ''xswiftec_inv'' maps them to.
+* [[bip-0324/ellswift_decode_test_vectors.csv|XElligatorSwift decoding vectors]] provide examples of ElligatorSwift-encoded public keys, and the X coordinate they map to.
+* [[bip-0324/xswiftec_inv_test_vectors.csv|XSwiftECInv vectors]] provide examples of ''(u, x)'' pairs, and the various ''t'' values that ''xswiftec_inv'' maps them to.
* [[bip-0324/packet_encoding_test_vectors.csv|Packet encoding vectors]] illustrate the lifecycle of the authenticated encryption scheme proposed in this document.
== Rationale and References ==
@@ -599,3 +593,4 @@ Thanks to everyone (last name order) that helped invent and develop the ideas in
* Matt Corallo
* Lloyd Fournier
* Gregory Maxwell
+* Anthony Towns
diff --git a/bip-0327.mediawiki b/bip-0327.mediawiki
new file mode 100644
index 0000000..07b40f5
--- /dev/null
+++ b/bip-0327.mediawiki
@@ -0,0 +1,829 @@
+<pre>
+ BIP: 327
+ Title: MuSig2 for BIP340-compatible Multi-Signatures
+ Author: Jonas Nick <jonasd.nick@gmail.com>
+ Tim Ruffing <crypto@timruffing.de>
+ Elliott Jin <elliott.jin@gmail.com>
+ Status: Draft
+ License: BSD-3-Clause
+ Type: Informational
+ Created: 2022-03-22
+ Post-History: 2022-04-05: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-April/020198.html [bitcoin-dev] MuSig2 BIP
+ 2022-10-11: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-October/021000.html [bitcoin-dev] MuSig2 BIP
+ Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0327
+</pre>
+
+== Introduction ==
+
+=== Abstract ===
+
+This document proposes a standard for the [https://eprint.iacr.org/2020/1261.pdf MuSig2] multi-signature scheme.
+The standard is compatible with [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP340] public keys and signatures.
+It supports ''tweaking'', which allows deriving [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP32] child keys from aggregate public keys and creating [https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341] Taproot outputs with key and script paths.
+
+=== Copyright ===
+
+This document is licensed under the 3-clause BSD license.
+
+=== Motivation ===
+
+MuSig2 is a multi-signature scheme that allows multiple signers to create a single aggregate public key and cooperatively create ordinary Schnorr signatures valid under the aggregate public key.
+Signing requires interaction between ''all'' signers involved in key aggregation.
+(MuSig2 is an ''n-of-n'' multi-signature scheme and not a ''t-of-n'' threshold-signature scheme.)
+
+The primary motivation is to create a standard that allows users of different software projects to jointly control Taproot outputs ([https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341]).
+Such an output contains a public key which, in this case, would be the aggregate of all users' individual public keys.
+It can be spent using MuSig2 to produce a signature for the key-based spending path.
+
+The on-chain footprint of a MuSig2 Taproot output is essentially a single BIP340 public key, and a transaction spending the output only requires a single signature cooperatively produced by all signers. This is '''more compact''' and has '''lower verification cost''' than each signer providing an individual public key and signature, as would be required by an ''n-of-n'' policy implemented using <code>OP_CHECKSIGADD</code> as introduced in [https://github.com/bitcoin/bips/blob/master/bip-0342.mediawiki BIP342].
+As a side effect, the number ''n'' of signers is not limited by any consensus rules when using MuSig2.
+
+Moreover, MuSig2 offers a '''higher level of privacy''' than <code>OP_CHECKSIGADD</code>: MuSig2 Taproot outputs are indistinguishable for a blockchain observer from regular, single-signer Taproot outputs even though they are actually controlled by multiple signers. By tweaking an aggregate public key, the shared Taproot output can have script spending paths that are hidden unless used.
+
+There are multi-signature schemes other than MuSig2 that are fully compatible with Schnorr signatures.
+The MuSig2 variant proposed below stands out by combining all the following features:
+* '''Simple Key Setup''': Key aggregation is non-interactive and fully compatible with BIP340 public keys.
+* '''Two Communication Rounds''': MuSig2 is faster in practice than previous three-round multi-signature schemes such as [https://eprint.iacr.org/2018/068.pdf MuSig1], particularly when signers are connected through high-latency anonymous links. Moreover, the need for fewer communication rounds simplifies the algorithms and reduces the probability that implementations and users make security-relevant mistakes.
+* '''Provable security''': MuSig2 has been [https://eprint.iacr.org/2020/1261.pdf proven existentially unforgeable] under the algebraic one-more discrete logarithm (AOMDL) assumption (instead of the discrete logarithm assumption required for single-signer Schnorr signatures). AOMDL is a falsifiable and weaker variant of the well-studied OMDL problem.
+* '''Low complexity''': MuSig2 has a substantially lower computational and implementation complexity than alternative schemes like [https://eprint.iacr.org/2020/1057 MuSig-DN]. However, this comes at the cost of having no ability to generate nonces deterministically and the requirement to securely handle signing state.
+
+=== Design ===
+
+* '''Compatibility with BIP340''': In this proposal, the aggregate public key is a BIP340 X-only public key, and the signature output at the end of the signing protocol is a BIP340 signature that passes BIP340 verification for the aggregate public key and a message. The individual public keys that are input to the key aggregation algorithm are ''plain'' public keys in compressed format.
+* '''Tweaking for BIP32 derivations and Taproot''': This proposal supports tweaking aggregate public keys and signing for tweaked aggregate public keys. We distinguish two modes of tweaking: ''Plain'' tweaking can be used to derive child aggregate public keys per [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP32]. ''X-only'' tweaking, on the other hand, allows creating a [https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341] tweak to add script paths to a Taproot output. See [[#tweaking-the-aggregate-public-key|below]] for details.
+* '''Non-interactive signing with preprocessing''': The first communication round, exchanging the nonces, can happen before the message or the exact set of signers is determined. Once the parameters of the signing session are finalized, the signers can send partial signatures without additional interaction.
+* '''Key aggregation optionally independent of order''': The output of the key aggregation algorithm depends on the order in which the individual public keys are provided as input. Key aggregation does not sort the individual public keys by default because applications often already have a canonical order of signers. Nonetheless, applications can mandate sorting before aggregation,<ref>Applications that sort individual public keys before aggregation should ensure that the implementation of sorting is reasonably efficient, and in particular does not degenerate to quadratic runtime on pathological inputs.</ref> and this proposal specifies a canonical order to sort the individual public keys before key aggregation. Sorting will ensure the same output, independent of the initial order.
+* '''Third-party nonce and partial signature aggregation''': Instead of every signer sending their nonce and partial signature to every other signer, it is possible to use an untrusted third-party ''aggregator'' in order to reduce the communication complexity from quadratic to linear in the number of signers. In each of the two rounds, the aggregator collects all signers' contributions (nonces or partial signatures), aggregates them, and broadcasts the aggregate back to the signers. A malicious aggregator can force the signing session to fail to produce a valid Schnorr signature but cannot negatively affect the unforgeability of the scheme.
+* '''Partial signature verification''': If any signer sends a partial signature contribution that was not created by honestly following the signing protocol, the signing session will fail to produce a valid Schnorr signature. This proposal specifies a partial signature verification algorithm to identify disruptive signers. It is incompatible with third-party nonce aggregation because the individual nonce is required for partial verification.
+* '''MuSig2* optimization''': This proposal uses an optimized scheme MuSig2*, which allows saving a point multiplication in key aggregation as compared to MuSig2. MuSig2* is proven secure in the appendix of the [https://eprint.iacr.org/2020/1261 MuSig2 paper]. The optimization consists of assigning the constant key aggregation coefficient ''1'' to the second distinct key in the list of individual public keys to be aggregated (as well as to any key identical to this key). A non-normative sketch of this coefficient rule is shown after this list.
+* '''Size of the nonce and security''': In this proposal, each signer's nonce consists of two elliptic curve points. The [https://eprint.iacr.org/2020/1261 MuSig2 paper] gives distinct security proofs depending on the number of points that constitute a nonce. See section [[#choosing-the-size-of-the-nonce|Choosing the Size of the Nonce]] for a discussion.
+
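+The following non-normative sketch illustrates the MuSig2* coefficient rule referenced in the list above; <code>keyagg_coeff_hash</code> is a placeholder for the tagged hash specified later in this document, and public keys are 33-byte plain public keys:
+<pre>
+# Non-normative sketch of the MuSig2* key aggregation coefficient rule.
+def second_distinct_key(pubkeys):
+    for pk in pubkeys:
+        if pk != pubkeys[0]:
+            return pk
+    return None
+
+def key_agg_coefficient(pubkeys, pk, keyagg_coeff_hash):
+    if pk == second_distinct_key(pubkeys):
+        return 1   # constant coefficient for the second distinct key (and any key equal to it)
+    return keyagg_coeff_hash(pubkeys, pk)
+</pre>
+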
+== Overview ==
+
+Implementers must make sure to understand this section thoroughly to avoid subtle mistakes that may lead to catastrophic failure.
+
+=== Optionality of Features ===
+
+The goal of this proposal is to support a wide range of possible application scenarios.
+Given a specific application scenario, some features may be unnecessary or not desirable, and implementers can choose not to support them.
+Such optional features include:
+* Applying plain tweaks after x-only tweaks.
+* Applying tweaks at all.
+* Dealing with messages that are not exactly 32 bytes.
+* Identifying a disruptive signer after aborting (aborting itself remains mandatory).
+* Dealing with duplicate individual public keys in key aggregation.
+If applicable, the corresponding algorithms should simply fail when encountering inputs unsupported by a particular implementation. (For example, the signing algorithm may fail when given a message which is not 32 bytes.)
+Similarly, the test vectors that exercise the unimplemented features should be re-interpreted to expect an error, or be skipped if appropriate.
+
+=== General Signing Flow ===
+
+The signers start by exchanging their individual public keys and computing an aggregate public key using the ''KeyAgg'' algorithm.
+Whenever they want to sign a message, the basic order of operations to create a multi-signature is as follows:
+
+'''First broadcast round:'''
+The signers start the signing session by running ''NonceGen'' to compute ''secnonce'' and ''pubnonce''.<ref>We treat the ''secnonce'' and ''pubnonce'' as grammatically singular even though they include serializations of two scalars and two elliptic curve points, respectively. This treatment may be confusing for readers familiar with the MuSig2 paper. However, serialization is a technical detail that is irrelevant for users of MuSig2 interfaces.</ref>
+Then, the signers broadcast their ''pubnonce'' to each other and run ''NonceAgg'' to compute an aggregate nonce.
+
+'''Second broadcast round:'''
+At this point, every signer has the required data to sign, which, in the algorithms specified below, is stored in a data structure called [[#session-context|Session Context]].
+Every signer computes a partial signature by running ''Sign'' with the secret signing key, the ''secnonce'' and the session context.
+Then, the signers broadcast their partial signatures to each other and run ''PartialSigAgg'' to obtain the final signature.
+If all signers behaved honestly, the result passes [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP340] verification.
+
+Both broadcast rounds can be optimized by using an aggregator who collects all signers' nonces or partial signatures, aggregates them using ''NonceAgg'' or ''PartialSigAgg'', respectively, and broadcasts the aggregate result back to the signers. A malicious aggregator can force the signing session to fail to produce a valid Schnorr signature but cannot negatively affect the unforgeability of the scheme, i.e., even a malicious aggregator colluding with all but one signer cannot forge a signature.
+
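+The sketch below walks through both rounds for two signers. The module <code>musig2</code> and its function names are hypothetical stand-ins for an implementation of the algorithms specified in this document, so exact signatures may differ:
+<pre>
+# Non-normative sketch of the two-round signing flow for two signers.
+import musig2  # hypothetical module implementing KeyAgg, NonceGen, NonceAgg, Sign, PartialSigAgg
+
+msg = b'\x00' * 32
+sk1, pk1 = musig2.keypair_create()
+sk2, pk2 = musig2.keypair_create()
+
+# Key setup (once): aggregate the individual public keys.
+keyagg_ctx = musig2.key_agg([pk1, pk2])
+aggpk = musig2.get_xonly_pubkey(keyagg_ctx)
+
+# First broadcast round: exchange pubnonces, then aggregate them.
+secnonce1, pubnonce1 = musig2.nonce_gen(sk=sk1, pk=pk1, aggpk=aggpk, msg=msg)
+secnonce2, pubnonce2 = musig2.nonce_gen(sk=sk2, pk=pk2, aggpk=aggpk, msg=msg)
+aggnonce = musig2.nonce_agg([pubnonce1, pubnonce2])
+
+# Second broadcast round: exchange partial signatures, then aggregate them.
+session_ctx = musig2.session_context(aggnonce, [pk1, pk2], msg)
+psig1 = musig2.sign(secnonce1, sk1, session_ctx)   # secnonce1 must never be used again
+psig2 = musig2.sign(secnonce2, sk2, session_ctx)
+sig = musig2.partial_sig_agg([psig1, psig2], session_ctx)
+
+assert musig2.bip340_verify(aggpk, msg, sig)
+</pre>
+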
+'''IMPORTANT''': The ''Sign'' algorithm must '''not''' be executed twice with the same ''secnonce''.
+Otherwise, it is possible to extract the secret signing key from the two partial signatures output by the two executions of ''Sign''.
+To avoid accidental reuse of ''secnonce'', an implementation may securely erase the ''secnonce'' argument by overwriting it with 64 zero bytes after it has been read by ''Sign''.
+A ''secnonce'' consisting of only zero bytes is invalid for ''Sign'' and will cause it to fail.
+
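+A minimal, non-normative sketch of this erasure pattern, assuming ''secnonce'' is a mutable byte buffer whose first 64 bytes hold the two secret nonce values and <code>sign</code> stands in for the ''Sign'' algorithm:
+<pre>
+# Non-normative: overwrite the secret nonce values with zeros right after signing,
+# so that accidental reuse makes Sign fail instead of leaking the secret key.
+def sign_once(secnonce, sk, session_ctx, sign):
+    psig = sign(secnonce, sk, session_ctx)
+    secnonce[0:64] = bytes(64)   # a zeroed secnonce is invalid for Sign
+    return psig
+</pre>
+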
+To simplify the specification of the algorithms, some intermediary values are unnecessarily recomputed from scratch, e.g., when executing ''GetSessionValues'' multiple times.
+Actual implementations can cache these values.
+As a result, the [[#session-context|Session Context]] may look very different in implementations or may not exist at all.
+However, computation of ''GetSessionValues'' and storage of the result must be protected against modification from an untrusted third party.
+This party would have complete control over the aggregate public key and message to be signed.
+
+=== Public Key Aggregation ===
+
+We distinguish between two public key types, namely ''plain public keys'', the key type traditionally used in Bitcoin, and ''X-only public keys''.
+Plain public keys are byte strings of length 33 (often called ''compressed'' format).
+In contrast, X-only public keys are 32-byte strings defined in [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP340].
+
+The individual public keys of signers as input to the key aggregation algorithm ''KeyAgg'' (and to ''GetSessionValues'' and ''PartialSigVerify'') are plain public keys.
+The output of ''KeyAgg'' is a [[#keyagg-context|KeyAgg Context]] which stores information required for tweaking the aggregate public key (see [[#tweaking-the-aggregate-public-key|below]]),
+and it can be used to produce an X-only aggregate public key, or a plain aggregate public key.
+In order to obtain an X-only public key compatible with BIP340 verification, implementations call the ''GetXonlyPubkey'' function with the KeyAgg Context.
+To get the plain aggregate public key, which is required for some applications of [[#tweaking-the-aggregate-public-key|tweaking]], implementations call ''GetPlainPubkey'' instead.
+
+The aggregate public key produced by ''KeyAgg'' (regardless of the type) depends on the order of the individual public keys.
+If the application does not have a canonical order of the signers, the individual public keys can be sorted with the ''KeySort'' algorithm to ensure that the aggregate public key is independent of the order of signers.
+
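+A non-normative sketch: ''KeySort'', specified later in this document, amounts to sorting the serialized plain public keys lexicographically, which is exactly the default ordering of byte strings in Python:
+<pre>
+# Non-normative sketch of KeySort on 33-byte plain public keys.
+def key_sort(pubkeys):
+    return sorted(pubkeys)
+
+# Aggregating sorted keys gives the same result regardless of the input order, e.g.
+# KeyAgg(key_sort([pk1, pk2])) == KeyAgg(key_sort([pk2, pk1])).
+</pre>
+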
+The same individual public key is allowed to occur more than once in the input of ''KeyAgg'' and ''KeySort''.
+This is by design: All algorithms in this proposal handle multiple signers who (claim to) have identical individual public keys properly,
+and applications are not required to check for duplicate individual public keys.
+In fact, applications are recommended to omit checks for duplicate individual public keys in order to simplify error handling.
+Moreover, it is often impossible to tell at key aggregation which signer is to blame for the duplicate, i.e., which signer came up with an individual public key honestly and which disruptive signer copied it.
+In contrast, MuSig2 is designed to identify disruptive signers at signing time (see [[#identifiying-disruptive-signers|Identifying Disruptive Signers]]).
+
+While the algorithms in this proposal are able to handle duplicate individual public keys, there are scenarios where applications may choose to abort when encountering duplicates.
+For example, we can imagine a scenario where a single entity creates a MuSig2 setup with multiple signing devices.
+In that case, duplicates may not result from a malicious signing device copying an individual public key of another signing device but from accidental initialization of two devices with the same seed.
+Since MuSig2 key aggregation would accept the duplicate keys and not error out, which would in turn reduce the security compared to the intended key setup, applications may reject duplicate individual public keys before passing them to MuSig2 key aggregation and ask the user to investigate.
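+
+As a non-normative illustration, aggregating two individual public keys with the helpers of the reference implementation (''individual_pk'', ''key_agg'', ''get_xonly_pk'') looks as follows:
+
+<source lang="python">
+import secrets
+from reference import *
+
+sk1, sk2 = secrets.token_bytes(32), secrets.token_bytes(32)
+pk1, pk2 = individual_pk(sk1), individual_pk(sk2)   # 33-byte plain public keys
+
+keyagg_ctx = key_agg([pk1, pk2])        # order matters unless the keys are sorted first
+aggpk_xonly = get_xonly_pk(keyagg_ctx)  # 32-byte X-only key, suitable for BIP340 verification
+</source>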
+
+=== Nonce Generation ===
+
+'''IMPORTANT''': ''NonceGen'' must have access to a high-quality random generator to draw an unbiased, uniformly random value ''rand' ''.
+In contrast to BIP340 signing, the values ''k<sub>1</sub>'' and ''k<sub>2</sub>'' '''must not be derived deterministically''' from the session parameters because otherwise active adversaries can [https://medium.com/blockstream/musig-dn-schnorr-multisignatures-with-verifiably-deterministic-nonces-27424b5df9d6#e3b6 trick the victim into reusing a nonce].
+
+The optional arguments to ''NonceGen'' enable a defense-in-depth mechanism that may prevent secret key exposure if ''rand' '' is accidentally not drawn uniformly at random.
+If the value ''rand' '' was identical in two ''NonceGen'' invocations, but any other argument was different, the ''secnonce'' would still be guaranteed to be different as well (with overwhelming probability), and thus accidentally using the same ''secnonce'' for ''Sign'' in both sessions would be avoided.
+Therefore, it is recommended to provide the optional arguments ''sk'', ''aggpk'', and ''m'' if these session parameters are already determined during nonce generation.
+The auxiliary input ''extra_in'' can contain additional contextual data that has a chance of changing between ''NonceGen'' runs,
+e.g., a supposedly unique session id (taken from the application), a session counter wide enough not to repeat in practice, any nonces by other signers (if already known), or the serialization of a data structure containing multiple of the above.
+However, the protection provided by the optional arguments should only be viewed as a last resort.
+In most conceivable scenarios, the assumption that the arguments are different between two executions of ''NonceGen'' is relatively strong, particularly when facing an active adversary.
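+
+For instance, a signer who already knows its secret key and the message could generate its nonce pair as in the following non-normative sketch, which uses the reference implementation's ''nonce_gen_internal'' (the variant that takes ''rand' '' as an explicit argument):
+
+<source lang="python">
+import secrets
+from reference import *
+
+sk = secrets.token_bytes(32)
+pk = individual_pk(sk)
+msg = b'example message'
+
+rand_ = secrets.token_bytes(32)  # must come from a high-quality random number generator
+
+# sk and msg are passed as optional arguments for defense in depth; aggpk and
+# extra_in are omitted here because they are assumed not to be known yet.
+secnonce, pubnonce = nonce_gen_internal(rand_, sk, pk, None, msg, None)
+</source>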
+
+In some applications, it is beneficial to generate and send a ''pubnonce'' before the other signers, their individual public keys, or the message to sign is known.
+In this case, only the available arguments are provided to the ''NonceGen'' algorithm.
+After this preprocessing phase, the ''Sign'' algorithm can be run immediately when the message and set of signers is determined.
+This way, the final signature is created quicker and with fewer round trips.
+However, applications that use this method presumably store the nonces for a longer time and must therefore be even more careful not to reuse them.
+Moreover, this method is not compatible with the defense-in-depth mechanism described in the previous paragraph.
+
+Instead of every signer broadcasting their ''pubnonce'' to every other signer, the signers can send their ''pubnonce'' to a single aggregator node that runs ''NonceAgg'' and sends the ''aggnonce'' back to the signers.
+This technique reduces the overall communication.
+A malicious aggregator can force the signing session to fail to produce a valid Schnorr signature but cannot negatively affect the unforgeability of the scheme.
+
+In general, MuSig2 signers are stateful in the sense that they first generate ''secnonce'' and then need to store it until they receive the other signers' ''pubnonces'' or the ''aggnonce''.
+However, it is possible for one of the signers to be stateless.
+This signer waits until it receives the ''pubnonce'' of all the other signers and until session parameters such as a message to sign, individual public keys, and tweaks are determined.
+Then, the signer can run ''NonceGen'', ''NonceAgg'' and ''Sign'' in sequence and send out its ''pubnonce'' along with its partial signature.
+Stateless signers may want to consider signing deterministically (see [[#modifications-to-nonce-generation|Modifications to Nonce Generation]]) to remove the reliance on the random number generator in the ''NonceGen'' algorithm.
+
+=== Identifying Disruptive Signers ===
+
+The signing protocol makes it possible to identify malicious signers who send invalid contributions to a signing session in order to make the signing session abort and prevent the honest signers from obtaining a valid signature.
+This property is called "identifiable aborts" and ensures that honest parties can assign blame to malicious signers who cause an abort in the signing protocol.
+
+Aborts are identifiable for an honest party if the following conditions hold in a signing session:
+* The contributions received from all signers have not been tampered with (e.g., because they were sent over authenticated connections).
+* Nonce aggregation is performed honestly (e.g., because the honest signer performs nonce aggregation on its own or because the aggregator is trusted).
+* The partial signatures received from all signers are verified using the algorithm ''PartialSigVerify''.
+
+If these conditions hold and an honest party (signer or aggregator) runs an algorithm that fails due to invalid protocol contributions from malicious signers, then the algorithm run by the honest party will output the index of exactly one malicious signer.
+Additionally, if the honest parties agree on the contributions sent by all signers in the signing session, all the honest parties who run the aborting algorithm will identify the same malicious signer.
+
+==== Further Remarks ====
+
+Some of the algorithms specified below may also assign blame to a malicious aggregator.
+While this is possible for some particular misbehavior of the aggregator, it is not guaranteed that a malicious aggregator can be identified.
+More specifically, a malicious aggregator (whose existence violates the second condition above) can always make signing abort and wrongly hold honest signers accountable for the abort (e.g., by claiming to have received an invalid contribution from a particular honest signer).
+
+The only purpose of the algorithm ''PartialSigVerify'' is to ensure identifiable aborts, and it is not necessary to use it when identifiable aborts are not desired.
+In particular, partial signatures are ''not'' signatures.
+An adversary can forge a partial signature, i.e., create a partial signature without knowing the secret key for the claimed individual public key.<ref>Assume an adversary wants to forge a partial signature for individual public key ''P''. It joins the signing session pretending to be two different signers, one with individual public key ''P'' and one with another individual public key. The adversary can then set the second signer's nonce such that it will be able to produce a partial signature for ''P'' but not for the other claimed signer. An explanation of the individual steps required to create a partial signature forgery can be found in [https://gist.github.com/AdamISZ/ca974ed67889cedc738c4a1f65ff620b a write up by Adam Gibson].</ref>
+However, if ''PartialSigVerify'' succeeds for all partial signatures then ''PartialSigAgg'' will return a valid Schnorr signature.<ref>Given a list of individual public keys, it is an open question whether a BIP-340 signature valid under the corresponding aggregate public key is a proof of knowledge of all secret keys of the individual public keys.</ref>
+
+=== Tweaking the Aggregate Public Key ===
+
+The aggregate public key can be ''tweaked'', which modifies the key as defined in the [[#tweaking-definition|Tweaking Definition]] subsection.
+In order to apply a tweak, the KeyAgg Context output by ''KeyAgg'' is provided to the ''ApplyTweak'' algorithm with the ''is_xonly_t'' argument set to false for plain tweaking and true for X-only tweaking.
+The resulting KeyAgg Context can be used to apply another tweak with ''ApplyTweak'' or obtain the aggregate public key with ''GetXonlyPubkey'' or ''GetPlainPubkey''.
+
+In addition to individual public keys, the ''KeyAgg'' algorithm accepts tweaks, which modify the aggregate public key as defined in the [[#tweaking-definition|Tweaking Definition]] subsection.
+For example, if ''KeyAgg'' is run with ''v = 2'', ''is_xonly_t<sub>1</sub> = false'', ''is_xonly_t<sub>2</sub> = true'', then the aggregate key is first plain tweaked with ''tweak<sub>1</sub>'' and then X-only tweaked with ''tweak<sub>2</sub>''.
+
+The purpose of supporting tweaking is to ensure compatibility with existing uses of tweaking, i.e., that the result of signing is a valid signature for the tweaked public key.
+The MuSig2 algorithms take arbitrary tweaks as input but accepting arbitrary tweaks may negatively affect the security of the scheme.<ref>It is an open question whether allowing arbitrary tweaks from an adversary affects the unforgeability of MuSig2.</ref>
+Instead, signers should obtain the tweaks according to other specifications.
+This typically involves deriving the tweaks from a hash of the aggregate public key and some other information.
+Depending on the specific scheme that is used for tweaking, either the plain or the X-only aggregate public key is required.
+For example, to do [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP32] derivation, you call ''GetPlainPubkey'' to be able to compute the tweak, whereas [https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341] TapTweaks require X-only public keys that are obtained with ''GetXonlyPubkey''.
+
+The tweak mode provided to ''ApplyTweak'' depends on the application:
+Plain tweaking can be used to derive child public keys from an aggregate public key using [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP32].
+On the other hand, X-only tweaking is required for Taproot tweaking per [https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341].
+A Taproot-tweaked public key commits to a ''script path'', allowing users to create transaction outputs that are spendable either with a MuSig2 multi-signature or by providing inputs that satisfy the script path.
+Script path spends require a control block that contains a parity bit for the tweaked X-only public key.
+The bit can be obtained with ''GetPlainPubkey(keyagg_ctx)[0] & 1''.
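+
+As a non-normative illustration of X-only (BIP341) tweaking with the reference implementation, where ''taptweak'' is assumed to be the 32-byte TapTweak computed according to BIP341:
+
+<source lang="python">
+from reference import *
+
+def taproot_output_key(pubkeys, taptweak: bytes):
+    keyagg_ctx = key_agg(pubkeys)
+    keyagg_ctx = apply_tweak(keyagg_ctx, taptweak, True)  # True selects X-only tweaking
+    Q, _, _ = keyagg_ctx
+    parity_bit = 0 if has_even_y(Q) else 1  # parity bit for the script path control block
+    return get_xonly_pk(keyagg_ctx), parity_bit
+</source>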
+
+== Algorithms ==
+
+The following specification of the algorithms has been written with a focus on clarity.
+As a result, the specified algorithms are not always optimal in terms of computation and space.
+In particular, some values are recomputed but can be cached in actual implementations (see [[#signing-flow|Signing Flow]]).
+
+=== Notation ===
+
+The following conventions are used, with constants as defined for [https://www.secg.org/sec2-v2.pdf secp256k1]. We note that adapting this proposal to other elliptic curves is not straightforward and can result in an insecure scheme.
+* Lowercase variables represent integers or byte arrays.
+** The constant ''p'' refers to the field size, ''0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F''.
+** The constant ''n'' refers to the curve order, ''0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141''.
+* Uppercase variables refer to points on the curve with equation ''y<sup>2</sup> = x<sup>3</sup> + 7'' over the integers modulo ''p''.
+** ''is_infinite(P)'' returns whether ''P'' is the point at infinity.
+** ''x(P)'' and ''y(P)'' are integers in the range ''0..p-1'' and refer to the X and Y coordinates of a point ''P'' (assuming it is not infinity).
+** The constant ''G'' refers to the base point, for which ''x(G) = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798'' and ''y(G) = 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8''.
+** Addition of points refers to the usual [https://en.wikipedia.org/wiki/Elliptic_curve#The_group_law elliptic curve group operation].
+** [https://en.wikipedia.org/wiki/Elliptic_curve_point_multiplication Multiplication (⋅) of an integer and a point] refers to the repeated application of the group operation.
+* Functions and operations:
+** ''||'' refers to byte array concatenation.
+** The function ''x[i:j]'', where ''x'' is a byte array and ''i, j &ge; 0'', returns a ''(j - i)''-byte array with a copy of the ''i''-th byte (inclusive) to the ''j''-th byte (exclusive) of ''x''.
+** The function ''bytes(n, x)'', where ''x'' is an integer, returns the n-byte encoding of ''x'', most significant byte first.
+** The constant ''empty_bytestring'' refers to the empty byte array. It holds that ''len(empty_bytestring) = 0''.
+** The function ''xbytes(P)'', where ''P'' is a point for which ''not is_infinite(P)'', returns ''bytes(32, x(P))''.
+** The function ''len(x)'' where ''x'' is a byte array returns the length of the array.
+** The function ''has_even_y(P)'', where ''P'' is a point for which ''not is_infinite(P)'', returns ''y(P) mod 2 == 0''.
+** The function ''with_even_y(P)'', where ''P'' is a point, returns ''P'' if ''is_infinite(P)'' or ''has_even_y(P)''. Otherwise, ''with_even_y(P)'' returns ''-P''.
+** The function ''cbytes(P)'', where ''P'' is a point for which ''not is_infinite(P)'', returns ''a || xbytes(P)'' where ''a'' is a byte that is ''2'' if ''has_even_y(P)'' and ''3'' otherwise.
+** The function ''cbytes_ext(P)'', where ''P'' is a point, returns ''bytes(33, 0)'' if ''is_infinite(P)''. Otherwise, it returns ''cbytes(P)''.
+** The function ''int(x)'', where ''x'' is a 32-byte array, returns the 256-bit unsigned integer whose most significant byte first encoding is ''x''.
+** The function ''lift_x(x)'', where ''x'' is an integer in range ''0..2<sup>256</sup>-1'', returns the point ''P'' for which ''x(P) = x''<ref>
+ Given a candidate X coordinate ''x'' in the range ''0..p-1'', there exist either exactly two or exactly zero valid Y coordinates. If no valid Y coordinate exists, then ''x'' is not a valid X coordinate either, i.e., no point ''P'' exists for which ''x(P) = x''. The valid Y coordinates for a given candidate ''x'' are the square roots of ''c = x<sup>3</sup> + 7 mod p'' and they can be computed as ''y = &plusmn;c<sup>(p+1)/4</sup> mod p'' (see [https://en.wikipedia.org/wiki/Quadratic_residue#Prime_or_prime_power_modulus Quadratic residue]) if they exist, which can be checked by squaring and comparing with ''c''.</ref> and ''has_even_y(P)'', or fails if ''x'' is greater than ''p-1'' or no such point exists. The function ''lift_x(x)'' is equivalent to the following pseudocode:
+*** Fail if ''x &gt; p-1''.
+*** Let ''c = x<sup>3</sup> + 7 mod p''.
+*** Let ''y' = c<sup>(p+1)/4</sup> mod p''.
+*** Fail if ''c &ne; y'<sup>2</sup> mod p''.
+*** Let ''y = y' '' if ''y' mod 2 = 0'', otherwise let ''y = p - y' ''.
+*** Return the unique point ''P'' such that ''x(P) = x'' and ''y(P) = y''.
+** The function ''cpoint(x)'', where ''x'' is a 33-byte array (compressed serialization), sets ''P = lift_x(int(x[1:33]))'' and fails if that fails. If ''x[0] = 2'' it returns ''P'' and if ''x[0] = 3'' it returns ''-P''. Otherwise, it fails.
+** The function ''cpoint_ext(x)'', where ''x'' is a 33-byte array (compressed serialization), returns the point at infinity if ''x = bytes(33, 0)''. Otherwise, it returns ''cpoint(x)'' and fails if that fails.
+** The function ''hash<sub>tag</sub>(x)'' where ''tag'' is a UTF-8 encoded tag name and ''x'' is a byte array returns the 32-byte hash ''SHA256(SHA256(tag) || SHA256(tag) || x)''.
+* Other:
+** Tuples are written by listing the elements within parentheses and separated by commas. For example, ''(2, 3, 1)'' is a tuple.
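+
+For reference, the tagged hash ''hash<sub>tag</sub>'' defined above is the same construction as in BIP340 and can be written in Python as:
+
+<source lang="python">
+import hashlib
+
+def tagged_hash(tag: str, msg: bytes) -> bytes:
+    tag_hash = hashlib.sha256(tag.encode('utf-8')).digest()
+    return hashlib.sha256(tag_hash + tag_hash + msg).digest()
+</source>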
+
+=== Key Generation and Aggregation ===
+
+==== Key Generation of an Individual Signer ====
+
+<div>
+Algorithm ''IndividualPubkey(sk)'':<ref>The ''IndividualPubkey'' algorithm matches the key generation procedure traditionally used for ECDSA in Bitcoin</ref>
+* Inputs:
+** The secret key ''sk'': a 32-byte array, freshly generated uniformly at random
+* Let ''d' = int(sk)''.
+* Fail if ''d' = 0'' or ''d' &ge; n''.
+* Return ''cbytes(d'⋅G)''.
+</div>
+
+==== KeyAgg Context ====
+
+The KeyAgg Context is a data structure consisting of the following elements:
+* The point ''Q'' representing the potentially tweaked aggregate public key: an elliptic curve point
+* The accumulated tweak ''tacc'': an integer with ''0 &le; tacc < n''
+* The value ''gacc'' : 1 or -1 mod n
+
+We write "Let ''(Q, gacc, tacc) = keyagg_ctx''" to assign names to the elements of a KeyAgg Context.
+
+<div>
+Algorithm ''GetXonlyPubkey(keyagg_ctx)'':
+* Let ''(Q, _, _) = keyagg_ctx''
+* Return ''xbytes(Q)''
+</div>
+
+<div>
+Algorithm ''GetPlainPubkey(keyagg_ctx)'':
+* Let ''(Q, _, _) = keyagg_ctx''
+* Return ''cbytes(Q)''
+</div>
+
+==== Key Sorting ====
+
+<div>
+Algorithm ''KeySort(pk<sub>1..u</sub>)'':
+* Inputs:
+** The number ''u'' of individual public keys with ''0 < u < 2^32''
+** The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
+* Return ''pk<sub>1..u</sub>'' sorted in lexicographical order.
+</div>
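+
+Since the individual public keys are 33-byte strings, lexicographical order is simply byte-wise order; a non-normative sketch in Python:
+
+<source lang="python">
+def key_sort(pubkeys):
+    # pubkeys: list of 33-byte plain public keys; Python sorts byte strings lexicographically
+    return sorted(pubkeys)
+</source>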
+
+==== Key Aggregation ====
+
+<div>
+Algorithm ''KeyAgg(pk<sub>1..u</sub>)'':
+* Inputs:
+** The number ''u'' of individual public keys with ''0 < u < 2^32''
+** The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
+* Let ''pk2 = GetSecondKey(pk<sub>1..u</sub>)''
+* For ''i = 1 .. u'':
+** Let ''P<sub>i</sub> = cpoint(pk<sub>i</sub>)''; fail if that fails and blame signer ''i'' for invalid individual public key.
+** Let ''a<sub>i</sub> = KeyAggCoeffInternal(pk<sub>1..u</sub>, pk<sub>i</sub>, pk2)''.
+* Let ''Q = a<sub>1</sub>⋅P<sub>1</sub> + a<sub>2</sub>⋅P<sub>2</sub> + ... + a<sub>u</sub>⋅P<sub>u</sub>''
+* Fail if ''is_infinite(Q)''.
+* Let ''gacc = 1''
+* Let ''tacc = 0''
+* Return ''keyagg_ctx = (Q, gacc, tacc)''.
+</div>
+
+<div>
+Internal Algorithm ''HashKeys(pk<sub>1..u</sub>)'':
+* Return ''hash<sub>KeyAgg list</sub>(pk<sub>1</sub> || pk<sub>2</sub> || ... || pk<sub>u</sub>)''
+</div>
+
+<div>
+Internal Algorithm ''GetSecondKey(pk<sub>1..u</sub>)'':
+* For ''j = 1 .. u'':
+** If ''pk<sub>j</sub> &ne; pk<sub>1</sub>'':
+*** Return ''pk<sub>j</sub>''
+* Return ''bytes(33, 0)''
+</div>
+
+<div>
+Internal Algorithm ''KeyAggCoeff(pk<sub>1..u</sub>, pk')'':
+* Let ''pk2 = GetSecondKey(pk<sub>1..u</sub>)''
+* Return ''KeyAggCoeffInternal(pk<sub>1..u</sub>, pk', pk2)''
+</div>
+
+<div>
+Internal Algorithm ''KeyAggCoeffInternal(pk<sub>1..u</sub>, pk', pk2)'':
+* Let ''L = HashKeys(pk<sub>1..u</sub>)''
+* If ''pk' = pk2'':
+** Return 1
+* Return ''int(hash<sub>KeyAgg coefficient</sub>(L || pk')) mod n''<ref>The key aggregation coefficient is computed by hashing the individual public key instead of its index, which requires one more invocation of the SHA-256 compression function. However, it results in significantly simpler implementations because signers do not need to translate between public key indices before and after sorting.</ref>
+</div>
+
+==== Applying Tweaks ====
+
+<div>
+Algorithm ''ApplyTweak(keyagg_ctx, tweak, is_xonly_t)'':
+* Inputs:
+** The ''keyagg_ctx'': a [[#keyagg-context|KeyAgg Context]] data structure
+** The ''tweak'': a 32-byte array
+** The tweak mode ''is_xonly_t'': a boolean
+* Let ''(Q, gacc, tacc) = keyagg_ctx''
+* If ''is_xonly_t'' and ''not has_even_y(Q)'':
+** Let ''g = -1 mod n''
+* Else:
+** Let ''g = 1''
+* Let ''t = int(tweak)''; fail if ''t &ge; n''
+* Let ''Q' = g⋅Q + t⋅G''
+** Fail if ''is_infinite(Q')''
+* Let ''gacc' = g⋅gacc mod n''
+* Let ''tacc' = t + g⋅tacc mod n''
+* Return ''keyagg_ctx' = (Q', gacc', tacc')''
+</div>
+
+=== Nonce Generation ===
+
+<div>
+Algorithm ''NonceGen(sk, pk, aggpk, m, extra_in)'':
+* Inputs:
+** The secret signing key ''sk'': a 32-byte array (optional argument)
+** The individual public key ''pk'': a 33-byte array (see [[#modifications-to-nonce-generation|Modifications to Nonce Generation]] for the reason that this argument is mandatory)
+** The x-only aggregate public key ''aggpk'': a 32-byte array (optional argument)
+** The message ''m'': a byte array (optional argument)<ref name="mlen">In theory, the allowed message size is restricted because SHA256 accepts byte strings only up to size of 2^61-1 bytes (and because of the 8-byte length encoding).</ref>
+** The auxiliary input ''extra_in'': a byte array with ''0 &le; len(extra_in) &le; 2<sup>32</sup>-1'' (optional argument)
+* Let ''rand' '' be a 32-byte array freshly drawn uniformly at random
+* If the optional argument ''sk'' is present:
+** Let ''rand'' be the byte-wise xor of ''sk'' and ''hash<sub>MuSig/aux</sub>(rand')''<ref>The random data is hashed (with a unique tag) as a precaution against situations where the randomness may be correlated with the secret signing key itself. It is xored with the secret key (rather than combined with it in a hash) to reduce the number of operations exposed to the actual secret key.</ref>
+* Else:
+** Let ''rand = rand' ''
+* If the optional argument ''aggpk'' is not present:
+** Let ''aggpk = empty_bytestring''
+* If the optional argument ''m'' is not present:
+** Let ''m_prefixed = bytes(1, 0)''
+* Else:
+** Let ''m_prefixed = bytes(1, 1) || bytes(8, len(m)) || m''
+* If the optional argument ''extra_in'' is not present:
+** Let ''extra_in = empty_bytestring''
+* Let ''k<sub>i</sub> = int(hash<sub>MuSig/nonce</sub>(rand || bytes(1, len(pk)) || pk || bytes(1, len(aggpk)) || aggpk || m_prefixed || bytes(4, len(extra_in)) || extra_in || bytes(1, i - 1))) mod n'' for ''i = 1,2''
+* Fail if ''k<sub>1</sub> = 0'' or ''k<sub>2</sub> = 0''
+* Let ''R<sub>⁎,1</sub> = k<sub>1</sub>⋅G, R<sub>⁎,2</sub> = k<sub>2</sub>⋅G''
+* Let ''pubnonce = cbytes(R<sub>⁎,1</sub>) || cbytes(R<sub>⁎,2</sub>)''
+* Let ''secnonce = bytes(32, k<sub>1</sub>) || bytes(32, k<sub>2</sub>) || pk''<ref name="secnonce">The algorithms as specified here assume that the ''secnonce'' is stored as a 97-byte array using the serialization ''secnonce = bytes(32, k<sub>1</sub>) || bytes(32, k<sub>2</sub>) || pk''. The same format is used in the reference implementation and in the test vectors. However, since the ''secnonce'' is (obviously) not meant to be sent over the wire, compatibility between implementations is not a concern, and this method of storing the ''secnonce'' is merely a suggestion.<br />
+The ''secnonce'' is effectively a local data structure of the signer which comprises the value triple ''(k<sub>1</sub>, k<sub>2</sub>, pk)'', and implementations may choose any suitable method to carry it from ''NonceGen'' (first communication round) to ''Sign'' (second communication round). In particular, implementations may choose to hide the ''secnonce'' in internal state without exposing it in an API explicitly, e.g., in an effort to prevent callers from reusing a ''secnonce'' accidentally.</ref>
+* Return ''(secnonce, pubnonce)''
+</div>
+
+=== Nonce Aggregation ===
+
+<div>
+Algorithm ''NonceAgg(pubnonce<sub>1..u</sub>)'':
+* Inputs:
+** The number ''u'' of ''pubnonces'' with ''0 < u < 2^32''
+** The public nonces ''pubnonce<sub>1..u</sub>'': ''u'' 66-byte arrays
+* For ''j = 1 .. 2'':
+** For ''i = 1 .. u'':
+*** Let ''R<sub>i,j</sub> = cpoint(pubnonce<sub>i</sub>[(j-1)*33:j*33])''; fail if that fails and blame signer ''i'' for invalid ''pubnonce''.
+** Let ''R<sub>j</sub> = R<sub>1,j</sub> + R<sub>2,j</sub> + ... + R<sub>u,j</sub>''
+* Return ''aggnonce = cbytes_ext(R<sub>1</sub>) || cbytes_ext(R<sub>2</sub>)''
+</div>
+
+=== Session Context ===
+
+The Session Context is a data structure consisting of the following elements:
+* The aggregate public nonce ''aggnonce'': a 66-byte array
+* The number ''u'' of individual public keys with ''0 < u < 2^32''
+* The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
+* The number ''v'' of tweaks with ''0 &le; v < 2^32''
+* The tweaks ''tweak<sub>1..v</sub>'': ''v'' 32-byte arrays
+* The tweak modes ''is_xonly_t<sub>1..v</sub>'' : ''v'' booleans
+* The message ''m'': a byte array<ref name="mlen" />
+
+We write "Let ''(aggnonce, u, pk<sub>1..u</sub>, v, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m) = session_ctx''" to assign names to the elements of a Session Context.
+
+<div>
+Algorithm ''GetSessionValues(session_ctx)'':
+* Let ''(aggnonce, u, pk<sub>1..u</sub>, v, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m) = session_ctx''
+* Let ''keyagg_ctx<sub>0</sub> = KeyAgg(pk<sub>1..u</sub>)''; fail if that fails
+* For ''i = 1 .. v'':
+** Let ''keyagg_ctx<sub>i</sub> = ApplyTweak(keyagg_ctx<sub>i-1</sub>, tweak<sub>i</sub>, is_xonly_t<sub>i</sub>)''; fail if that fails
+* Let ''(Q, gacc, tacc) = keyagg_ctx<sub>v</sub>''
+* Let ''b = int(hash<sub>MuSig/noncecoef</sub>(aggnonce || xbytes(Q) || m)) mod n''
+* Let ''R<sub>1</sub> = cpoint_ext(aggnonce[0:33]), R<sub>2</sub> = cpoint_ext(aggnonce[33:66])''; fail if that fails and blame nonce aggregator for invalid ''aggnonce''.
+* Let ''R' = R<sub>1</sub> + b⋅R<sub>2</sub>''
+* If ''is_infinite(R')'':
+** Let final nonce ''R = G'' (see [[#dealing-with-infinity-in-nonce-aggregation|Dealing with Infinity in Nonce Aggregation]])
+* Else:
+** Let final nonce ''R = R' ''
+* Let ''e = int(hash<sub>BIP0340/challenge</sub>(xbytes(R) || xbytes(Q) || m)) mod n''
+* Return ''(Q, gacc, tacc, b, R, e)''
+</div>
+
+<div>
+Algorithm ''GetSessionKeyAggCoeff(session_ctx, P)'':
+* Let ''(_, u, pk<sub>1..u</sub>, _, _, _, _) = session_ctx''
+* Let ''pk = cbytes(P)''
+* Fail if ''pk'' not in ''pk<sub>1..u</sub>''
+* Return ''KeyAggCoeff(pk<sub>1..u</sub>, pk)''
+</div>
+
+=== Signing ===
+
+<div>
+Algorithm ''Sign(secnonce, sk, session_ctx)'':
+* Inputs:
+** The secret nonce ''secnonce'' that has never been used as input to ''Sign'' before: a 97-byte array<ref name="secnonce" />
+** The secret key ''sk'': a 32-byte array
+** The ''session_ctx'': a [[#session-context|Session Context]] data structure
+* Let ''(Q, gacc, _, b, R, e) = GetSessionValues(session_ctx)''; fail if that fails
+* Let ''k<sub>1</sub>' = int(secnonce[0:32]), k<sub>2</sub>' = int(secnonce[32:64])''
+* Fail if ''k<sub>i</sub>' = 0'' or ''k<sub>i</sub>' &ge; n'' for ''i = 1..2''
+* Let ''k<sub>1</sub> = k<sub>1</sub>', k<sub>2</sub> = k<sub>2</sub>' '' if ''has_even_y(R)'', otherwise let ''k<sub>1</sub> = n - k<sub>1</sub>', k<sub>2</sub> = n - k<sub>2</sub>' ''
+* Let ''d' = int(sk)''
+* Fail if ''d' = 0'' or ''d' &ge; n''
+* Let ''P = d'⋅G''
+* Let ''pk = cbytes(P)''
+* Fail if ''pk &ne; secnonce[64:97]''
+* Let ''a = GetSessionKeyAggCoeff(session_ctx, P)''; fail if that fails<ref>Failing ''Sign'' when ''GetSessionKeyAggCoeff(session_ctx, P)'' fails is not necessary for unforgeability. It merely indicates to the caller that the scheme is not being used correctly.</ref>
+* Let ''g = 1'' if ''has_even_y(Q)'', otherwise let ''g = -1 mod n''
+* <div id="Sign negation"></div>Let ''d = g⋅gacc⋅d' mod n'' (See [[#negation-of-the-secret-key-when-signing|Negation Of The Secret Key When Signing]])
+* Let ''s = (k<sub>1</sub> + b⋅k<sub>2</sub> + e⋅a⋅d) mod n''
+* Let ''psig = bytes(32, s)''
+* Let ''pubnonce = cbytes(k<sub>1</sub>'⋅G) || cbytes(k<sub>2</sub>'⋅G)''
+* If ''PartialSigVerifyInternal(psig, pubnonce, pk, session_ctx)'' (see below) returns failure, fail<ref>Verifying the signature before leaving the signer prevents random or adversarially provoked computation errors. This prevents publishing invalid signatures which may leak information about the secret key. It is recommended but can be omitted if the computation cost is prohibitive.</ref>
+* Return partial signature ''psig''
+</div>
+
+=== Partial Signature Verification ===
+
+<div>
+Algorithm ''PartialSigVerify(psig, pubnonce<sub>1..u</sub>, pk<sub>1..u</sub>, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m, i)'':
+* Inputs:
+** The partial signature ''psig'': a 32-byte array
+** The number ''u'' of public nonces and individual public keys with ''0 < u < 2^32''
+** The public nonces ''pubnonce<sub>1..u</sub>'': ''u'' 66-byte arrays
+** The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
+** The number ''v'' of tweaks with ''0 &le; v < 2^32''
+** The tweaks ''tweak<sub>1..v</sub>'': ''v'' 32-byte arrays
+** The tweak modes ''is_xonly_t<sub>1..v</sub>'' : ''v'' booleans
+** The message ''m'': a byte array<ref name="mlen" />
+** The index of the signer ''i'' in the list of public nonces and individual public keys with ''0 < i &le; u''
+* Let ''aggnonce = NonceAgg(pubnonce<sub>1..u</sub>)''; fail if that fails
+* Let ''session_ctx = (aggnonce, u, pk<sub>1..u</sub>, v, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m)''
+* Run ''PartialSigVerifyInternal(psig, pubnonce<sub>i</sub>, pk<sub>i</sub>, session_ctx)''
+* Return success iff no failure occurred before reaching this point.
+</div>
+
+<div>
+Internal Algorithm ''PartialSigVerifyInternal(psig, pubnonce, pk, session_ctx)'':
+* Let ''(Q, gacc, _, b, R, e) = GetSessionValues(session_ctx)''; fail if that fails
+* Let ''s = int(psig)''; fail if ''s &ge; n''
+* Let ''R<sub>⁎,1</sub> = cpoint(pubnonce[0:33]), R<sub>⁎,2</sub> = cpoint(pubnonce[33:66])''
+* Let ''Re<sub>⁎</sub>' = R<sub>⁎,1</sub> + b⋅R<sub>⁎,2</sub>''
+* Let effective nonce ''Re<sub>⁎</sub> = Re<sub>⁎</sub>' '' if ''has_even_y(R)'', otherwise let ''Re<sub>⁎</sub> = -Re<sub>⁎</sub>' ''
+* Let ''P = cpoint(pk)''; fail if that fails
+* Let ''a = GetSessionKeyAggCoeff(session_ctx, P)''<ref>''GetSessionKeyAggCoeff(session_ctx, P)'' cannot fail when called from ''PartialSigVerifyInternal''.</ref>
+* Let ''g = 1'' if ''has_even_y(Q)'', otherwise let ''g = -1 mod n''
+* <div id="SigVerify negation"></div>Let ''g' = g⋅gacc mod n'' (See [[#negation-of-the-individual-public-key-when-partially-verifying|Negation Of The Individual Public Key When Partially Verifying]])
+* Fail if ''s⋅G &ne; Re<sub>⁎</sub> + e⋅a⋅g'⋅P''
+* Return success iff no failure occurred before reaching this point.
+</div>
+
+=== Partial Signature Aggregation ===
+
+<div>
+Algorithm ''PartialSigAgg(psig<sub>1..u</sub>, session_ctx)'':
+* Inputs:
+** The number ''u'' of signatures with ''0 < u < 2^32''
+** The partial signatures ''psig<sub>1..u</sub>'': ''u'' 32-byte arrays
+** The ''session_ctx'': a [[#session-context|Session Context]] data structure
+* Let ''(Q, _, tacc, _, R, e) = GetSessionValues(session_ctx)''; fail if that fails
+* For ''i = 1 .. u'':
+** Let ''s<sub>i</sub> = int(psig<sub>i</sub>)''; fail if ''s<sub>i</sub> &ge; n'' and blame signer ''i'' for invalid partial signature.
+* Let ''g = 1'' if ''has_even_y(Q)'', otherwise let ''g = -1 mod n''
+* Let ''s = s<sub>1</sub> + ... + s<sub>u</sub> + e⋅g⋅tacc mod n''
+* Return ''sig = xbytes(R) || bytes(32, s)''
+</div>
+
+=== Test Vectors and Reference Code ===
+
+We provide a naive, highly inefficient, and non-constant time [[bip-0327/reference.py|pure Python 3 reference implementation of the key aggregation, partial signing, and partial signature verification algorithms]].
+
+Standalone JSON test vectors are also available in the [[bip-0327|same directory]], to facilitate porting the test vectors into other implementations.
+
+The reference implementation is for demonstration purposes only and not to be used in production environments.
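+
+The following non-normative sketch shows how the reference functions fit together in a 2-of-2 signing session. Both "signers" live in the same script, which is acceptable only for testing:
+
+<source lang="python">
+import secrets
+from reference import *
+
+def two_of_two_example(msg: bytes) -> bytes:
+    sks = [secrets.token_bytes(32) for _ in range(2)]
+    pks = [individual_pk(sk) for sk in sks]
+
+    # First round: each signer generates a nonce pair; the pubnonces are exchanged.
+    nonces = [nonce_gen_internal(secrets.token_bytes(32), sks[i], pks[i], None, msg, None)
+              for i in range(2)]
+    aggnonce = nonce_agg([pubnonce for _, pubnonce in nonces])
+
+    # Second round: each signer produces a partial signature for the same session context.
+    # The secnonce is wrapped in bytearray() only to keep this demo self-contained;
+    # production code must keep a single mutable secnonce and never copy it.
+    session_ctx = SessionContext(aggnonce, pks, [], [], msg)
+    psigs = [sign(bytearray(secnonce), sks[i], session_ctx)
+             for i, (secnonce, _) in enumerate(nonces)]
+
+    sig = partial_sig_agg(psigs, session_ctx)
+    assert schnorr_verify(msg, get_xonly_pk(key_agg(pks)), sig)
+    return sig
+</source>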
+
+== Remarks on Security and Correctness ==
+
+=== Signing with Tweaked Individual Keys ===
+
+The scheme in this proposal has been designed to be secure
+even if signers tweak their individual secret keys with tweaks known to the adversary (e.g., as in BIP32 unhardened derivation)
+before providing the corresponding individual public keys as input to key aggregation.
+In particular, the scheme as specified above requires each signer to provide a final individual public key ''pk'' already to ''NonceGen'',
+which writes it into the ''secnonce'' array
+so that it can be checked against ''IndividualPubkey(sk)'' in the ''Sign'' algorithm.
+The purpose of this check in ''Sign'' is to ensure that ''pk'',
+and thus the secret key ''sk'' that will be provided to ''Sign'',
+is determined before the signer sends out the ''pubnonce''.
+
+If the check in ''Sign'' was omitted,
+and a signer supported signing with at least two different secret keys ''sk<sub>1</sub>'' and ''sk<sub>2</sub>''
+which have been obtained via tweaking another secret key with tweaks known to the adversary,
+then the adversary could, after having seen the ''pubnonce'',
+influence whether ''sk<sub>1</sub>'' or ''sk<sub>2</sub>'' is provided to ''Sign''.
+This degree of freedom may allow the adversary to perform a generalized birthday attack and thereby forge a signature
+(see [https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-October/021000.html bitcoin-dev mailing list post] and [https://github.com/jonasnick/musig2-tweaking writeup] for details).
+
+Checking ''pk'' against ''IndividualPubkey(sk)'' is a simple way to ensure
+that the secret key provided to ''Sign'' is fully determined already when ''NonceGen'' is invoked.
+This removes the adversary's ability to influence the secret key after having seen the ''pubnonce''
+and thus rules out the attack.<ref>Ensuring that the secret key provided to ''Sign'' is fully determined already when ''NonceGen'' is invoked is a simple policy to rule out the attack,
+but more flexible policies are conceivable.
+In fact, if the signer uses nothing but the message to be signed and the list of the individual public keys of all signers to decide which secret key to use,
+then it is not a problem that the adversary can influence this decision after having seen the ''pubnonce''.<br />
+More formally, consider modified algorithms ''NonceGen' '' and ''Sign' '', where ''NonceGen' '' does not take the individual public key of the signer as input and does not store it in ''secnonce'', and ''Sign' '' does not read the individual public key from ''secnonce'' and does not check it against the secret key taken as input.
+Then it suffices that for each invocation of ''NonceGen' '' with output ''(secnonce, pubnonce)'',
+a function ''fsk'' is determined before sending out ''pubnonce'',
+where ''fsk'' maps a pair consisting of a list of individual public keys and a message to a secret key,
+such that the secret key ''sk'' and the session context ''session_ctx = (_, _, pk<sub>1..u</sub>, _, _, _, m)''
+provided to the corresponding invocation of ''Sign'(secnonce, sk, session_ctx)'',
+adhere to the condition ''fsk(pk<sub>1..u</sub>, m) = sk''.<br />
+However, this requirement is complex and hard to enforce in implementations.
+The algorithms ''NonceGen'' and ''Sign'' specified in this BIP are effectively restricted to constant functions ''fsk(_, _) = sk''.
+In other words, their usage ensures that the secret key ''sk'' of the signers is determined entirely when invoking ''NonceGen'',
+which is enforced easily by letting ''NonceGen'' take the corresponding individual public key ''pk'' as input and checking ''pk'' against ''IndividualPubKey(sk)'' in ''Sign''.</ref>
+Note that the scheme as given in the [https://eprint.iacr.org/2020/1261 MuSig2 paper] does not perform the check in ''Sign''.
+However, the security model in the paper does not cover tweaking at all and assumes a single fixed secret key.
+
+=== Modifications to Nonce Generation ===
+
+Implementers must avoid modifying the ''NonceGen'' algorithm without being fully aware of the implications.
+We provide two modifications to ''NonceGen'' that are secure when applied correctly and may be useful in special circumstances, summarized in the following table.
+
+{| class="wikitable" style="margin:auto"
+! !! needs secure randomness !! needs secure counter !! needs to keep state securely !! needs aggregate nonce of all other signers (only possible for one signer)
+|-
+! NonceGen || ✓ || &nbsp; || ✓ || &nbsp;
+|-
+! CounterNonceGen || &nbsp; || ✓ || ✓ || &nbsp;
+|-
+! DeterministicSign || &nbsp; || &nbsp; || &nbsp; || ✓
+|}
+
+First, on systems where obtaining uniformly random values is much harder than maintaining a global atomic counter, it can be beneficial to modify ''NonceGen''.
+The resulting algorithm ''CounterNonceGen'' does not draw ''rand' '' uniformly at random but instead sets ''rand' '' to the value of an atomic counter that is incremented whenever it is read.
+With this modification, the secret signing key ''sk'' of the signer generating the nonce is '''not''' an optional argument and must be provided to ''NonceGen''.
+The security of the resulting scheme then depends on the requirement that reading the counter must never yield the same counter value in two ''NonceGen'' invocations with the same ''sk''.
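+
+A non-normative sketch of such a counter-based variant, built on the reference implementation's ''nonce_gen_internal''; the in-memory counter below is only a stand-in for a persistent, atomic counter:
+
+<source lang="python">
+from itertools import count
+from reference import *
+
+_counter = count(1)  # stand-in; must be atomic, persistent, and never repeat for the same sk
+
+def counter_nonce_gen(sk: bytes, pk: bytes, aggpk=None, msg=None, extra_in=None):
+    rand_ = next(_counter).to_bytes(32, 'big')  # rand' is the counter value, not fresh randomness
+    # sk is mandatory: it is mixed into the nonce derivation inside nonce_gen_internal,
+    # so distinct counter values with the same sk yield distinct nonces.
+    return nonce_gen_internal(rand_, sk, pk, aggpk, msg, extra_in)
+</source>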
+
+Second, if there is a unique signer who is supposed to send the ''pubnonce'' last, it is possible to modify nonce generation for this single signer to not require high-quality randomness.
+Such a nonce generation algorithm ''DeterministicSign'' is specified below.
+Note that the only optional argument is ''rand'', which can be omitted if randomness is entirely unavailable.
+''DeterministicSign'' requires the argument ''aggothernonce'' which should be set to the output of ''NonceAgg'' run on the ''pubnonce'' value of '''all''' other signers (but can be provided by an untrusted party).
+Hence, using ''DeterministicSign'' is only possible for the last signer to generate a nonce and makes the signer stateless, similar to the stateless signer described in the [[#nonce-generation|Nonce Generation]] section.
+
+==== Deterministic and Stateless Signing for a Single Signer ====
+
+<div>
+Algorithm ''DeterministicSign(sk, aggothernonce, pk<sub>1..u</sub>, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m, rand)'':
+* Inputs:
+** The secret signing key ''sk'': a 32-byte array
+** The aggregate public nonce ''aggothernonce'' (see [[#modifications-to-nonce-generation|above]]): a 66-byte array
+** The number ''u'' of individual public keys with ''0 < u < 2^32''
+** The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
+** The number ''v'' of tweaks with ''0 &le; v < 2^32''
+** The tweaks ''tweak<sub>1..v</sub>'': ''v'' 32-byte arrays
+** The tweak modes ''is_xonly_t<sub>1..v</sub>'': ''v'' booleans
+** The message ''m'': a byte array<ref name="mlen" />
+** The auxiliary randomness ''rand'': a 32-byte array (optional argument)
+* If the optional argument ''rand'' is present:
+** Let ''sk' '' be the byte-wise xor of ''sk'' and ''hash<sub>MuSig/aux</sub>(rand)''
+* Else:
+** Let ''sk' = sk''
+* Let ''keyagg_ctx<sub>0</sub> = KeyAgg(pk<sub>1..u</sub>)''; fail if that fails
+* For ''i = 1 .. v'':
+** Let ''keyagg_ctx<sub>i</sub> = ApplyTweak(keyagg_ctx<sub>i-1</sub>, tweak<sub>i</sub>, is_xonly_t<sub>i</sub>)''; fail if that fails
+* Let ''aggpk = GetXonlyPubkey(keyagg_ctx<sub>v</sub>)''
+* Let ''k<sub>i</sub> = int(hash<sub>MuSig/deterministic/nonce</sub>(sk' || aggothernonce || aggpk || bytes(8, len(m)) || m || bytes(1, i - 1))) mod n'' for ''i = 1,2''
+* Fail if ''k<sub>1</sub> = 0'' or ''k<sub>2</sub> = 0''
+* Let ''R<sub>⁎,1</sub> = k<sub>1</sub>⋅G, R<sub>⁎,2</sub> = k<sub>2</sub>⋅G''
+* Let ''pubnonce = cbytes(R<sub>⁎,1</sub>) || cbytes(R<sub>⁎,2</sub>)''
+* Let ''d = int(sk)''
+* Fail if ''d = 0'' or ''d &ge; n''
+* Let ''pk = cbytes(d⋅G)''
+* Let ''secnonce = bytes(32, k<sub>1</sub>) || bytes(32, k<sub>2</sub>) || pk''
+* Let ''aggnonce = NonceAgg((pubnonce, aggothernonce))''; fail if that fails and blame nonce aggregator for invalid ''aggothernonce''.
+* Let ''session_ctx = (aggnonce, u, pk<sub>1..u</sub>, v, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m)''
+* Return ''(pubnonce, Sign(secnonce, sk, session_ctx))''
+</div>
+
+=== Tweaking Definition ===
+
+Two modes of tweaking the aggregate public key are supported. They correspond to the following algorithms:
+
+<div>
+Algorithm ''ApplyPlainTweak(P, t)'':
+* Inputs:
+** ''P'': a point
+** The tweak ''t'': an integer with ''0 &le; t < n ''
+* Return ''P + t⋅G''
+</div>
+
+<div>
+Algorithm ''ApplyXonlyTweak(P, t)'':
+* Return ''with_even_y(P) + t⋅G''
+</div>
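+
+Expressed with the point helpers of the reference implementation, the two modes correspond to the following non-normative sketch:
+
+<source lang="python">
+from reference import *
+
+def apply_plain_tweak(P, t: int):
+    # P + t*G
+    return point_add(P, point_mul(G, t))
+
+def apply_xonly_tweak(P, t: int):
+    # with_even_y(P) + t*G
+    P_even = P if has_even_y(P) else point_mul(P, n - 1)
+    return point_add(P_even, point_mul(G, t))
+</source>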
+
+=== Negation Of The Secret Key When Signing ===
+
+In order to produce a partial signature for an X-only aggregate public key that is an aggregate of ''u'' individual public keys and tweaked ''v'' times (X-only or plain), the ''[[#Sign negation|Sign]]'' algorithm may need to negate the secret key during the signing process.
+
+<poem>
+The following elliptic curve points arise as intermediate steps when creating a signature:
+• ''P<sub>i</sub>'' as computed in ''KeyAgg'' is the point corresponding to the ''i''-th signer's individual public key. Defining ''d<sub>i</sub>' '' to be the ''i''-th signer's secret key as an integer, i.e., the ''d' '' value as computed in the ''Sign'' algorithm of the ''i''-th signer, we have
+ ''P<sub>i</sub> = d<sub>i</sub>'⋅G ''.
+• ''Q<sub>0</sub>'' is the aggregate of the individual public keys. It is identical to value ''Q'' computed in ''KeyAgg'' and therefore defined as
+ ''Q<sub>0</sub> = a<sub>1</sub>⋅P<sub>1</sub> + a<sub>2</sub>⋅P<sub>2</sub> + ... + a<sub>u</sub>⋅P<sub>u</sub>''.
+• ''Q<sub>i</sub>'' is the tweaked aggregate public key after the ''i''-th execution of ''ApplyTweak'' for ''1 &le; i &le; v''. It holds that
+ ''Q<sub>i</sub> = f(i-1) + t<sub>i</sub>⋅G'' for ''i = 1, ..., v'' where
+ ''f(i-1) := with_even_y(Q<sub>i-1</sub>)'' if ''is_xonly_t<sub>i</sub>'' and
+ ''f(i-1) := Q<sub>i-1</sub>'' otherwise.
+• ''with_even_y(Q<sub>v</sub>)'' is the final result of the key aggregation and tweaking operations. It corresponds to the output of ''GetXonlyPubkey'' applied on the final KeyAgg Context.
+</poem>
+
+The signer's goal is to produce a partial signature corresponding to the final result of key aggregation and tweaking, i.e., the X-only public key ''with_even_y(Q<sub>v</sub>)''.
+
+<poem>
+For ''1 &le; i &le; v'', we denote the value ''g'' computed in the ''i''-th execution of ''ApplyTweak'' by ''g<sub>i-1</sub>''. Therefore, ''g<sub>i-1</sub>'' is ''-1 mod n'' if and only if ''is_xonly_t<sub>i</sub>'' is true and ''Q<sub>i-1</sub>'' has an odd Y coordinate. In other words, ''g<sub>i-1</sub>'' indicates whether ''Q<sub>i-1</sub>'' needed to be negated to apply an X-only tweak:
+ ''f(i-1) = g<sub>i-1</sub>⋅Q<sub>i-1</sub>'' for ''1 &le; i &le; v''.
+
+Furthermore, the ''Sign'' and ''PartialSigVerify'' algorithms set value ''g'' depending on whether ''Q<sub>v</sub>'' needed to be negated to produce the (X-only) final output. For consistency, this value ''g'' is referred to as ''g<sub>v</sub>'' in this section.
+ ''with_even_y(Q<sub>v</sub>) = g<sub>v</sub>⋅Q<sub>v</sub>''.
+</poem>
+
+<poem>
+So, the (X-only) final public key is
+ ''with_even_y(Q<sub>v</sub>)
+ = g<sub>v</sub>⋅Q<sub>v</sub>
+ = g<sub>v</sub>⋅(f(v-1) + t<sub>v</sub>⋅G)
+ = g<sub>v</sub>⋅(g<sub>v-1</sub>⋅(f(v-2) + t<sub>v-1</sub>⋅G) + t<sub>v</sub>⋅G)
+ = g<sub>v</sub>⋅g<sub>v-1</sub>⋅f(v-2) + g<sub>v</sub>⋅(t<sub>v</sub> + g<sub>v-1</sub>⋅t<sub>v-1</sub>)⋅G
+ = g<sub>v</sub>⋅g<sub>v-1</sub>⋅f(v-2) + (sum<sub>i=v-1..v</sub> t<sub>i</sub>⋅prod<sub>j=i..v</sub> g<sub>j</sub>)⋅G
+ = g<sub>v</sub>⋅g<sub>v-1</sub>⋅...⋅g<sub>1</sub>⋅f(0) + (sum<sub>i=1..v</sub> t<sub>i</sub>⋅prod<sub>j=i..v</sub> g<sub>j</sub>)⋅G
+ = g<sub>v</sub>⋅...⋅g<sub>0</sub>⋅Q<sub>0</sub> + g<sub>v</sub>⋅tacc<sub>v</sub>⋅G''
+ where ''tacc<sub>i</sub>'' is computed by ''KeyAgg'' and ''ApplyTweak'' as follows:
+ ''tacc<sub>0</sub> = 0
+ tacc<sub>i</sub> = t<sub>i</sub> + g<sub>i-1</sub>⋅tacc<sub>i-1</sub> for i=1..v mod n''
+ for which it holds that ''g<sub>v</sub>⋅tacc<sub>v</sub> = sum<sub>i=1..v</sub> t<sub>i</sub>⋅prod<sub>j=i..v</sub> g<sub>j</sub>''.
+</poem>
+
+<poem>
+''KeyAgg'' and ''ApplyTweak'' compute
+ ''gacc<sub>0</sub> = 1
+ gacc<sub>i</sub> = g<sub>i-1</sub>⋅gacc<sub>i-1</sub> for i=1..v mod n''
+So we can rewrite above equation for the final public key as
+ ''with_even_y(Q<sub>v</sub>) = g<sub>v</sub>⋅gacc<sub>v</sub>⋅Q<sub>0</sub> + g<sub>v</sub>⋅tacc<sub>v</sub>⋅G''.
+</poem>
+
+<poem>
+Then we have
+ ''with_even_y(Q<sub>v</sub>) - g<sub>v</sub>⋅tacc<sub>v</sub>⋅G
+ = g<sub>v</sub>⋅gacc<sub>v</sub>⋅Q<sub>0</sub>
+ = g<sub>v</sub>⋅gacc<sub>v</sub>⋅(a<sub>1</sub>⋅P<sub>1</sub> + ... + a<sub>u</sub>⋅P<sub>u</sub>)
+ = g<sub>v</sub>⋅gacc<sub>v</sub>⋅(a<sub>1</sub>⋅d<sub>1</sub>'⋅G + ... + a<sub>u</sub>⋅d<sub>u</sub>'⋅G)
+ = sum<sub>i=1..u</sub>(g<sub>v</sub>⋅gacc<sub>v</sub>⋅a<sub>i</sub>⋅d<sub>i</sub>')⋅G''.
+</poem>
+
+Intuitively, ''gacc<sub>i</sub>'' tracks accumulated sign flipping and ''tacc<sub>i</sub>'' tracks the accumulated tweak value after applying the first ''i'' individual tweaks. Additionally, ''g<sub>v</sub>'' indicates whether ''Q<sub>v</sub>'' needed to be negated to produce the final X-only result. Thus, signer ''i'' multiplies its secret key ''d<sub>i</sub>' '' with ''g<sub>v</sub>⋅gacc<sub>v</sub>'' in the ''[[#Sign negation|Sign]]'' algorithm.
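+
+The identity ''with_even_y(Q<sub>v</sub>) = g<sub>v</sub>⋅gacc<sub>v</sub>⋅Q<sub>0</sub> + g<sub>v</sub>⋅tacc<sub>v</sub>⋅G'' derived above can be checked numerically with the reference implementation (a non-normative sanity check; ''key_agg_and_tweak'' is the helper also used by the test vector scripts):
+
+<source lang="python">
+from reference import *
+
+def check_negation_identity(pubkeys, tweaks, is_xonly):
+    Q0, _, _ = key_agg(pubkeys)                        # untweaked aggregate key Q_0
+    Qv, gacc_v, tacc_v = key_agg_and_tweak(pubkeys, tweaks, is_xonly)
+    g_v = 1 if has_even_y(Qv) else n - 1
+    lhs = point_mul(Qv, g_v)                           # with_even_y(Q_v)
+    rhs = point_add(point_mul(Q0, g_v * gacc_v % n),
+                    point_mul(G, g_v * tacc_v % n))
+    assert lhs == rhs
+</source>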
+
+==== Negation Of The Individual Public Key When Partially Verifying ====
+
+<poem>
+As explained in [[#negation-of-the-secret-key-when-signing|Negation Of The Secret Key When Signing]] the signer uses a possibly negated secret key
+ ''d = g<sub>v</sub>⋅gacc<sub>v</sub>⋅d' mod n''
+when producing a partial signature to ensure that the aggregate signature will correspond to an aggregate public key with even Y coordinate.
+</poem>
+
+<poem>
+The ''[[#SigVerify negation|PartialSigVerifyInternal]]'' algorithm is supposed to check
+ ''s⋅G = Re<sub>⁎</sub> + e⋅a⋅d⋅G''.
+</poem>
+
+<poem>
+The verifier doesn't have access to ''d⋅G'' but can construct it using the individual public key ''pk'' as follows:
+''d⋅G
+ = g<sub>v</sub>⋅gacc<sub>v</sub>⋅d'⋅G
+ = g<sub>v</sub>⋅gacc<sub>v</sub>⋅cpoint(pk)''
+Note that the aggregate public key and list of tweaks are inputs to partial signature verification, so the verifier can also construct ''g<sub>v</sub>'' and ''gacc<sub>v</sub>''.
+</poem>
+
+=== Dealing with Infinity in Nonce Aggregation ===
+
+If the nonce aggregator provides ''aggnonce = bytes(33,0) || bytes(33,0)'', either the nonce aggregator is dishonest or there is at least one dishonest signer (except with negligible probability).
+If signing aborted in this case, it would be impossible to determine who is dishonest.
+Therefore, signing continues so that the culprit is revealed when collecting and verifying partial signatures.
+
+However, the final nonce ''R'' of a BIP340 Schnorr signature cannot be the point at infinity.
+If we would nonetheless allow the final nonce to be the point at infinity, then the scheme would lose the following property:
+if ''PartialSigVerify'' succeeds for all partial signatures, then ''PartialSigAgg'' will return a valid Schnorr signature.
+Since this is a valuable feature, we modify MuSig2* (which is defined in the appendix of the [https://eprint.iacr.org/2020/1261 MuSig2 paper]) to avoid producing an invalid Schnorr signature while still allowing detection of the dishonest signer: In ''GetSessionValues'', if the final nonce ''R'' would be the point at infinity, set it to the generator instead (an arbitrary choice).
+
+This modification to ''GetSessionValues'' does not affect the unforgeability of the scheme.
+Given a successful adversary against the unforgeability game (EUF-CMA) for the modified scheme, a reduction can win the unforgeability game for the original scheme by simulating the modification towards the adversary:
+When the adversary provides ''aggnonce' = bytes(33, 0) || bytes(33, 0)'', the reduction sets ''aggnonce = cbytes_ext(G) || bytes(33, 0)''.
+For any other ''aggnonce' '', the reduction sets ''aggnonce = aggnonce' ''.
+(The case that the adversary provides an ''aggnonce' ≠ bytes(33, 0) || bytes(33, 0) '' but nevertheless ''R' '' in ''GetSessionValues'' is the point at infinity happens only with negligible probability.)
+
+=== Choosing the Size of the Nonce ===
+
+The [https://eprint.iacr.org/2020/1261 MuSig2 paper] contains two security proofs that apply to different variants of the scheme.
+The first proof relies on the random oracle model (ROM) and applies to a scheme variant where each signer's nonce consists of four elliptic curve points.
+The second proof requires a stronger model, namely the combination of the ROM and the algebraic group model (AGM),
+and applies to an optimized scheme variant where the signers' nonces consist of only two points.
+This proposal uses the latter, optimized scheme variant.
+Relying on the stronger model is a legitimate choice for the following reasons:
+
+First, an approach widely taken is interpreting a Forking Lemma proof in the ROM merely as design justification and ignoring the loss of security due to the Forking Lemma.
+If one believes in this approach, then the ROM may not be the optimal model in the first place because some parts of the concrete security bound are arbitrarily ignored.
+One may just as well move to the ROM+AGM model, which produces bounds close to the best-known attacks, e.g., for Schnorr signatures.
+
+Second, as of this writing, there is no instance of a serious cryptographic scheme with a security proof in the AGM that is not secure in practice.
+There are, however, insecure toy schemes with AGM security proofs, but those explicitly violate the requirements of the AGM.
+[https://eprint.iacr.org/2022/226.pdf Broken AGM proofs of toy schemes] provide group elements to the adversary without declaring them as group element inputs.
+In contrast, in MuSig2, all group elements that arise in the scheme are known to the adversary and declared as group element inputs.
+A scheme very similar to MuSig2 and with two-point nonces was independently proven secure in the ROM and AGM by [https://eprint.iacr.org/2020/1245 Alper and Burdges].
+
+== Backwards Compatibility ==
+
+This document proposes a standard for the MuSig2 multi-signature scheme that is compatible with [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP340].
+MuSig2 is ''not'' compatible with ECDSA signatures traditionally used in Bitcoin.
+
+== Change Log ==
+
+To help implementers understand updates to this document, we attach a version number that resembles ''semantic versioning'' (<code>MAJOR.MINOR.PATCH</code>).
+The <code>MAJOR</code> version is incremented if changes to the BIP are introduced that are incompatible with prior versions.
+An exception to this rule is <code>MAJOR</code> version zero (0.y.z) which is for development and does not need to be incremented if backwards incompatible changes are introduced.
+The <code>MINOR</code> version is incremented whenever the inputs or the output of an algorithm changes in a backward-compatible way or new backward-compatible functionality is added.
+The <code>PATCH</code> version is incremented for other changes that are noteworthy (bug fixes, test vectors, important clarifications, etc.).
+
+* '''1.0.0''' (2023-03-26):
+** Number 327 was assigned to this BIP.
+* '''1.0.0-rc.4''' (2023-03-02):
+** Add expected value of ''pubnonce'' to ''NonceGen'' test vectors.
+* '''1.0.0-rc.3''' (2023-02-28):
+** Improve ''NonceGen'' test vectors by not using an all-zero hex string as ''rand_'' values. This change addresses potential issues in some implementations that interpret this as a special value indicating uninitialized memory or a broken random number generator and therefore return an error.
+** Fix invalid length of a ''pubnonce'' in the ''PartialSigVerify'' test vectors.
+** Improve ''KeySort'' test vector.
+** Add explicit ''IndividualPubkey'' algorithm.
+** Rename KeyGen Context to KeyAgg Context.
+* '''1.0.0-rc.2''' (2022-10-28):
+** Fix vulnerability that can occur in certain unusual scenarios (see [https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-October/021000.html bitcoin-dev mailing list]): Add mandatory ''pk'' argument to ''NonceGen'', append ''pk'' to ''secnonce'' and check in ''Sign'' that the ''pk'' in ''secnonce'' matches. Update test vectors.
+** Make sure that signer's key is in list of individual public keys by adding failure case to ''GetSessionKeyAggCoeff'' and add test vectors.
+* '''1.0.0-rc.1''' (2022-10-03): Submit draft BIP to the BIPs repository
+* '''0.8.6''' (2022-09-15): Clarify that implementations do not need to support every feature and add a test vector for signing with a tweaked key
+* '''0.8.5''' (2022-09-05): Rename some functions to improve clarity.
+* '''0.8.4''' (2022-09-02): Make naming of nonce variants ''R'' in specifications of the algorithms and reference code easier to read and more consistent.
+* '''0.8.3''' (2022-09-01): Overwrite ''secnonce'' in ''sign'' reference implementation to help prevent accidental reuse and add test vector for invalid ''secnonce''.
+* '''0.8.2''' (2022-08-30): Fix ''KeySort'' input length and add test vectors
+* '''0.8.1''' (2022-08-26): Add ''DeterministicSign'' algorithm
+* '''0.8.0''' (2022-08-26): Switch from X-only to plain public key for individual public keys. This requires updating a large portion of the test vectors.
+* '''0.7.2''' (2022-08-17): Add ''NonceGen'' and ''Sign/PartialSigVerify'' test vectors for messages longer than 32 bytes.
+* '''0.7.1''' (2022-08-10): Extract test vectors into separate JSON file.
+* '''0.7.0''' (2022-07-31): Change ''NonceGen'' such that output when message is not present is different from when message is present but has length 0.
+* '''0.6.0''' (2022-07-31): Allow variable length messages, change serialization of the message in the ''NonceGen'' hash function, and add test vectors
+* '''0.5.2''' (2022-06-26): Fix ''aggpk'' in ''NonceGen'' test vectors.
+* '''0.5.1''' (2022-06-22): Rename "ordinary" tweaking to "plain" tweaking.
+* '''0.5.0''' (2022-06-21): Separate ApplyTweak from KeyAgg and introduce KeyGen Context.
+* '''0.4.0''' (2022-06-20): Allow the output of NonceAgg to be infinity and add test vectors
+* '''0.3.2''' (2022-06-02): Add a lot of test vectors and improve handling of invalid contributions in reference code.
+* '''0.3.1''' (2022-05-24): Add ''NonceGen'' test vectors
+* '''0.3.0''' (2022-05-24): Hash ''i - 1'' instead of ''i'' in ''NonceGen''
+* '''0.2.0''' (2022-05-19): Change order of arguments in ''NonceGen'' hash function
+* '''0.1.0''' (2022-05-19): Publication of draft BIP on the bitcoin-dev mailing list
+
+== Footnotes ==
+
+<references />
+
+== Acknowledgements ==
+
+We thank Brandon Black, Riccardo Casatta, Lloyd Fournier, Russell O'Connor, and Pieter Wuille for their contributions to this document.
diff --git a/bip-0327/gen_vectors_helper.py b/bip-0327/gen_vectors_helper.py
new file mode 100644
index 0000000..a70bb6f
--- /dev/null
+++ b/bip-0327/gen_vectors_helper.py
@@ -0,0 +1,184 @@
+from reference import *
+
+def gen_key_agg_vectors():
+ print("key_agg_vectors.json: Intermediate tweaking result is point at infinity")
+ sk = bytes.fromhex("7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671")
+ pk = individual_pk(sk)
+ keygen_ctx = key_agg([pk])
+ aggpoint, _, _ = keygen_ctx
+ aggsk = key_agg_coeff([pk], pk)*int_from_bytes(sk) % n
+ t = n - aggsk
+ assert point_add(point_mul(G, t), aggpoint) == None
+ is_xonly = False
+ tweak = bytes_from_int(t)
+ assert_raises(ValueError, lambda: apply_tweak(keygen_ctx, tweak, is_xonly), lambda e: True)
+ print(" pubkey:", pk.hex().upper())
+ print(" tweak: ", tweak.hex().upper())
+
+def check_sign_verify_vectors():
+ with open(os.path.join(sys.path[0], 'vectors', 'sign_verify_vectors.json')) as f:
+ test_data = json.load(f)
+ X = fromhex_all(test_data["pubkeys"])
+ pnonce = fromhex_all(test_data["pnonces"])
+ aggnonces = fromhex_all(test_data["aggnonces"])
+ msgs = fromhex_all(test_data["msgs"])
+
+ valid_test_cases = test_data["valid_test_cases"]
+ for (i, test_case) in enumerate(valid_test_cases):
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ aggnonce = aggnonces[test_case["aggnonce_index"]]
+ assert nonce_agg(pubnonces) == aggnonce
+ msg = msgs[test_case["msg_index"]]
+ signer_index = test_case["signer_index"]
+ expected = bytes.fromhex(test_case["expected"])
+
+ session_ctx = SessionContext(aggnonce, pubkeys, [], [], msg)
+ (Q, _, _, _, R, _) = get_session_values(session_ctx)
+ # Make sure the vectors include tests for both variants of Q and R
+ if i == 0:
+ assert has_even_y(Q) and not has_even_y(R)
+ if i == 1:
+ assert not has_even_y(Q) and has_even_y(R)
+ if i == 2:
+ assert has_even_y(Q) and has_even_y(R)
+
+def check_tweak_vectors():
+ with open(os.path.join(sys.path[0], 'vectors', 'tweak_vectors.json')) as f:
+ test_data = json.load(f)
+
+ X = fromhex_all(test_data["pubkeys"])
+ pnonce = fromhex_all(test_data["pnonces"])
+ tweak = fromhex_all(test_data["tweaks"])
+ valid_test_cases = test_data["valid_test_cases"]
+
+ for (i, test_case) in enumerate(valid_test_cases):
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ tweaks = [tweak[i] for i in test_case["tweak_indices"]]
+ is_xonly = test_case["is_xonly"]
+
+ _, gacc, _ = key_agg_and_tweak(pubkeys, tweaks, is_xonly)
+ # Make sure the vectors include tests for gacc = 1 and -1
+ if i == 0:
+ assert gacc == n - 1
+ if i == 1:
+ assert gacc == 1
+
+def sig_agg_vectors():
+ print("sig_agg_vectors.json:")
+ sk = fromhex_all([
+ "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
+ "3874D22DE7A7290C49CE7F1DC17D1A8CD8918E1F799055139D57FC0988D04D10",
+ "D0EA1B84481ED1BCFAA39D6775F97BDC9BF8D7C02FD0C009D6D85BAE5EC7B87A",
+ "FC2BF9E056B273AF0A8AABB815E541A3552C142AC10D4FE584F01D2CAB84F577"])
+ pubkeys = list(map(lambda secret: individual_pk(secret), sk))
+ indices32 = [i.to_bytes(32, 'big') for i in range(6)]
+ secnonces, pnonces = zip(*[nonce_gen_internal(r, None, pubkeys[0], None, None, None) for r in indices32])
+ tweaks = fromhex_all([
+ "B511DA492182A91B0FFB9A98020D55F260AE86D7ECBD0399C7383D59A5F2AF7C",
+ "A815FE049EE3C5AAB66310477FBC8BCCCAC2F3395F59F921C364ACD78A2F48DC",
+ "75448A87274B056468B977BE06EB1E9F657577B7320B0A3376EA51FD420D18A8"])
+ msg = bytes.fromhex("599C67EA410D005B9DA90817CF03ED3B1C868E4DA4EDF00A5880B0082C237869")
+
+ psigs = [None] * 9
+
+ valid_test_cases = [
+ {
+ "aggnonce": None,
+ "nonce_indices": [0, 1],
+ "key_indices": [0, 1],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "psig_indices": [0, 1],
+ }, {
+ "aggnonce": None,
+ "nonce_indices": [0, 2],
+ "key_indices": [0, 2],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "psig_indices": [2, 3],
+ }, {
+ "aggnonce": None,
+ "nonce_indices": [0, 3],
+ "key_indices": [0, 2],
+ "tweak_indices": [0],
+ "is_xonly": [False],
+ "psig_indices": [4, 5],
+ }, {
+ "aggnonce": None,
+ "nonce_indices": [0, 4],
+ "key_indices": [0, 3],
+ "tweak_indices": [0, 1, 2],
+ "is_xonly": [True, False, True],
+ "psig_indices": [6, 7],
+ },
+ ]
+ for (i, test_case) in enumerate(valid_test_cases):
+ is_xonly = test_case["is_xonly"]
+ nonce_indices = test_case["nonce_indices"]
+ key_indices = test_case["key_indices"]
+ psig_indices = test_case["psig_indices"]
+ vec_pnonces = [pnonces[i] for i in nonce_indices]
+ vec_pubkeys = [pubkeys[i] for i in key_indices]
+ vec_tweaks = [tweaks[i] for i in test_case["tweak_indices"]]
+
+ aggnonce = nonce_agg(vec_pnonces)
+ test_case["aggnonce"] = aggnonce.hex().upper()
+ session_ctx = SessionContext(aggnonce, vec_pubkeys, vec_tweaks, is_xonly, msg)
+
+ for j in range(len(key_indices)):
+ # WARNING: An actual implementation should _not_ copy the secnonce.
+ # Reusing the secnonce, as we do here for testing purposes, can leak the
+ # secret key.
+ secnonce_tmp = bytearray(secnonces[nonce_indices[j]][:64] + pubkeys[key_indices[j]])
+ psigs[psig_indices[j]] = sign(secnonce_tmp, sk[key_indices[j]], session_ctx)
+ sig = partial_sig_agg([psigs[i] for i in psig_indices], session_ctx)
+ keygen_ctx = key_agg_and_tweak(vec_pubkeys, vec_tweaks, is_xonly)
+        # To maximize coverage of the sig_agg algorithm, we want the aggregate
+        # public key point to have an even Y coordinate in one test case and an
+        # odd Y coordinate in another.
+ if i == 0:
+ assert(has_even_y(keygen_ctx[0]))
+ if i == 1:
+ assert(not has_even_y(keygen_ctx[0]))
+ aggpk = get_xonly_pk(keygen_ctx)
+ assert schnorr_verify(msg, aggpk, sig)
+ test_case["expected"] = sig.hex().upper()
+
+ error_test_case = {
+ "aggnonce": None,
+ "nonce_indices": [0, 4],
+ "key_indices": [0, 3],
+ "tweak_indices": [0, 1, 2],
+ "is_xonly": [True, False, True],
+ "psig_indices": [7, 8],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 1
+ },
+ "comment": "Partial signature is invalid because it exceeds group size"
+ }
+
+ psigs[8] = bytes.fromhex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141")
+
+ vec_pnonces = [pnonces[i] for i in error_test_case["nonce_indices"]]
+ aggnonce = nonce_agg(vec_pnonces)
+ error_test_case["aggnonce"] = aggnonce.hex().upper()
+
+ def tohex_all(l):
+ return list(map(lambda e: e.hex().upper(), l))
+
+ print(json.dumps({
+ "pubkeys": tohex_all(pubkeys),
+ "pnonces": tohex_all(pnonces),
+ "tweaks": tohex_all(tweaks),
+ "psigs": tohex_all(psigs),
+ "msg": msg.hex().upper(),
+ "valid_test_cases": valid_test_cases,
+ "error_test_cases": [error_test_case]
+ }, indent=4))
+
+gen_key_agg_vectors()
+check_sign_verify_vectors()
+check_tweak_vectors()
+print()
+sig_agg_vectors()
diff --git a/bip-0327/reference.py b/bip-0327/reference.py
new file mode 100644
index 0000000..edf6e76
--- /dev/null
+++ b/bip-0327/reference.py
@@ -0,0 +1,880 @@
+# BIP327 reference implementation
+#
+# WARNING: This implementation is for demonstration purposes only and _not_ to
+# be used in production environments. The code is vulnerable to timing attacks,
+# for example.
+
+from typing import Any, List, Optional, Tuple, NewType, NamedTuple
+import hashlib
+import secrets
+import time
+
+#
+# The following helper functions were copied from the BIP-340 reference implementation:
+# https://github.com/bitcoin/bips/blob/master/bip-0340/reference.py
+#
+
+p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
+n = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
+
+# Points are tuples of X and Y coordinates and the point at infinity is
+# represented by the None keyword.
+G = (0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)
+
+Point = Tuple[int, int]
+
+# This implementation can be sped up by storing the midstate after hashing
+# tag_hash instead of rehashing it all the time.
+def tagged_hash(tag: str, msg: bytes) -> bytes:
+ tag_hash = hashlib.sha256(tag.encode()).digest()
+ return hashlib.sha256(tag_hash + tag_hash + msg).digest()
+
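+# A minimal sketch of the midstate optimization mentioned above, assuming a
+# module-level cache is acceptable: hashlib objects can be copied, so the
+# "SHA256(tag) || SHA256(tag)" prefix only has to be hashed once per tag. The
+# cache and function names below are illustrative and are not part of the
+# copied BIP-340 reference code.
+_tagged_hash_midstates: dict = {}
+
+def tagged_hash_cached(tag: str, msg: bytes) -> bytes:
+    if tag not in _tagged_hash_midstates:
+        tag_hash = hashlib.sha256(tag.encode()).digest()
+        _tagged_hash_midstates[tag] = hashlib.sha256(tag_hash + tag_hash)
+    h = _tagged_hash_midstates[tag].copy()
+    h.update(msg)
+    return h.digest()
+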
+def is_infinite(P: Optional[Point]) -> bool:
+ return P is None
+
+def x(P: Point) -> int:
+ assert not is_infinite(P)
+ return P[0]
+
+def y(P: Point) -> int:
+ assert not is_infinite(P)
+ return P[1]
+
+def point_add(P1: Optional[Point], P2: Optional[Point]) -> Optional[Point]:
+ if P1 is None:
+ return P2
+ if P2 is None:
+ return P1
+ if (x(P1) == x(P2)) and (y(P1) != y(P2)):
+ return None
+ if P1 == P2:
+ lam = (3 * x(P1) * x(P1) * pow(2 * y(P1), p - 2, p)) % p
+ else:
+ lam = ((y(P2) - y(P1)) * pow(x(P2) - x(P1), p - 2, p)) % p
+ x3 = (lam * lam - x(P1) - x(P2)) % p
+ return (x3, (lam * (x(P1) - x3) - y(P1)) % p)
+
+def point_mul(P: Optional[Point], n: int) -> Optional[Point]:
+ R = None
+ for i in range(256):
+ if (n >> i) & 1:
+ R = point_add(R, P)
+ P = point_add(P, P)
+ return R
+
+def bytes_from_int(x: int) -> bytes:
+ return x.to_bytes(32, byteorder="big")
+
+def lift_x(b: bytes) -> Optional[Point]:
+ x = int_from_bytes(b)
+ if x >= p:
+ return None
+ y_sq = (pow(x, 3, p) + 7) % p
+ y = pow(y_sq, (p + 1) // 4, p)
+ if pow(y, 2, p) != y_sq:
+ return None
+ return (x, y if y & 1 == 0 else p-y)
+
+def int_from_bytes(b: bytes) -> int:
+ return int.from_bytes(b, byteorder="big")
+
+def has_even_y(P: Point) -> bool:
+ assert not is_infinite(P)
+ return y(P) % 2 == 0
+
+def schnorr_verify(msg: bytes, pubkey: bytes, sig: bytes) -> bool:
+ if len(msg) != 32:
+ raise ValueError('The message must be a 32-byte array.')
+ if len(pubkey) != 32:
+ raise ValueError('The public key must be a 32-byte array.')
+ if len(sig) != 64:
+ raise ValueError('The signature must be a 64-byte array.')
+ P = lift_x(pubkey)
+ r = int_from_bytes(sig[0:32])
+ s = int_from_bytes(sig[32:64])
+ if (P is None) or (r >= p) or (s >= n):
+ return False
+ e = int_from_bytes(tagged_hash("BIP0340/challenge", sig[0:32] + pubkey + msg)) % n
+ R = point_add(point_mul(G, s), point_mul(P, n - e))
+ if (R is None) or (not has_even_y(R)) or (x(R) != r):
+ return False
+ return True
+
+#
+# End of helper functions copied from BIP-340 reference implementation.
+#
+
+PlainPk = NewType('PlainPk', bytes)
+XonlyPk = NewType('XonlyPk', bytes)
+
+# There are two types of exceptions that can be raised by this implementation:
+# - ValueError for indicating that an input doesn't conform to some function
+# precondition (e.g. an input array is the wrong length, a serialized
+# representation doesn't have the correct format).
+# - InvalidContributionError for indicating that a signer (or the
+# aggregator) is misbehaving in the protocol.
+#
+# Assertions are used to (1) satisfy the type-checking system, and (2) check for
+# inconvenient events that can't happen except with negligible probability (e.g.
+# output of a hash function is 0) and can't be manually triggered by any
+# signer.
+
+# This exception is raised if a party (signer or nonce aggregator) sends invalid
+# values. Actual implementations should not crash when receiving invalid
+# contributions. Instead, they should hold the offending party accountable.
+class InvalidContributionError(Exception):
+ def __init__(self, signer, contrib):
+ self.signer = signer
+ # contrib is one of "pubkey", "pubnonce", "aggnonce", or "psig".
+ self.contrib = contrib
+
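+# Hedged sketch (not one of the algorithms specified in this BIP): rather than
+# letting an InvalidContributionError crash the session, a coordinator could
+# catch it and report which party has to be excluded before a new signing
+# session is started. The function name below is illustrative only.
+def describe_invalid_contribution(e: InvalidContributionError) -> str:
+    # e.signer is the index of the misbehaving signer, or None if the
+    # aggregate value itself (e.g. sent by the nonce aggregator) was invalid.
+    who = "the aggregator" if e.signer is None else f"signer {e.signer}"
+    return f"invalid {e.contrib} received from {who}"
+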
+infinity = None
+
+def xbytes(P: Point) -> bytes:
+ return bytes_from_int(x(P))
+
+def cbytes(P: Point) -> bytes:
+ a = b'\x02' if has_even_y(P) else b'\x03'
+ return a + xbytes(P)
+
+def cbytes_ext(P: Optional[Point]) -> bytes:
+ if is_infinite(P):
+ return (0).to_bytes(33, byteorder='big')
+ assert P is not None
+ return cbytes(P)
+
+def point_negate(P: Optional[Point]) -> Optional[Point]:
+ if P is None:
+ return P
+ return (x(P), p - y(P))
+
+def cpoint(x: bytes) -> Point:
+ if len(x) != 33:
+ raise ValueError('x is not a valid compressed point.')
+ P = lift_x(x[1:33])
+ if P is None:
+ raise ValueError('x is not a valid compressed point.')
+ if x[0] == 2:
+ return P
+ elif x[0] == 3:
+ P = point_negate(P)
+ assert P is not None
+ return P
+ else:
+ raise ValueError('x is not a valid compressed point.')
+
+def cpoint_ext(x: bytes) -> Optional[Point]:
+ if x == (0).to_bytes(33, 'big'):
+ return None
+ else:
+ return cpoint(x)
+
+# Return the plain public key corresponding to a given secret key
+def individual_pk(seckey: bytes) -> PlainPk:
+ d0 = int_from_bytes(seckey)
+ if not (1 <= d0 <= n - 1):
+ raise ValueError('The secret key must be an integer in the range 1..n-1.')
+ P = point_mul(G, d0)
+ assert P is not None
+ return PlainPk(cbytes(P))
+
+def key_sort(pubkeys: List[PlainPk]) -> List[PlainPk]:
+ pubkeys.sort()
+ return pubkeys
+
+KeyAggContext = NamedTuple('KeyAggContext', [('Q', Point),
+ ('gacc', int),
+ ('tacc', int)])
+
+def get_xonly_pk(keyagg_ctx: KeyAggContext) -> XonlyPk:
+ Q, _, _ = keyagg_ctx
+ return XonlyPk(xbytes(Q))
+
+def key_agg(pubkeys: List[PlainPk]) -> KeyAggContext:
+ pk2 = get_second_key(pubkeys)
+ u = len(pubkeys)
+ Q = infinity
+ for i in range(u):
+ try:
+ P_i = cpoint(pubkeys[i])
+ except ValueError:
+ raise InvalidContributionError(i, "pubkey")
+ a_i = key_agg_coeff_internal(pubkeys, pubkeys[i], pk2)
+ Q = point_add(Q, point_mul(P_i, a_i))
+ # Q is not the point at infinity except with negligible probability.
+ assert(Q is not None)
+ gacc = 1
+ tacc = 0
+ return KeyAggContext(Q, gacc, tacc)
+
+def hash_keys(pubkeys: List[PlainPk]) -> bytes:
+ return tagged_hash('KeyAgg list', b''.join(pubkeys))
+
+def get_second_key(pubkeys: List[PlainPk]) -> PlainPk:
+ u = len(pubkeys)
+ for j in range(1, u):
+ if pubkeys[j] != pubkeys[0]:
+ return pubkeys[j]
+ return PlainPk(b'\x00'*33)
+
+def key_agg_coeff(pubkeys: List[PlainPk], pk_: PlainPk) -> int:
+ pk2 = get_second_key(pubkeys)
+ return key_agg_coeff_internal(pubkeys, pk_, pk2)
+
+def key_agg_coeff_internal(pubkeys: List[PlainPk], pk_: PlainPk, pk2: PlainPk) -> int:
+ L = hash_keys(pubkeys)
+ if pk_ == pk2:
+ return 1
+ return int_from_bytes(tagged_hash('KeyAgg coefficient', L + pk_)) % n
+
+def apply_tweak(keyagg_ctx: KeyAggContext, tweak: bytes, is_xonly: bool) -> KeyAggContext:
+ if len(tweak) != 32:
+ raise ValueError('The tweak must be a 32-byte array.')
+ Q, gacc, tacc = keyagg_ctx
+ if is_xonly and not has_even_y(Q):
+ g = n - 1
+ else:
+ g = 1
+ t = int_from_bytes(tweak)
+ if t >= n:
+ raise ValueError('The tweak must be less than n.')
+ Q_ = point_add(point_mul(Q, g), point_mul(G, t))
+ if Q_ is None:
+ raise ValueError('The result of tweaking cannot be infinity.')
+ gacc_ = g * gacc % n
+ tacc_ = (t + g * tacc) % n
+ return KeyAggContext(Q_, gacc_, tacc_)
+
+def bytes_xor(a: bytes, b: bytes) -> bytes:
+ return bytes(x ^ y for x, y in zip(a, b))
+
+def nonce_hash(rand: bytes, pk: PlainPk, aggpk: XonlyPk, i: int, msg_prefixed: bytes, extra_in: bytes) -> int:
+ buf = b''
+ buf += rand
+ buf += len(pk).to_bytes(1, 'big')
+ buf += pk
+ buf += len(aggpk).to_bytes(1, 'big')
+ buf += aggpk
+ buf += msg_prefixed
+ buf += len(extra_in).to_bytes(4, 'big')
+ buf += extra_in
+ buf += i.to_bytes(1, 'big')
+ return int_from_bytes(tagged_hash('MuSig/nonce', buf))
+
+def nonce_gen_internal(rand_: bytes, sk: Optional[bytes], pk: PlainPk, aggpk: Optional[XonlyPk], msg: Optional[bytes], extra_in: Optional[bytes]) -> Tuple[bytearray, bytes]:
+ if sk is not None:
+ rand = bytes_xor(sk, tagged_hash('MuSig/aux', rand_))
+ else:
+ rand = rand_
+ if aggpk is None:
+ aggpk = XonlyPk(b'')
+ if msg is None:
+ msg_prefixed = b'\x00'
+ else:
+ msg_prefixed = b'\x01'
+ msg_prefixed += len(msg).to_bytes(8, 'big')
+ msg_prefixed += msg
+ if extra_in is None:
+ extra_in = b''
+ k_1 = nonce_hash(rand, pk, aggpk, 0, msg_prefixed, extra_in) % n
+ k_2 = nonce_hash(rand, pk, aggpk, 1, msg_prefixed, extra_in) % n
+ # k_1 == 0 or k_2 == 0 cannot occur except with negligible probability.
+ assert k_1 != 0
+ assert k_2 != 0
+ R_s1 = point_mul(G, k_1)
+ R_s2 = point_mul(G, k_2)
+ assert R_s1 is not None
+ assert R_s2 is not None
+ pubnonce = cbytes(R_s1) + cbytes(R_s2)
+ secnonce = bytearray(bytes_from_int(k_1) + bytes_from_int(k_2) + pk)
+ return secnonce, pubnonce
+
+def nonce_gen(sk: Optional[bytes], pk: PlainPk, aggpk: Optional[XonlyPk], msg: Optional[bytes], extra_in: Optional[bytes]) -> Tuple[bytearray, bytes]:
+ if sk is not None and len(sk) != 32:
+ raise ValueError('The optional byte array sk must have length 32.')
+ if aggpk is not None and len(aggpk) != 32:
+ raise ValueError('The optional byte array aggpk must have length 32.')
+ rand_ = secrets.token_bytes(32)
+ return nonce_gen_internal(rand_, sk, pk, aggpk, msg, extra_in)
+
+def nonce_agg(pubnonces: List[bytes]) -> bytes:
+ u = len(pubnonces)
+ aggnonce = b''
+ for j in (1, 2):
+ R_j = infinity
+ for i in range(u):
+ try:
+ R_ij = cpoint(pubnonces[i][(j-1)*33:j*33])
+ except ValueError:
+ raise InvalidContributionError(i, "pubnonce")
+ R_j = point_add(R_j, R_ij)
+ aggnonce += cbytes_ext(R_j)
+ return aggnonce
+
+SessionContext = NamedTuple('SessionContext', [('aggnonce', bytes),
+ ('pubkeys', List[PlainPk]),
+ ('tweaks', List[bytes]),
+ ('is_xonly', List[bool]),
+ ('msg', bytes)])
+
+def key_agg_and_tweak(pubkeys: List[PlainPk], tweaks: List[bytes], is_xonly: List[bool]):
+ if len(tweaks) != len(is_xonly):
+ raise ValueError('The `tweaks` and `is_xonly` arrays must have the same length.')
+ keyagg_ctx = key_agg(pubkeys)
+ v = len(tweaks)
+ for i in range(v):
+ keyagg_ctx = apply_tweak(keyagg_ctx, tweaks[i], is_xonly[i])
+ return keyagg_ctx
+
+def get_session_values(session_ctx: SessionContext) -> Tuple[Point, int, int, int, Point, int]:
+ (aggnonce, pubkeys, tweaks, is_xonly, msg) = session_ctx
+ Q, gacc, tacc = key_agg_and_tweak(pubkeys, tweaks, is_xonly)
+ b = int_from_bytes(tagged_hash('MuSig/noncecoef', aggnonce + xbytes(Q) + msg)) % n
+ try:
+ R_1 = cpoint_ext(aggnonce[0:33])
+ R_2 = cpoint_ext(aggnonce[33:66])
+ except ValueError:
+ # Nonce aggregator sent invalid nonces
+ raise InvalidContributionError(None, "aggnonce")
+ R_ = point_add(R_1, point_mul(R_2, b))
+ R = R_ if not is_infinite(R_) else G
+ assert R is not None
+ e = int_from_bytes(tagged_hash('BIP0340/challenge', xbytes(R) + xbytes(Q) + msg)) % n
+ return (Q, gacc, tacc, b, R, e)
+
+def get_session_key_agg_coeff(session_ctx: SessionContext, P: Point) -> int:
+ (_, pubkeys, _, _, _) = session_ctx
+ pk = PlainPk(cbytes(P))
+ if pk not in pubkeys:
+ raise ValueError('The signer\'s pubkey must be included in the list of pubkeys.')
+ return key_agg_coeff(pubkeys, pk)
+
+def sign(secnonce: bytearray, sk: bytes, session_ctx: SessionContext) -> bytes:
+ (Q, gacc, _, b, R, e) = get_session_values(session_ctx)
+ k_1_ = int_from_bytes(secnonce[0:32])
+ k_2_ = int_from_bytes(secnonce[32:64])
+    # Overwrite the secnonce argument with zeros such that subsequent calls to
+    # sign with the same secnonce raise a ValueError.
+ secnonce[:64] = bytearray(b'\x00'*64)
+ if not 0 < k_1_ < n:
+ raise ValueError('first secnonce value is out of range.')
+ if not 0 < k_2_ < n:
+ raise ValueError('second secnonce value is out of range.')
+ k_1 = k_1_ if has_even_y(R) else n - k_1_
+ k_2 = k_2_ if has_even_y(R) else n - k_2_
+ d_ = int_from_bytes(sk)
+ if not 0 < d_ < n:
+ raise ValueError('secret key value is out of range.')
+ P = point_mul(G, d_)
+ assert P is not None
+ pk = cbytes(P)
+ if not pk == secnonce[64:97]:
+ raise ValueError('Public key does not match nonce_gen argument')
+ a = get_session_key_agg_coeff(session_ctx, P)
+ g = 1 if has_even_y(Q) else n - 1
+ d = g * gacc * d_ % n
+ s = (k_1 + b * k_2 + e * a * d) % n
+ psig = bytes_from_int(s)
+ R_s1 = point_mul(G, k_1_)
+ R_s2 = point_mul(G, k_2_)
+ assert R_s1 is not None
+ assert R_s2 is not None
+ pubnonce = cbytes(R_s1) + cbytes(R_s2)
+ # Optional correctness check. The result of signing should pass signature verification.
+ assert partial_sig_verify_internal(psig, pubnonce, pk, session_ctx)
+ return psig
+
+def det_nonce_hash(sk_: bytes, aggothernonce: bytes, aggpk: bytes, msg: bytes, i: int) -> int:
+ buf = b''
+ buf += sk_
+ buf += aggothernonce
+ buf += aggpk
+ buf += len(msg).to_bytes(8, 'big')
+ buf += msg
+ buf += i.to_bytes(1, 'big')
+ return int_from_bytes(tagged_hash('MuSig/deterministic/nonce', buf))
+
+def deterministic_sign(sk: bytes, aggothernonce: bytes, pubkeys: List[PlainPk], tweaks: List[bytes], is_xonly: List[bool], msg: bytes, rand: Optional[bytes]) -> Tuple[bytes, bytes]:
+ if rand is not None:
+ sk_ = bytes_xor(sk, tagged_hash('MuSig/aux', rand))
+ else:
+ sk_ = sk
+ aggpk = get_xonly_pk(key_agg_and_tweak(pubkeys, tweaks, is_xonly))
+
+ k_1 = det_nonce_hash(sk_, aggothernonce, aggpk, msg, 0) % n
+ k_2 = det_nonce_hash(sk_, aggothernonce, aggpk, msg, 1) % n
+ # k_1 == 0 or k_2 == 0 cannot occur except with negligible probability.
+ assert k_1 != 0
+ assert k_2 != 0
+
+ R_s1 = point_mul(G, k_1)
+ R_s2 = point_mul(G, k_2)
+ assert R_s1 is not None
+ assert R_s2 is not None
+ pubnonce = cbytes(R_s1) + cbytes(R_s2)
+ secnonce = bytearray(bytes_from_int(k_1) + bytes_from_int(k_2) + individual_pk(sk))
+ try:
+ aggnonce = nonce_agg([pubnonce, aggothernonce])
+ except Exception:
+ raise InvalidContributionError(None, "aggothernonce")
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ psig = sign(secnonce, sk, session_ctx)
+ return (pubnonce, psig)
+
+def partial_sig_verify(psig: bytes, pubnonces: List[bytes], pubkeys: List[PlainPk], tweaks: List[bytes], is_xonly: List[bool], msg: bytes, i: int) -> bool:
+ if len(pubnonces) != len(pubkeys):
+ raise ValueError('The `pubnonces` and `pubkeys` arrays must have the same length.')
+ if len(tweaks) != len(is_xonly):
+ raise ValueError('The `tweaks` and `is_xonly` arrays must have the same length.')
+ aggnonce = nonce_agg(pubnonces)
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ return partial_sig_verify_internal(psig, pubnonces[i], pubkeys[i], session_ctx)
+
+def partial_sig_verify_internal(psig: bytes, pubnonce: bytes, pk: bytes, session_ctx: SessionContext) -> bool:
+ (Q, gacc, _, b, R, e) = get_session_values(session_ctx)
+ s = int_from_bytes(psig)
+ if s >= n:
+ return False
+ R_s1 = cpoint(pubnonce[0:33])
+ R_s2 = cpoint(pubnonce[33:66])
+ Re_s_ = point_add(R_s1, point_mul(R_s2, b))
+ Re_s = Re_s_ if has_even_y(R) else point_negate(Re_s_)
+ P = cpoint(pk)
+ if P is None:
+ return False
+ a = get_session_key_agg_coeff(session_ctx, P)
+ g = 1 if has_even_y(Q) else n - 1
+ g_ = g * gacc % n
+ return point_mul(G, s) == point_add(Re_s, point_mul(P, e * a * g_ % n))
+
+def partial_sig_agg(psigs: List[bytes], session_ctx: SessionContext) -> bytes:
+ (Q, _, tacc, _, R, e) = get_session_values(session_ctx)
+ s = 0
+ u = len(psigs)
+ for i in range(u):
+ s_i = int_from_bytes(psigs[i])
+ if s_i >= n:
+ raise InvalidContributionError(i, "psig")
+ s = (s + s_i) % n
+ g = 1 if has_even_y(Q) else n - 1
+ s = (s + e * g * tacc) % n
+ return xbytes(R) + bytes_from_int(s)
+#
+# The following code is only used for testing.
+#
+
+import json
+import os
+import sys
+
+def fromhex_all(l):
+ return [bytes.fromhex(l_i) for l_i in l]
+
+# Check that calling `try_fn` raises an `exception`. If `exception` is raised,
+# examine it with `except_fn`.
+def assert_raises(exception, try_fn, except_fn):
+ raised = False
+ try:
+ try_fn()
+ except exception as e:
+ raised = True
+ assert(except_fn(e))
+ except BaseException:
+ raise AssertionError("Wrong exception raised in a test.")
+ if not raised:
+ raise AssertionError("Exception was _not_ raised in a test where it was required.")
+
+def get_error_details(test_case):
+ error = test_case["error"]
+ if error["type"] == "invalid_contribution":
+ exception = InvalidContributionError
+ if "contrib" in error:
+ except_fn = lambda e: e.signer == error["signer"] and e.contrib == error["contrib"]
+ else:
+ except_fn = lambda e: e.signer == error["signer"]
+ elif error["type"] == "value":
+ exception = ValueError
+ except_fn = lambda e: str(e) == error["message"]
+ else:
+ raise RuntimeError(f"Invalid error type: {error['type']}")
+ return exception, except_fn
+
+def test_key_sort_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'key_sort_vectors.json')) as f:
+ test_data = json.load(f)
+
+ X = fromhex_all(test_data["pubkeys"])
+ X_sorted = fromhex_all(test_data["sorted_pubkeys"])
+
+ assert key_sort(X) == X_sorted
+
+def test_key_agg_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'key_agg_vectors.json')) as f:
+ test_data = json.load(f)
+
+ X = fromhex_all(test_data["pubkeys"])
+ T = fromhex_all(test_data["tweaks"])
+ valid_test_cases = test_data["valid_test_cases"]
+ error_test_cases = test_data["error_test_cases"]
+
+ for test_case in valid_test_cases:
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ expected = bytes.fromhex(test_case["expected"])
+
+ assert get_xonly_pk(key_agg(pubkeys)) == expected
+
+ for i, test_case in enumerate(error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ tweaks = [T[i] for i in test_case["tweak_indices"]]
+ is_xonly = test_case["is_xonly"]
+
+ assert_raises(exception, lambda: key_agg_and_tweak(pubkeys, tweaks, is_xonly), except_fn)
+
+def test_nonce_gen_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'nonce_gen_vectors.json')) as f:
+ test_data = json.load(f)
+
+ for test_case in test_data["test_cases"]:
+ def get_value(key) -> bytes:
+ return bytes.fromhex(test_case[key])
+
+ def get_value_maybe(key) -> Optional[bytes]:
+ if test_case[key] is not None:
+ return get_value(key)
+ else:
+ return None
+
+ rand_ = get_value("rand_")
+ sk = get_value_maybe("sk")
+ pk = PlainPk(get_value("pk"))
+ aggpk = get_value_maybe("aggpk")
+ if aggpk is not None:
+ aggpk = XonlyPk(aggpk)
+ msg = get_value_maybe("msg")
+ extra_in = get_value_maybe("extra_in")
+ expected_secnonce = get_value("expected_secnonce")
+ expected_pubnonce = get_value("expected_pubnonce")
+
+ assert nonce_gen_internal(rand_, sk, pk, aggpk, msg, extra_in) == (expected_secnonce, expected_pubnonce)
+
+def test_nonce_agg_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'nonce_agg_vectors.json')) as f:
+ test_data = json.load(f)
+
+ pnonce = fromhex_all(test_data["pnonces"])
+ valid_test_cases = test_data["valid_test_cases"]
+ error_test_cases = test_data["error_test_cases"]
+
+ for test_case in valid_test_cases:
+ pubnonces = [pnonce[i] for i in test_case["pnonce_indices"]]
+ expected = bytes.fromhex(test_case["expected"])
+ assert nonce_agg(pubnonces) == expected
+
+ for i, test_case in enumerate(error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+ pubnonces = [pnonce[i] for i in test_case["pnonce_indices"]]
+ assert_raises(exception, lambda: nonce_agg(pubnonces), except_fn)
+
+def test_sign_verify_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'sign_verify_vectors.json')) as f:
+ test_data = json.load(f)
+
+ sk = bytes.fromhex(test_data["sk"])
+ X = fromhex_all(test_data["pubkeys"])
+ # The public key corresponding to sk is at index 0
+ assert X[0] == individual_pk(sk)
+
+ secnonces = fromhex_all(test_data["secnonces"])
+ pnonce = fromhex_all(test_data["pnonces"])
+ # The public nonce corresponding to secnonces[0] is at index 0
+ k_1 = int_from_bytes(secnonces[0][0:32])
+ k_2 = int_from_bytes(secnonces[0][32:64])
+ R_s1 = point_mul(G, k_1)
+ R_s2 = point_mul(G, k_2)
+ assert R_s1 is not None and R_s2 is not None
+ assert pnonce[0] == cbytes(R_s1) + cbytes(R_s2)
+
+ aggnonces = fromhex_all(test_data["aggnonces"])
+ # The aggregate of the first three elements of pnonce is at index 0
+ assert(aggnonces[0] == nonce_agg([pnonce[0], pnonce[1], pnonce[2]]))
+
+ msgs = fromhex_all(test_data["msgs"])
+
+ valid_test_cases = test_data["valid_test_cases"]
+ sign_error_test_cases = test_data["sign_error_test_cases"]
+ verify_fail_test_cases = test_data["verify_fail_test_cases"]
+ verify_error_test_cases = test_data["verify_error_test_cases"]
+
+ for test_case in valid_test_cases:
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ aggnonce = aggnonces[test_case["aggnonce_index"]]
+ # Make sure that pubnonces and aggnonce in the test vector are
+ # consistent
+ assert nonce_agg(pubnonces) == aggnonce
+ msg = msgs[test_case["msg_index"]]
+ signer_index = test_case["signer_index"]
+ expected = bytes.fromhex(test_case["expected"])
+
+ session_ctx = SessionContext(aggnonce, pubkeys, [], [], msg)
+ # WARNING: An actual implementation should _not_ copy the secnonce.
+ # Reusing the secnonce, as we do here for testing purposes, can leak the
+ # secret key.
+ secnonce_tmp = bytearray(secnonces[0])
+ assert sign(secnonce_tmp, sk, session_ctx) == expected
+ assert partial_sig_verify(expected, pubnonces, pubkeys, [], [], msg, signer_index)
+
+ for i, test_case in enumerate(sign_error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ aggnonce = aggnonces[test_case["aggnonce_index"]]
+ msg = msgs[test_case["msg_index"]]
+ secnonce = bytearray(secnonces[test_case["secnonce_index"]])
+
+ session_ctx = SessionContext(aggnonce, pubkeys, [], [], msg)
+ assert_raises(exception, lambda: sign(secnonce, sk, session_ctx), except_fn)
+
+ for test_case in verify_fail_test_cases:
+ sig = bytes.fromhex(test_case["sig"])
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ msg = msgs[test_case["msg_index"]]
+ signer_index = test_case["signer_index"]
+
+ assert not partial_sig_verify(sig, pubnonces, pubkeys, [], [], msg, signer_index)
+
+ for i, test_case in enumerate(verify_error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+
+ sig = bytes.fromhex(test_case["sig"])
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ msg = msgs[test_case["msg_index"]]
+ signer_index = test_case["signer_index"]
+
+ assert_raises(exception, lambda: partial_sig_verify(sig, pubnonces, pubkeys, [], [], msg, signer_index), except_fn)
+
+def test_tweak_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'tweak_vectors.json')) as f:
+ test_data = json.load(f)
+
+ sk = bytes.fromhex(test_data["sk"])
+ X = fromhex_all(test_data["pubkeys"])
+ # The public key corresponding to sk is at index 0
+ assert X[0] == individual_pk(sk)
+
+ secnonce = bytearray(bytes.fromhex(test_data["secnonce"]))
+ pnonce = fromhex_all(test_data["pnonces"])
+ # The public nonce corresponding to secnonce is at index 0
+ k_1 = int_from_bytes(secnonce[0:32])
+ k_2 = int_from_bytes(secnonce[32:64])
+ R_s1 = point_mul(G, k_1)
+ R_s2 = point_mul(G, k_2)
+ assert R_s1 is not None and R_s2 is not None
+ assert pnonce[0] == cbytes(R_s1) + cbytes(R_s2)
+
+ aggnonce = bytes.fromhex(test_data["aggnonce"])
+ # The aggnonce is the aggregate of the first three elements of pnonce
+ assert(aggnonce == nonce_agg([pnonce[0], pnonce[1], pnonce[2]]))
+
+ tweak = fromhex_all(test_data["tweaks"])
+ msg = bytes.fromhex(test_data["msg"])
+
+ valid_test_cases = test_data["valid_test_cases"]
+ error_test_cases = test_data["error_test_cases"]
+
+ for test_case in valid_test_cases:
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ tweaks = [tweak[i] for i in test_case["tweak_indices"]]
+ is_xonly = test_case["is_xonly"]
+ signer_index = test_case["signer_index"]
+ expected = bytes.fromhex(test_case["expected"])
+
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ secnonce_tmp = bytearray(secnonce)
+ # WARNING: An actual implementation should _not_ copy the secnonce.
+ # Reusing the secnonce, as we do here for testing purposes, can leak the
+ # secret key.
+ assert sign(secnonce_tmp, sk, session_ctx) == expected
+ assert partial_sig_verify(expected, pubnonces, pubkeys, tweaks, is_xonly, msg, signer_index)
+
+ for i, test_case in enumerate(error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ tweaks = [tweak[i] for i in test_case["tweak_indices"]]
+ is_xonly = test_case["is_xonly"]
+ signer_index = test_case["signer_index"]
+
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ assert_raises(exception, lambda: sign(secnonce, sk, session_ctx), except_fn)
+
+def test_det_sign_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'det_sign_vectors.json')) as f:
+ test_data = json.load(f)
+
+ sk = bytes.fromhex(test_data["sk"])
+ X = fromhex_all(test_data["pubkeys"])
+ # The public key corresponding to sk is at index 0
+ assert X[0] == individual_pk(sk)
+
+ msgs = fromhex_all(test_data["msgs"])
+
+ valid_test_cases = test_data["valid_test_cases"]
+ error_test_cases = test_data["error_test_cases"]
+
+ for test_case in valid_test_cases:
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ aggothernonce = bytes.fromhex(test_case["aggothernonce"])
+ tweaks = fromhex_all(test_case["tweaks"])
+ is_xonly = test_case["is_xonly"]
+ msg = msgs[test_case["msg_index"]]
+ signer_index = test_case["signer_index"]
+ rand = bytes.fromhex(test_case["rand"]) if test_case["rand"] is not None else None
+ expected = fromhex_all(test_case["expected"])
+
+ pubnonce, psig = deterministic_sign(sk, aggothernonce, pubkeys, tweaks, is_xonly, msg, rand)
+ assert pubnonce == expected[0]
+ assert psig == expected[1]
+
+ pubnonces = [aggothernonce, pubnonce]
+ aggnonce = nonce_agg(pubnonces)
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ assert partial_sig_verify_internal(psig, pubnonce, pubkeys[signer_index], session_ctx)
+
+ for i, test_case in enumerate(error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ aggothernonce = bytes.fromhex(test_case["aggothernonce"])
+ tweaks = fromhex_all(test_case["tweaks"])
+ is_xonly = test_case["is_xonly"]
+ msg = msgs[test_case["msg_index"]]
+ signer_index = test_case["signer_index"]
+ rand = bytes.fromhex(test_case["rand"]) if test_case["rand"] is not None else None
+
+ try_fn = lambda: deterministic_sign(sk, aggothernonce, pubkeys, tweaks, is_xonly, msg, rand)
+ assert_raises(exception, try_fn, except_fn)
+
+def test_sig_agg_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'sig_agg_vectors.json')) as f:
+ test_data = json.load(f)
+
+ X = fromhex_all(test_data["pubkeys"])
+
+ # These nonces are only required if the tested API takes the individual
+ # nonces and not the aggregate nonce.
+ pnonce = fromhex_all(test_data["pnonces"])
+
+ tweak = fromhex_all(test_data["tweaks"])
+ psig = fromhex_all(test_data["psigs"])
+
+ msg = bytes.fromhex(test_data["msg"])
+
+ valid_test_cases = test_data["valid_test_cases"]
+ error_test_cases = test_data["error_test_cases"]
+
+ for test_case in valid_test_cases:
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ aggnonce = bytes.fromhex(test_case["aggnonce"])
+ assert aggnonce == nonce_agg(pubnonces)
+
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ tweaks = [tweak[i] for i in test_case["tweak_indices"]]
+ is_xonly = test_case["is_xonly"]
+ psigs = [psig[i] for i in test_case["psig_indices"]]
+ expected = bytes.fromhex(test_case["expected"])
+
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ sig = partial_sig_agg(psigs, session_ctx)
+ assert sig == expected
+ aggpk = get_xonly_pk(key_agg_and_tweak(pubkeys, tweaks, is_xonly))
+ assert schnorr_verify(msg, aggpk, sig)
+
+ for i, test_case in enumerate(error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ aggnonce = nonce_agg(pubnonces)
+
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ tweaks = [tweak[i] for i in test_case["tweak_indices"]]
+ is_xonly = test_case["is_xonly"]
+ psigs = [psig[i] for i in test_case["psig_indices"]]
+
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ assert_raises(exception, lambda: partial_sig_agg(psigs, session_ctx), except_fn)
+
+def test_sign_and_verify_random(iters: int) -> None:
+ for i in range(iters):
+ sk_1 = secrets.token_bytes(32)
+ sk_2 = secrets.token_bytes(32)
+ pk_1 = individual_pk(sk_1)
+ pk_2 = individual_pk(sk_2)
+ pubkeys = [pk_1, pk_2]
+
+ # In this example, the message and aggregate pubkey are known
+ # before nonce generation, so they can be passed into the nonce
+ # generation function as a defense-in-depth measure to protect
+ # against nonce reuse.
+ #
+        # If these values are not known when nonce_gen is called, None can be
+        # passed in for the corresponding arguments instead.
+ msg = secrets.token_bytes(32)
+ v = secrets.randbelow(4)
+ tweaks = [secrets.token_bytes(32) for _ in range(v)]
+ is_xonly = [secrets.choice([False, True]) for _ in range(v)]
+ aggpk = get_xonly_pk(key_agg_and_tweak(pubkeys, tweaks, is_xonly))
+
+ # Use a non-repeating counter for extra_in
+ secnonce_1, pubnonce_1 = nonce_gen(sk_1, pk_1, aggpk, msg, i.to_bytes(4, 'big'))
+
+ # On even iterations use regular signing algorithm for signer 2,
+ # otherwise use deterministic signing algorithm
+ if i % 2 == 0:
+ # Use a clock for extra_in
+ t = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
+ secnonce_2, pubnonce_2 = nonce_gen(sk_2, pk_2, aggpk, msg, t.to_bytes(8, 'big'))
+ else:
+ aggothernonce = nonce_agg([pubnonce_1])
+ rand = secrets.token_bytes(32)
+ pubnonce_2, psig_2 = deterministic_sign(sk_2, aggothernonce, pubkeys, tweaks, is_xonly, msg, rand)
+
+ pubnonces = [pubnonce_1, pubnonce_2]
+ aggnonce = nonce_agg(pubnonces)
+
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ psig_1 = sign(secnonce_1, sk_1, session_ctx)
+ assert partial_sig_verify(psig_1, pubnonces, pubkeys, tweaks, is_xonly, msg, 0)
+ # An exception is thrown if secnonce_1 is accidentally reused
+ assert_raises(ValueError, lambda: sign(secnonce_1, sk_1, session_ctx), lambda e: True)
+
+ # Wrong signer index
+ assert not partial_sig_verify(psig_1, pubnonces, pubkeys, tweaks, is_xonly, msg, 1)
+
+ # Wrong message
+ assert not partial_sig_verify(psig_1, pubnonces, pubkeys, tweaks, is_xonly, secrets.token_bytes(32), 0)
+
+ if i % 2 == 0:
+ psig_2 = sign(secnonce_2, sk_2, session_ctx)
+ assert partial_sig_verify(psig_2, pubnonces, pubkeys, tweaks, is_xonly, msg, 1)
+
+ sig = partial_sig_agg([psig_1, psig_2], session_ctx)
+ assert schnorr_verify(msg, aggpk, sig)
+
+if __name__ == '__main__':
+ test_key_sort_vectors()
+ test_key_agg_vectors()
+ test_nonce_gen_vectors()
+ test_nonce_agg_vectors()
+ test_sign_verify_vectors()
+ test_tweak_vectors()
+ test_det_sign_vectors()
+ test_sig_agg_vectors()
+ test_sign_and_verify_random(6)
diff --git a/bip-0327/tests.sh b/bip-0327/tests.sh
new file mode 100755
index 0000000..b363f40
--- /dev/null
+++ b/bip-0327/tests.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+mypy --no-error-summary reference.py
+python3 reference.py
+python3 gen_vectors_helper.py > /dev/null
diff --git a/bip-0327/vectors/det_sign_vectors.json b/bip-0327/vectors/det_sign_vectors.json
new file mode 100644
index 0000000..261669c
--- /dev/null
+++ b/bip-0327/vectors/det_sign_vectors.json
@@ -0,0 +1,144 @@
+{
+ "sk": "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
+ "pubkeys": [
+ "03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
+ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
+ "020000000000000000000000000000000000000000000000000000000000000007"
+ ],
+ "msgs": [
+ "F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
+ "2626262626262626262626262626262626262626262626262626262626262626262626262626"
+ ],
+ "valid_test_cases": [
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [0, 1, 2],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 0,
+ "signer_index": 0,
+ "expected": [
+ "03D96275257C2FCCBB6EEB77BDDF51D3C88C26EE1626C6CDA8999B9D34F4BA13A60309BE2BF883C6ABE907FA822D9CA166D51A3DCC28910C57528F6983FC378B7843",
+ "41EA65093F71D084785B20DC26A887CD941C9597860A21660CBDB9CC2113CAD3"
+ ]
+ },
+ {
+ "rand": null,
+ "aggothernonce": "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [1, 0, 2],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 0,
+ "signer_index": 1,
+ "expected": [
+ "028FBCCF5BB73A7B61B270BAD15C0F9475D577DD85C2157C9D38BEF1EC922B48770253BE3638C87369BC287E446B7F2C8CA5BEB9FFBD1EA082C62913982A65FC214D",
+ "AEAA31262637BFA88D5606679018A0FEEEC341F3107D1199857F6C81DE61B8DD"
+ ]
+ },
+ {
+ "rand": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+ "aggothernonce": "0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
+ "key_indices": [1, 2, 0],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 1,
+ "signer_index": 2,
+ "expected": [
+ "024FA8D774F0C8743FAA77AFB4D08EE5A013C2E8EEAD8A6F08A77DDD2D28266DB803050905E8C994477F3F2981861A2E3791EF558626E645FBF5AA131C5D6447C2C2",
+ "FEE28A56B8556B7632E42A84122C51A4861B1F2DEC7E81B632195E56A52E3E13"
+ ],
+ "comment": "Message longer than 32 bytes"
+ },
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "032DE2662628C90B03F5E720284EB52FF7D71F4284F627B68A853D78C78E1FFE9303E4C5524E83FFE1493B9077CF1CA6BEB2090C93D930321071AD40B2F44E599046",
+ "key_indices": [0, 1, 2],
+ "tweaks": ["E8F791FF9225A2AF0102AFFF4A9A723D9612A682A25EBE79802B263CDFCD83BB"],
+ "is_xonly": [true],
+ "msg_index": 0,
+ "signer_index": 0,
+ "expected": [
+ "031E07C0D11A0134E55DB1FC16095ADCBD564236194374AA882BFB3C78273BF673039D0336E8CA6288C00BFC1F8B594563529C98661172B9BC1BE85C23A4CE1F616B",
+ "7B1246C5889E59CB0375FA395CC86AC42D5D7D59FD8EAB4FDF1DCAB2B2F006EA"
+ ],
+ "comment": "Tweaked public key"
+ }
+ ],
+ "error_test_cases": [
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [1, 0, 3],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 0,
+ "signer_index": 1,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 2,
+ "contrib": "pubkey"
+ },
+ "comment": "Signer 2 provided an invalid public key"
+ },
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [1, 2],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 0,
+ "signer_index": 1,
+ "error": {
+ "type": "value",
+ "message": "The signer's pubkey must be included in the list of pubkeys."
+ },
+ "comment": "The signers pubkey is not in the list of pubkeys"
+ },
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "0437C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [1, 2, 0],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 0,
+ "signer_index": 2,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": null,
+ "contrib": "aggothernonce"
+ },
+ "comment": "aggothernonce is invalid due wrong tag, 0x04, in the first half"
+ },
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "0000000000000000000000000000000000000000000000000000000000000000000287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [1, 2, 0],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 0,
+ "signer_index": 2,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": null,
+ "contrib": "aggothernonce"
+ },
+ "comment": "aggothernonce is invalid because first half corresponds to point at infinity"
+ },
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [1, 2, 0],
+ "tweaks": ["FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"],
+ "is_xonly": [false],
+ "msg_index": 0,
+ "signer_index": 2,
+ "error": {
+ "type": "value",
+ "message": "The tweak must be less than n."
+ },
+ "comment": "Tweak is invalid because it exceeds group size"
+ }
+ ]
+}
diff --git a/bip-0327/vectors/key_agg_vectors.json b/bip-0327/vectors/key_agg_vectors.json
new file mode 100644
index 0000000..b2e623d
--- /dev/null
+++ b/bip-0327/vectors/key_agg_vectors.json
@@ -0,0 +1,88 @@
+{
+ "pubkeys": [
+ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
+ "023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
+ "020000000000000000000000000000000000000000000000000000000000000005",
+ "02FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30",
+ "04F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9"
+ ],
+ "tweaks": [
+ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
+ "252E4BD67410A76CDF933D30EAA1608214037F1B105A013ECCD3C5C184A6110B"
+ ],
+ "valid_test_cases": [
+ {
+ "key_indices": [0, 1, 2],
+ "expected": "90539EEDE565F5D054F32CC0C220126889ED1E5D193BAF15AEF344FE59D4610C"
+ },
+ {
+ "key_indices": [2, 1, 0],
+ "expected": "6204DE8B083426DC6EAF9502D27024D53FC826BF7D2012148A0575435DF54B2B"
+ },
+ {
+ "key_indices": [0, 0, 0],
+ "expected": "B436E3BAD62B8CD409969A224731C193D051162D8C5AE8B109306127DA3AA935"
+ },
+ {
+ "key_indices": [0, 0, 1, 1],
+ "expected": "69BC22BFA5D106306E48A20679DE1D7389386124D07571D0D872686028C26A3E"
+ }
+ ],
+ "error_test_cases": [
+ {
+ "key_indices": [0, 3],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 1,
+ "contrib": "pubkey"
+ },
+ "comment": "Invalid public key"
+ },
+ {
+ "key_indices": [0, 4],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 1,
+ "contrib": "pubkey"
+ },
+ "comment": "Public key exceeds field size"
+ },
+ {
+ "key_indices": [5, 0],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 0,
+ "contrib": "pubkey"
+ },
+ "comment": "First byte of public key is not 2 or 3"
+ },
+ {
+ "key_indices": [0, 1],
+ "tweak_indices": [0],
+ "is_xonly": [true],
+ "error": {
+ "type": "value",
+ "message": "The tweak must be less than n."
+ },
+ "comment": "Tweak is out of range"
+ },
+ {
+ "key_indices": [6],
+ "tweak_indices": [1],
+ "is_xonly": [false],
+ "error": {
+ "type": "value",
+ "message": "The result of tweaking cannot be infinity."
+ },
+ "comment": "Intermediate tweaking result is point at infinity"
+ }
+ ]
+}
diff --git a/bip-0327/vectors/key_sort_vectors.json b/bip-0327/vectors/key_sort_vectors.json
new file mode 100644
index 0000000..de088a7
--- /dev/null
+++ b/bip-0327/vectors/key_sort_vectors.json
@@ -0,0 +1,18 @@
+{
+ "pubkeys": [
+ "02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
+ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
+ "023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
+ "02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EFF",
+ "02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8"
+ ],
+ "sorted_pubkeys": [
+ "023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
+ "02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
+ "02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
+ "02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EFF",
+ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"
+ ]
+}
diff --git a/bip-0327/vectors/nonce_agg_vectors.json b/bip-0327/vectors/nonce_agg_vectors.json
new file mode 100644
index 0000000..1c04b88
--- /dev/null
+++ b/bip-0327/vectors/nonce_agg_vectors.json
@@ -0,0 +1,51 @@
+{
+ "pnonces": [
+ "020151C80F435648DF67A22B749CD798CE54E0321D034B92B709B567D60A42E66603BA47FBC1834437B3212E89A84D8425E7BF12E0245D98262268EBDCB385D50641",
+ "03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B833",
+ "020151C80F435648DF67A22B749CD798CE54E0321D034B92B709B567D60A42E6660279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
+ "03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60379BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
+ "04FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B833",
+ "03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B831",
+ "03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A602FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30"
+ ],
+ "valid_test_cases": [
+ {
+ "pnonce_indices": [0, 1],
+ "expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B024725377345BDE0E9C33AF3C43C0A29A9249F2F2956FA8CFEB55C8573D0262DC8"
+ },
+ {
+ "pnonce_indices": [2, 3],
+ "expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B000000000000000000000000000000000000000000000000000000000000000000",
+ "comment": "Sum of second points encoded in the nonces is point at infinity which is serialized as 33 zero bytes"
+ }
+ ],
+ "error_test_cases": [
+ {
+ "pnonce_indices": [0, 4],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 1,
+ "contrib": "pubnonce"
+ },
+ "comment": "Public nonce from signer 1 is invalid due wrong tag, 0x04, in the first half"
+ },
+ {
+ "pnonce_indices": [5, 1],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 0,
+ "contrib": "pubnonce"
+ },
+ "comment": "Public nonce from signer 0 is invalid because the second half does not correspond to an X coordinate"
+ },
+ {
+ "pnonce_indices": [6, 1],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 0,
+ "contrib": "pubnonce"
+ },
+ "comment": "Public nonce from signer 0 is invalid because second half exceeds field size"
+ }
+ ]
+}
diff --git a/bip-0327/vectors/nonce_gen_vectors.json b/bip-0327/vectors/nonce_gen_vectors.json
new file mode 100644
index 0000000..ced946f
--- /dev/null
+++ b/bip-0327/vectors/nonce_gen_vectors.json
@@ -0,0 +1,44 @@
+{
+ "test_cases": [
+ {
+ "rand_": "0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F",
+ "sk": "0202020202020202020202020202020202020202020202020202020202020202",
+ "pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
+ "aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
+ "msg": "0101010101010101010101010101010101010101010101010101010101010101",
+ "extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
+ "expected_secnonce": "B114E502BEAA4E301DD08A50264172C84E41650E6CB726B410C0694D59EFFB6495B5CAF28D045B973D63E3C99A44B807BDE375FD6CB39E46DC4A511708D0E9D2024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
+ "expected_pubnonce": "02F7BE7089E8376EB355272368766B17E88E7DB72047D05E56AA881EA52B3B35DF02C29C8046FDD0DED4C7E55869137200FBDBFE2EB654267B6D7013602CAED3115A"
+ },
+ {
+ "rand_": "0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F",
+ "sk": "0202020202020202020202020202020202020202020202020202020202020202",
+ "pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
+ "aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
+ "msg": "",
+ "extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
+ "expected_secnonce": "E862B068500320088138468D47E0E6F147E01B6024244AE45EAC40ACE5929B9F0789E051170B9E705D0B9EB49049A323BBBBB206D8E05C19F46C6228742AA7A9024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
+ "expected_pubnonce": "023034FA5E2679F01EE66E12225882A7A48CC66719B1B9D3B6C4DBD743EFEDA2C503F3FD6F01EB3A8E9CB315D73F1F3D287CAFBB44AB321153C6287F407600205109"
+ },
+ {
+ "rand_": "0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F",
+ "sk": "0202020202020202020202020202020202020202020202020202020202020202",
+ "pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
+ "aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
+ "msg": "2626262626262626262626262626262626262626262626262626262626262626262626262626",
+ "extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
+ "expected_secnonce": "3221975ACBDEA6820EABF02A02B7F27D3A8EF68EE42787B88CBEFD9AA06AF3632EE85B1A61D8EF31126D4663A00DD96E9D1D4959E72D70FE5EBB6E7696EBA66F024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
+ "expected_pubnonce": "02E5BBC21C69270F59BD634FCBFA281BE9D76601295345112C58954625BF23793A021307511C79F95D38ACACFF1B4DA98228B77E65AA216AD075E9673286EFB4EAF3"
+ },
+ {
+ "rand_": "0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F",
+ "sk": null,
+ "pk": "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "aggpk": null,
+ "msg": null,
+ "extra_in": null,
+ "expected_secnonce": "89BDD787D0284E5E4D5FC572E49E316BAB7E21E3B1830DE37DFE80156FA41A6D0B17AE8D024C53679699A6FD7944D9C4A366B514BAF43088E0708B1023DD289702F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "expected_pubnonce": "02C96E7CB1E8AA5DAC64D872947914198F607D90ECDE5200DE52978AD5DED63C000299EC5117C2D29EDEE8A2092587C3909BE694D5CFF0667D6C02EA4059F7CD9786"
+ }
+ ]
+}
diff --git a/bip-0327/vectors/sig_agg_vectors.json b/bip-0327/vectors/sig_agg_vectors.json
new file mode 100644
index 0000000..04a7bc6
--- /dev/null
+++ b/bip-0327/vectors/sig_agg_vectors.json
@@ -0,0 +1,151 @@
+{
+ "pubkeys": [
+ "03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
+ "02D2DC6F5DF7C56ACF38C7FA0AE7A759AE30E19B37359DFDE015872324C7EF6E05",
+ "03C7FB101D97FF930ACD0C6760852EF64E69083DE0B06AC6335724754BB4B0522C",
+ "02352433B21E7E05D3B452B81CAE566E06D2E003ECE16D1074AABA4289E0E3D581"
+ ],
+ "pnonces": [
+ "036E5EE6E28824029FEA3E8A9DDD2C8483F5AF98F7177C3AF3CB6F47CAF8D94AE902DBA67E4A1F3680826172DA15AFB1A8CA85C7C5CC88900905C8DC8C328511B53E",
+ "03E4F798DA48A76EEC1C9CC5AB7A880FFBA201A5F064E627EC9CB0031D1D58FC5103E06180315C5A522B7EC7C08B69DCD721C313C940819296D0A7AB8E8795AC1F00",
+ "02C0068FD25523A31578B8077F24F78F5BD5F2422AFF47C1FADA0F36B3CEB6C7D202098A55D1736AA5FCC21CF0729CCE852575C06C081125144763C2C4C4A05C09B6",
+ "031F5C87DCFBFCF330DEE4311D85E8F1DEA01D87A6F1C14CDFC7E4F1D8C441CFA40277BF176E9F747C34F81B0D9F072B1B404A86F402C2D86CF9EA9E9C69876EA3B9",
+ "023F7042046E0397822C4144A17F8B63D78748696A46C3B9F0A901D296EC3406C302022B0B464292CF9751D699F10980AC764E6F671EFCA15069BBE62B0D1C62522A",
+ "02D97DDA5988461DF58C5897444F116A7C74E5711BF77A9446E27806563F3B6C47020CBAD9C363A7737F99FA06B6BE093CEAFF5397316C5AC46915C43767AE867C00"
+ ],
+ "tweaks": [
+ "B511DA492182A91B0FFB9A98020D55F260AE86D7ECBD0399C7383D59A5F2AF7C",
+ "A815FE049EE3C5AAB66310477FBC8BCCCAC2F3395F59F921C364ACD78A2F48DC",
+ "75448A87274B056468B977BE06EB1E9F657577B7320B0A3376EA51FD420D18A8"
+ ],
+ "psigs": [
+ "B15D2CD3C3D22B04DAE438CE653F6B4ECF042F42CFDED7C41B64AAF9B4AF53FB",
+ "6193D6AC61B354E9105BBDC8937A3454A6D705B6D57322A5A472A02CE99FCB64",
+ "9A87D3B79EC67228CB97878B76049B15DBD05B8158D17B5B9114D3C226887505",
+ "66F82EA90923689B855D36C6B7E032FB9970301481B99E01CDB4D6AC7C347A15",
+ "4F5AEE41510848A6447DCD1BBC78457EF69024944C87F40250D3EF2C25D33EFE",
+ "DDEF427BBB847CC027BEFF4EDB01038148917832253EBC355FC33F4A8E2FCCE4",
+ "97B890A26C981DA8102D3BC294159D171D72810FDF7C6A691DEF02F0F7AF3FDC",
+ "53FA9E08BA5243CBCB0D797C5EE83BC6728E539EB76C2D0BF0F971EE4E909971",
+ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"
+ ],
+ "msg": "599C67EA410D005B9DA90817CF03ED3B1C868E4DA4EDF00A5880B0082C237869",
+ "valid_test_cases": [
+ {
+ "aggnonce": "0341432722C5CD0268D829C702CF0D1CBCE57033EED201FD335191385227C3210C03D377F2D258B64AADC0E16F26462323D701D286046A2EA93365656AFD9875982B",
+ "nonce_indices": [
+ 0,
+ 1
+ ],
+ "key_indices": [
+ 0,
+ 1
+ ],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "psig_indices": [
+ 0,
+ 1
+ ],
+ "expected": "041DA22223CE65C92C9A0D6C2CAC828AAF1EEE56304FEC371DDF91EBB2B9EF0912F1038025857FEDEB3FF696F8B99FA4BB2C5812F6095A2E0004EC99CE18DE1E"
+ },
+ {
+ "aggnonce": "0224AFD36C902084058B51B5D36676BBA4DC97C775873768E58822F87FE437D792028CB15929099EEE2F5DAE404CD39357591BA32E9AF4E162B8D3E7CB5EFE31CB20",
+ "nonce_indices": [
+ 0,
+ 2
+ ],
+ "key_indices": [
+ 0,
+ 2
+ ],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "psig_indices": [
+ 2,
+ 3
+ ],
+ "expected": "1069B67EC3D2F3C7C08291ACCB17A9C9B8F2819A52EB5DF8726E17E7D6B52E9F01800260A7E9DAC450F4BE522DE4CE12BA91AEAF2B4279219EF74BE1D286ADD9"
+ },
+ {
+ "aggnonce": "0208C5C438C710F4F96A61E9FF3C37758814B8C3AE12BFEA0ED2C87FF6954FF186020B1816EA104B4FCA2D304D733E0E19CEAD51303FF6420BFD222335CAA402916D",
+ "nonce_indices": [
+ 0,
+ 3
+ ],
+ "key_indices": [
+ 0,
+ 2
+ ],
+ "tweak_indices": [
+ 0
+ ],
+ "is_xonly": [
+ false
+ ],
+ "psig_indices": [
+ 4,
+ 5
+ ],
+ "expected": "5C558E1DCADE86DA0B2F02626A512E30A22CF5255CAEA7EE32C38E9A71A0E9148BA6C0E6EC7683B64220F0298696F1B878CD47B107B81F7188812D593971E0CC"
+ },
+ {
+ "aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
+ "nonce_indices": [
+ 0,
+ 4
+ ],
+ "key_indices": [
+ 0,
+ 3
+ ],
+ "tweak_indices": [
+ 0,
+ 1,
+ 2
+ ],
+ "is_xonly": [
+ true,
+ false,
+ true
+ ],
+ "psig_indices": [
+ 6,
+ 7
+ ],
+ "expected": "839B08820B681DBA8DAF4CC7B104E8F2638F9388F8D7A555DC17B6E6971D7426CE07BF6AB01F1DB50E4E33719295F4094572B79868E440FB3DEFD3FAC1DB589E"
+ }
+ ],
+ "error_test_cases": [
+ {
+ "aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
+ "nonce_indices": [
+ 0,
+ 4
+ ],
+ "key_indices": [
+ 0,
+ 3
+ ],
+ "tweak_indices": [
+ 0,
+ 1,
+ 2
+ ],
+ "is_xonly": [
+ true,
+ false,
+ true
+ ],
+ "psig_indices": [
+ 7,
+ 8
+ ],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 1
+ },
+ "comment": "Partial signature is invalid because it exceeds group size"
+ }
+ ]
+}
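The test cases in <tt>sig_agg_vectors.json</tt> refer to the shared <tt>pubkeys</tt>, <tt>pnonces</tt>, <tt>tweaks</tt> and <tt>psigs</tt> arrays by index rather than repeating values. A minimal sketch of a harness that resolves those indices before calling an aggregation routine is shown below; <tt>aggregate</tt> stands in for whatever partial-signature aggregation function the implementation under test exposes, so its name and signature are assumptions.

<pre>
import json

# Minimal sketch: resolve the index-based references in sig_agg_vectors.json.
# `aggregate(aggnonce, pubkeys, tweaks, is_xonly, msg, psigs)` is a stand-in
# for the implementation under test; its name and signature are assumptions.
def run_sig_agg_vectors(path, aggregate):
    with open(path) as f:
        data = json.load(f)
    msg = bytes.fromhex(data["msg"])
    for case in data["valid_test_cases"]:
        pubkeys = [bytes.fromhex(data["pubkeys"][i]) for i in case["key_indices"]]
        tweaks = [bytes.fromhex(data["tweaks"][i]) for i in case["tweak_indices"]]
        psigs = [bytes.fromhex(data["psigs"][i]) for i in case["psig_indices"]]
        aggnonce = bytes.fromhex(case["aggnonce"])
        sig = aggregate(aggnonce, pubkeys, tweaks, case["is_xonly"], msg, psigs)
        assert sig.hex().upper() == case["expected"]
    for case in data["error_test_cases"]:
        # These must be rejected, e.g. a partial signature that exceeds the
        # group size; the expected error kind is described in case["error"].
        pass
</pre>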
diff --git a/bip-0327/vectors/sign_verify_vectors.json b/bip-0327/vectors/sign_verify_vectors.json
new file mode 100644
index 0000000..b467640
--- /dev/null
+++ b/bip-0327/vectors/sign_verify_vectors.json
@@ -0,0 +1,212 @@
+{
+ "sk": "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
+ "pubkeys": [
+ "03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
+ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA661",
+ "020000000000000000000000000000000000000000000000000000000000000007"
+ ],
+ "secnonces": [
+ "508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F703935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
+ "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9"
+ ],
+ "pnonces": [
+ "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
+ "032DE2662628C90B03F5E720284EB52FF7D71F4284F627B68A853D78C78E1FFE9303E4C5524E83FFE1493B9077CF1CA6BEB2090C93D930321071AD40B2F44E599046",
+ "0237C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0387BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "0200000000000000000000000000000000000000000000000000000000000000090287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480"
+ ],
+ "aggnonces": [
+ "028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "048465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
+ "028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61020000000000000000000000000000000000000000000000000000000000000009",
+ "028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD6102FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30"
+ ],
+ "msgs": [
+ "F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
+ "",
+ "2626262626262626262626262626262626262626262626262626262626262626262626262626"
+ ],
+ "valid_test_cases": [
+ {
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "aggnonce_index": 0,
+ "msg_index": 0,
+ "signer_index": 0,
+ "expected": "012ABBCB52B3016AC03AD82395A1A415C48B93DEF78718E62A7A90052FE224FB"
+ },
+ {
+ "key_indices": [1, 0, 2],
+ "nonce_indices": [1, 0, 2],
+ "aggnonce_index": 0,
+ "msg_index": 0,
+ "signer_index": 1,
+ "expected": "9FF2F7AAA856150CC8819254218D3ADEEB0535269051897724F9DB3789513A52"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "aggnonce_index": 0,
+ "msg_index": 0,
+ "signer_index": 2,
+ "expected": "FA23C359F6FAC4E7796BB93BC9F0532A95468C539BA20FF86D7C76ED92227900"
+ },
+ {
+ "key_indices": [0, 1],
+ "nonce_indices": [0, 3],
+ "aggnonce_index": 1,
+ "msg_index": 0,
+ "signer_index": 0,
+ "expected": "AE386064B26105404798F75DE2EB9AF5EDA5387B064B83D049CB7C5E08879531",
+ "comment": "Both halves of aggregate nonce correspond to point at infinity"
+ },
+ {
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "aggnonce_index": 0,
+ "msg_index": 1,
+ "signer_index": 0,
+ "expected": "D7D63FFD644CCDA4E62BC2BC0B1D02DD32A1DC3030E155195810231D1037D82D",
+ "comment": "Empty message"
+ },
+ {
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "aggnonce_index": 0,
+ "msg_index": 2,
+ "signer_index": 0,
+ "expected": "E184351828DA5094A97C79CABDAAA0BFB87608C32E8829A4DF5340A6F243B78C",
+ "comment": "38-byte message"
+ }
+ ],
+ "sign_error_test_cases": [
+ {
+ "key_indices": [1, 2],
+ "aggnonce_index": 0,
+ "msg_index": 0,
+ "secnonce_index": 0,
+ "error": {
+ "type": "value",
+ "message": "The signer's pubkey must be included in the list of pubkeys."
+ },
+ "comment": "The signers pubkey is not in the list of pubkeys. This test case is optional: it can be skipped by implementations that do not check that the signer's pubkey is included in the list of pubkeys."
+ },
+ {
+ "key_indices": [1, 0, 3],
+ "aggnonce_index": 0,
+ "msg_index": 0,
+ "secnonce_index": 0,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 2,
+ "contrib": "pubkey"
+ },
+ "comment": "Signer 2 provided an invalid public key"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "aggnonce_index": 2,
+ "msg_index": 0,
+ "secnonce_index": 0,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": null,
+ "contrib": "aggnonce"
+ },
+ "comment": "Aggregate nonce is invalid due wrong tag, 0x04, in the first half"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "aggnonce_index": 3,
+ "msg_index": 0,
+ "secnonce_index": 0,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": null,
+ "contrib": "aggnonce"
+ },
+ "comment": "Aggregate nonce is invalid because the second half does not correspond to an X coordinate"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "aggnonce_index": 4,
+ "msg_index": 0,
+ "secnonce_index": 0,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": null,
+ "contrib": "aggnonce"
+ },
+ "comment": "Aggregate nonce is invalid because second half exceeds field size"
+ },
+ {
+ "key_indices": [0, 1, 2],
+ "aggnonce_index": 0,
+ "msg_index": 0,
+ "signer_index": 0,
+ "secnonce_index": 1,
+ "error": {
+ "type": "value",
+ "message": "first secnonce value is out of range."
+ },
+ "comment": "Secnonce is invalid which may indicate nonce reuse"
+ }
+ ],
+ "verify_fail_test_cases": [
+ {
+ "sig": "97AC833ADCB1AFA42EBF9E0725616F3C9A0D5B614F6FE283CEAAA37A8FFAF406",
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "msg_index": 0,
+ "signer_index": 0,
+ "comment": "Wrong signature (which is equal to the negation of valid signature)"
+ },
+ {
+ "sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "msg_index": 0,
+ "signer_index": 1,
+ "comment": "Wrong signer"
+ },
+ {
+ "sig": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "msg_index": 0,
+ "signer_index": 0,
+ "comment": "Signature exceeds group size"
+ }
+ ],
+ "verify_error_test_cases": [
+ {
+ "sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [4, 1, 2],
+ "msg_index": 0,
+ "signer_index": 0,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 0,
+ "contrib": "pubnonce"
+ },
+ "comment": "Invalid pubnonce"
+ },
+ {
+ "sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
+ "key_indices": [3, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "msg_index": 0,
+ "signer_index": 0,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 0,
+ "contrib": "pubkey"
+ },
+ "comment": "Invalid pubkey"
+ }
+ ]
+}
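Note the split above between <tt>verify_fail_test_cases</tt>, where a well-formed but wrong signature must simply fail verification, and <tt>verify_error_test_cases</tt>, where a contribution itself is malformed and the implementation is expected to reject the input. A minimal sketch of that distinction; <tt>verify_case</tt> is a placeholder callback that resolves the index-based references and calls the implementation's partial-signature verification, and the use of ValueError for rejection is an assumption.

<pre>
# Sketch only: `verify_case` is a placeholder callback that resolves the
# index-based references and calls the implementation's partial signature
# verification. Whether rejection surfaces as ValueError is an assumption.
def check_verify_cases(data, verify_case):
    for case in data["verify_fail_test_cases"]:
        # Well-formed inputs with a wrong signature: must simply return False.
        assert verify_case(case) is False
    for case in data["verify_error_test_cases"]:
        # Malformed contributions (invalid pubnonce or pubkey): must be rejected.
        try:
            verify_case(case)
            raise AssertionError("malformed contribution was not rejected")
        except ValueError:
            pass
</pre>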
diff --git a/bip-0327/vectors/tweak_vectors.json b/bip-0327/vectors/tweak_vectors.json
new file mode 100644
index 0000000..d0a7cfe
--- /dev/null
+++ b/bip-0327/vectors/tweak_vectors.json
@@ -0,0 +1,84 @@
+{
+ "sk": "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
+ "pubkeys": [
+ "03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
+ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"
+ ],
+ "secnonce": "508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F703935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
+ "pnonces": [
+ "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
+ "032DE2662628C90B03F5E720284EB52FF7D71F4284F627B68A853D78C78E1FFE9303E4C5524E83FFE1493B9077CF1CA6BEB2090C93D930321071AD40B2F44E599046"
+ ],
+ "aggnonce": "028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
+ "tweaks": [
+ "E8F791FF9225A2AF0102AFFF4A9A723D9612A682A25EBE79802B263CDFCD83BB",
+ "AE2EA797CC0FE72AC5B97B97F3C6957D7E4199A167A58EB08BCAFFDA70AC0455",
+ "F52ECBC565B3D8BEA2DFD5B75A4F457E54369809322E4120831626F290FA87E0",
+ "1969AD73CC177FA0B4FCED6DF1F7BF9907E665FDE9BA196A74FED0A3CF5AEF9D",
+ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"
+ ],
+ "msg": "F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
+ "valid_test_cases": [
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "tweak_indices": [0],
+ "is_xonly": [true],
+ "signer_index": 2,
+ "expected": "E28A5C66E61E178C2BA19DB77B6CF9F7E2F0F56C17918CD13135E60CC848FE91",
+ "comment": "A single x-only tweak"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "tweak_indices": [0],
+ "is_xonly": [false],
+ "signer_index": 2,
+ "expected": "38B0767798252F21BF5702C48028B095428320F73A4B14DB1E25DE58543D2D2D",
+ "comment": "A single plain tweak"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "tweak_indices": [0, 1],
+ "is_xonly": [false, true],
+ "signer_index": 2,
+ "expected": "408A0A21C4A0F5DACAF9646AD6EB6FECD7F7A11F03ED1F48DFFF2185BC2C2408",
+ "comment": "A plain tweak followed by an x-only tweak"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "tweak_indices": [0, 1, 2, 3],
+ "is_xonly": [false, false, true, true],
+ "signer_index": 2,
+ "expected": "45ABD206E61E3DF2EC9E264A6FEC8292141A633C28586388235541F9ADE75435",
+ "comment": "Four tweaks: plain, plain, x-only, x-only."
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "tweak_indices": [0, 1, 2, 3],
+ "is_xonly": [true, false, true, false],
+ "signer_index": 2,
+ "expected": "B255FDCAC27B40C7CE7848E2D3B7BF5EA0ED756DA81565AC804CCCA3E1D5D239",
+ "comment": "Four tweaks: x-only, plain, x-only, plain. If an implementation prohibits applying plain tweaks after x-only tweaks, it can skip this test vector or return an error."
+ }
+ ],
+ "error_test_cases": [
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "tweak_indices": [4],
+ "is_xonly": [false],
+ "signer_index": 2,
+ "error": {
+ "type": "value",
+ "message": "The tweak must be less than n."
+ },
+ "comment": "Tweak is invalid because it exceeds group size"
+ }
+ ]
+}
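The tweak vectors above exercise ordered sequences of plain and x-only tweaks. At the point level, a plain tweak adds <tt>t*G</tt> to the current aggregate point, while an x-only tweak first negates the point if its Y coordinate is odd and then adds <tt>t*G</tt>. A sketch of that rule follows, written against helpers in the style of the BIP 340 reference code (<tt>point_add</tt>, <tt>point_mul</tt>, <tt>has_even_y</tt>, <tt>G</tt>, <tt>n</tt>; these names are assumptions) and omitting the secret-side accumulators a full signer must also track.

<pre>
# Point-level sketch of the tweak rule; point_add, point_mul, has_even_y, G
# and n are assumed to exist in the style of the BIP 340 reference code.
# A real signer must additionally track the secret-side accumulators.
def apply_tweaks(Q, tweaks, is_xonly):
    for t, xonly in zip(tweaks, is_xonly):
        t_int = int.from_bytes(t, "big")
        if t_int >= n:
            raise ValueError("The tweak must be less than n.")
        if xonly and not has_even_y(Q):
            Q = point_mul(Q, n - 1)  # negate so the x-only tweak acts on the even-Y point
        Q = point_add(Q, point_mul(G, t_int))
    return Q
</pre>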
diff --git a/bip-0329.mediawiki b/bip-0329.mediawiki
index 9a8b270..fc5da42 100644
--- a/bip-0329.mediawiki
+++ b/bip-0329.mediawiki
@@ -26,8 +26,10 @@ These standards are well supported and allow users to move easily between differ
There is, however, no defined standard to transfer any labels the user may have applied to the transactions, addresses, public keys, inputs, outputs or xpubs in their wallet.
The UTXO model that Bitcoin uses makes these labels particularly valuable as they may indicate the source of funds, whether received externally or as a result of change from a prior transaction.
In both cases, care must be taken when spending to avoid undesirable leaks of private information.
+
Labels provide valuable guidance in this regard, and have even become mandatory when spending in several Bitcoin wallets.
Allowing users to import and export their labels in a standardized way ensures that they do not experience lock-in to a particular wallet application.
+In addition, many wallets allow unspent outputs to be frozen or made unspendable within the wallet. Since this wallet-related metadata is similar to labels and not captured elsewhere, it is also included in this format.
==Rationale==
@@ -44,7 +46,7 @@ It is also a convenient format for command-line processing, which is often line-
Further to the JSON Lines specification, an export of labels from a wallet must be a UTF-8 encoded text file, containing one record per line consisting of a valid JSON object.
Lines are separated by <tt>\n</tt>. Multiline values are not permitted.
-Each JSON object must contain 3 key/value pairs, defined as follows:
+Each JSON object must contain 3 or 4 key/value pairs, defined as follows:
{| class="wikitable"
|-
@@ -59,6 +61,12 @@ Each JSON object must contain 3 key/value pairs, defined as follows:
|-
| <tt>label</tt>
| The label applied to the reference
+|-
+| <tt>origin</tt>
+| Optional key origin information referencing the wallet associated with the label
+|-
+| <tt>spendable</tt>
+| One of <tt>true</tt> or <tt>false</tt>, denoting if an output should be spendable by the wallet
|}
The reference is defined for each <tt>type</tt> as follows:
@@ -94,6 +102,11 @@ The reference is defined for each <tt>type</tt> as follows:
| <tt>xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8Nq...</tt>
|}
+Each JSON object must contain both <tt>type</tt> and <tt>ref</tt> properties. The <tt>label</tt>, <tt>origin</tt> and <tt>spendable</tt> properties are optional. If the <tt>label</tt> or <tt>spendable</tt> properties are omitted, the importing wallet should not alter these values. The <tt>origin</tt> property should only appear where type is <tt>tx</tt>, and the <tt>spendable</tt> property only where type is <tt>output</tt>.
+
+If present, the optional <tt>origin</tt> property must contain an abbreviated output descriptor (as defined by BIP380<ref>[https://github.com/bitcoin/bips/blob/master/bip-0380.mediawiki BIP-0380]</ref>) describing a BIP32 compatible originating wallet, including all key origin information but excluding any actual keys, any child path elements, or a checksum.
+This property should be used to disambiguate transaction labels from different wallets contained in the same export, particularly when exporting multiple accounts derived from the same seed.
+
Care should be taken when exporting due to the privacy sensitive nature of the data.
Encryption in transit over untrusted networks is highly recommended, and encryption at rest should also be considered.
Unencrypted exports should be deleted as soon as possible.
@@ -101,7 +114,7 @@ For security reasons no private key types are defined.
==Importing==
-* An importing wallet may ignore records it does not store, and truncate labels if necessary.
+* An importing wallet may ignore records it does not store, and truncate labels if necessary. A suggested default for maximum label length is 255 characters, and an importing wallet should consider warning the user if truncation is applied.
* Wallets importing public key records may derive addresses from them to match against known wallet addresses.
* Wallets importing extended public keys may match them against signers, for example in a multisig setup.
@@ -114,12 +127,13 @@ However, importing wallets complying to this specification may ignore types not
The following fragment represents a wallet label export:
<pre>
-{ "type": "tx", "ref": "f91d0a8a78462bc59398f2c5d7a84fcff491c26ba54c4833478b202796c8aafd", "label": "Transaction" }
+{ "type": "tx", "ref": "f91d0a8a78462bc59398f2c5d7a84fcff491c26ba54c4833478b202796c8aafd", "label": "Transaction", "origin": "wpkh([d34db33f/84'/0'/0'])" }
{ "type": "addr", "ref": "bc1q34aq5drpuwy3wgl9lhup9892qp6svr8ldzyy7c", "label": "Address" }
{ "type": "pubkey", "ref": "0283409659355b6d1cc3c32decd5d561abaac86c37a353b52895a5e6c196d6f448", "label": "Public Key" }
{ "type": "input", "ref": "f91d0a8a78462bc59398f2c5d7a84fcff491c26ba54c4833478b202796c8aafd:0", "label": "Input" }
-{ "type": "output", "ref": "f91d0a8a78462bc59398f2c5d7a84fcff491c26ba54c4833478b202796c8aafd:1", "label": "Output" }
+{ "type": "output", "ref": "f91d0a8a78462bc59398f2c5d7a84fcff491c26ba54c4833478b202796c8aafd:1", "label": "Output" , "spendable" : "false" }
{ "type": "xpub", "ref": "xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8", "label": "Extended Public Key" }
+{ "type": "tx", "ref": "f546156d9044844e02b181026a1a407abfca62e7ea1159f87bbeaa77b4286c74", "label": "Account #1 Transaction", "origin": "wpkh([d34db33f/84'/0'/1'])" }
</pre>
==Reference Implementation==
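As an illustration of the import rules above, a minimal sketch of a JSON Lines importer that honours the optional <tt>origin</tt> and <tt>spendable</tt> properties and the suggested 255-character label limit; all names here are illustrative, not part of the specification.

<pre>
import json

# Illustrative importer for a BIP 329 label export (JSON Lines, UTF-8).
KNOWN_TYPES = {"tx", "addr", "pubkey", "input", "output", "xpub"}
MAX_LABEL_LEN = 255  # suggested default; consider warning the user on truncation

def import_labels(path):
    records = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            rec = json.loads(line)
            if rec.get("type") not in KNOWN_TYPES:
                continue  # importers may ignore types they do not store
            label = rec.get("label", "")
            if len(label) > MAX_LABEL_LEN:
                label = label[:MAX_LABEL_LEN]
            records.append({
                "type": rec["type"],
                "ref": rec["ref"],
                "label": label,
                "origin": rec.get("origin"),        # only expected where type is "tx"
                "spendable": rec.get("spendable"),  # only expected where type is "output"
            })
    return records
</pre>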
diff --git a/bip-0340.mediawiki b/bip-0340.mediawiki
index 8128650..c941916 100644
--- a/bip-0340.mediawiki
+++ b/bip-0340.mediawiki
@@ -6,7 +6,7 @@
Tim Ruffing <crypto@timruffing.de>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0340
- Status: Draft
+ Status: Final
Type: Standards Track
License: BSD-2-Clause
Created: 2020-01-19
@@ -86,7 +86,7 @@ Despite halving the size of the set of valid public keys, implicit Y coordinates
For example, without tagged hashing a BIP340 signature could also be valid for a signature scheme where the only difference is that the arguments to the hash function are reordered. Worse, if the BIP340 nonce derivation function was copied or independently created, then the nonce could be accidentally reused in the other scheme leaking the secret key.
-This proposal suggests to include the tag by prefixing the hashed data with ''SHA256(tag) || SHA256(tag)''. Because this is a 64-byte long context-specific constant and the ''SHA256'' block size is also 64 bytes, optimized implementations are possible (identical to SHA256 itself, but with a modified initial state). Using SHA256 of the tag name itself is reasonably simple and efficient for implementations that don't choose to use the optimization.
+This proposal suggests to include the tag by prefixing the hashed data with ''SHA256(tag) || SHA256(tag)''. Because this is a 64-byte long context-specific constant and the ''SHA256'' block size is also 64 bytes, optimized implementations are possible (identical to SHA256 itself, but with a modified initial state). Using SHA256 of the tag name itself is reasonably simple and efficient for implementations that don't choose to use the optimization. In general, tags can be arbitrary byte arrays, but are suggested to be textual descriptions in UTF-8 encoding.
'''Final scheme''' As a result, our final scheme ends up using public key ''pk'' which is the X coordinate of a point ''P'' on the curve whose Y coordinate is even and signatures ''(r,s)'' where ''r'' is the X coordinate of a point ''R'' whose Y coordinate is even. The signature satisfies ''s⋅G = R + tagged_hash(r || pk || m)⋅P''.
@@ -116,7 +116,7 @@ The following conventions are used, with constants as defined for [https://www.s
*** Let ''y = c<sup>(p+1)/4</sup> mod p''.
*** Fail if ''c &ne; y<sup>2</sup> mod p''.
*** Return the unique point ''P'' such that ''x(P) = x'' and ''y(P) = y'' if ''y mod 2 = 0'' or ''y(P) = p-y'' otherwise.
-** The function ''hash<sub>tag</sub>(x)'' where ''tag'' is a UTF-8 encoded tag name and ''x'' is a byte array returns the 32-byte hash ''SHA256(SHA256(tag) || SHA256(tag) || x)''.
+** The function ''hash<sub>name</sub>(x)'' where ''x'' is a byte array returns the 32-byte hash ''SHA256(SHA256(tag) || SHA256(tag) || x)'', where ''tag'' is the UTF-8 encoding of ''name''.
==== Public Key Generation ====
@@ -138,7 +138,7 @@ As an alternative to generating keys randomly, it is also possible and safe to r
Input:
* The secret key ''sk'': a 32-byte array
-* The message ''m'': a 32-byte array
+* The message ''m'': a byte array
* Auxiliary random data ''a'': a 32-byte array
The algorithm ''Sign(sk, m)'' is defined as:
@@ -174,7 +174,7 @@ It should be noted that various alternative signing algorithms can be used to pr
Input:
* The public key ''pk'': a 32-byte array
-* The message ''m'': a 32-byte array
+* The message ''m'': a byte array
* A signature ''sig'': a 64-byte array
The algorithm ''Verify(pk, m, sig)'' is defined as:
@@ -197,7 +197,7 @@ Note that the correctness of verification relies on the fact that ''lift_x'' alw
Input:
* The number ''u'' of signatures
* The public keys ''pk<sub>1..u</sub>'': ''u'' 32-byte arrays
-* The messages ''m<sub>1..u</sub>'': ''u'' 32-byte arrays
+* The messages ''m<sub>1..u</sub>'': ''u'' byte arrays
* The signatures ''sig<sub>1..u</sub>'': ''u'' 64-byte arrays
The algorithm ''BatchVerify(pk<sub>1..u</sub>, m<sub>1..u</sub>, sig<sub>1..u</sub>)'' is defined as:
@@ -213,6 +213,50 @@ The algorithm ''BatchVerify(pk<sub>1..u</sub>, m<sub>1..u</sub>, sig<sub>1..u</s
If all individual signatures are valid (i.e., ''Verify'' would return success for them), ''BatchVerify'' will always return success. If at least one signature is invalid, ''BatchVerify'' will return success with at most a negligible probability.
+=== Usage Considerations ===
+
+==== Messages of Arbitrary Size ====
+
+The signature scheme specified in this BIP accepts byte strings of arbitrary size as input messages.<ref>In theory, the message size is restricted due to the fact that SHA256 accepts byte strings only up to size of 2^61-1 bytes.</ref>
+It is understood that implementations may reject messages which are too large in their environment or application context,
+e.g., messages which exceed predefined buffers or would otherwise cause resource exhaustion.
+
+Earlier revisions of this BIP required messages to be exactly 32 bytes.
+This restriction puts a burden on callers
+who typically need to perform pre-hashing of the actual input message by feeding it through SHA256 (or another collision-resistant cryptographic hash function)
+to create a 32-byte digest which can be passed to signing or verification
+(as for example done in [[bip-0341.mediawiki|BIP341]].)
+
+Since pre-hashing may not always be desirable,
+e.g., when actual messages are shorter than 32 bytes,<ref>Another reason to omit pre-hashing is to protect against certain types of cryptanalytic advances against the hash function used for pre-hashing: If pre-hashing is used, an attacker that can find collisions in the pre-hashing function can necessarily forge signatures under chosen-message attacks. If pre-hashing is not used, an attacker that can find collisions in SHA256 (as used inside the signature scheme) may not be able to forge signatures. However, this seeming advantage is mostly irrelevant in the context of Bitcoin, which already relies on collision resistance of SHA256 in other places, e.g., for transaction hashes.</ref>
+the restriction to 32-byte messages has been lifted.
+We note that pre-hashing is recommended for performance reasons in applications that deal with large messages.
+If large messages are not pre-hashed,
+the algorithms of the signature scheme will perform more hashing internally.
+In particular, the signing algorithm needs two sequential hashing passes over the message,
+which means that the full message must necessarily be kept in memory during signing,
+and large messages entail a runtime penalty.<ref>Typically, messages of 56 bytes or longer enjoy a performance benefit from pre-hashing, assuming the speed of SHA256 inside the signing algorithm matches that of the pre-hashing done by the calling application.</ref>
+
+==== Domain Separation ====
+
+It is good cryptographic practice to use a key pair only for a single purpose.
+Nevertheless, there may be situations in which it may be desirable to use the same key pair in multiple contexts,
+i.e., to sign different types of messages within the same application
+or even messages in entirely different applications
+(e.g., a secret key may be used to sign Bitcoin transactions as well as plain text messages).
+
+As a consequence, applications should ensure that a signed application message intended for one context is never deemed valid in a different context
+(e.g., a signed plain text message should never be misinterpreted as a signed Bitcoin transaction, because this could cause unintended loss of funds).
+This is called "domain separation" and it is typically realized by partitioning the message space.
+Even if key pairs are intended to be used only within a single context,
+domain separation is a good idea because it makes it easy to add more contexts later.
+
+As a best practice, we recommend that applications use exactly one of the following methods to pre-process application messages before passing them to the signature scheme:
+* Either, pre-hash the application message using ''hash<sub>name</sub>'', where ''name'' identifies the context uniquely (e.g., "foo-app/signed-bar"),
+* or prefix the actual message with a 33-byte string that identifies the context uniquely (e.g., the UTF-8 encoding of "foo-app/signed-bar", padded with null bytes to 33 bytes).
+
+As the two pre-processing methods yield different message sizes (32 bytes vs. at least 33 bytes), there is no risk of collision between them.
+
== Applications ==
There are several interesting applications beyond simple signatures.
@@ -248,6 +292,7 @@ The reference implementation is for demonstration purposes only and not to be us
To help implementors understand updates to this BIP, we keep a list of substantial changes.
* 2022-08: Fix function signature of lift_x in reference code
+* 2023-04: Allow messages of arbitrary size
== Footnotes ==
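The tagged-hash construction and the two domain-separation methods described above are small enough to show directly. The following sketch uses only the Python standard library; the helper names are illustrative rather than part of the BIP.

<pre>
import hashlib

def tagged_hash(name: str, msg: bytes) -> bytes:
    # hash_name(x) = SHA256(SHA256(tag) || SHA256(tag) || x), tag = UTF-8 of name
    tag_hash = hashlib.sha256(name.encode()).digest()
    return hashlib.sha256(tag_hash + tag_hash + msg).digest()

# Method 1: pre-hash with a context-specific tag, yielding a 32-byte message.
def prehash_for_context(context: str, msg: bytes) -> bytes:
    return tagged_hash(context, msg)

# Method 2: prefix the message with a 33-byte context string (UTF-8, padded
# with null bytes), yielding a message of at least 33 bytes.
def prefix_for_context(context: str, msg: bytes) -> bytes:
    prefix = context.encode()
    assert len(prefix) <= 33
    return prefix.ljust(33, b"\x00") + msg
</pre>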
diff --git a/bip-0340/reference.py b/bip-0340/reference.py
index 162bb88..b327e0a 100644
--- a/bip-0340/reference.py
+++ b/bip-0340/reference.py
@@ -96,8 +96,6 @@ def pubkey_gen(seckey: bytes) -> bytes:
return bytes_from_point(P)
def schnorr_sign(msg: bytes, seckey: bytes, aux_rand: bytes) -> bytes:
- if len(msg) != 32:
- raise ValueError('The message must be a 32-byte array.')
d0 = int_from_bytes(seckey)
if not (1 <= d0 <= n - 1):
raise ValueError('The secret key must be an integer in the range 1..n-1.')
@@ -121,8 +119,6 @@ def schnorr_sign(msg: bytes, seckey: bytes, aux_rand: bytes) -> bytes:
return sig
def schnorr_verify(msg: bytes, pubkey: bytes, sig: bytes) -> bool:
- if len(msg) != 32:
- raise ValueError('The message must be a 32-byte array.')
if len(pubkey) != 32:
raise ValueError('The public key must be a 32-byte array.')
if len(sig) != 64:
diff --git a/bip-0340/test-vectors.csv b/bip-0340/test-vectors.csv
index a1a63e1..6723391 100644
--- a/bip-0340/test-vectors.csv
+++ b/bip-0340/test-vectors.csv
@@ -14,3 +14,7 @@ index,secret key,public key,aux_rand,message,signature,verification result,comme
12,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F69E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,sig[0:32] is equal to field size
13,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E177769FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141,FALSE,sig[32:64] is equal to curve order
14,,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,public key is not a valid X coordinate because it exceeds the field size
+15,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,,71535DB165ECD9FBBC046E5FFAEA61186BB6AD436732FCCC25291A55895464CF6069CE26BF03466228F19A3A62DB8A649F2D560FAC652827D1AF0574E427AB63,TRUE,message of size 0 (added 2022-12)
+16,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,11,08A20A0AFEF64124649232E0693C583AB1B9934AE63B4C3511F3AE1134C6A303EA3173BFEA6683BD101FA5AA5DBC1996FE7CACFC5A577D33EC14564CEC2BACBF,TRUE,message of size 1 (added 2022-12)
+17,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,0102030405060708090A0B0C0D0E0F1011,5130F39A4059B43BC7CAC09A19ECE52B5D8699D1A71E3C52DA9AFDB6B50AC370C4A482B77BF960F8681540E25B6771ECE1E5A37FD80E5A51897C5566A97EA5A5,TRUE,message of size 17 (added 2022-12)
+18,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999,403B12B0D8555A344175EA7EC746566303321E5DBFA8BE6F091635163ECA79A8585ED3E3170807E7C03B720FC54C7B23897FCBA0E9D0B4A06894CFD249F22367,TRUE,message of size 100 (added 2022-12)
diff --git a/bip-0340/test-vectors.py b/bip-0340/test-vectors.py
index d1bf6b2..317f2ec 100644
--- a/bip-0340/test-vectors.py
+++ b/bip-0340/test-vectors.py
@@ -249,6 +249,20 @@ def vector14():
return (None, pubkey, None, msg, sig, "FALSE", "public key is not a valid X coordinate because it exceeds the field size")
+def varlen_vector(msg_int):
+ seckey = bytes_from_int(int(16 * "0340", 16))
+ pubkey = pubkey_gen(seckey)
+ aux_rand = bytes_from_int(0)
+ msg = msg_int.to_bytes((msg_int.bit_length() + 7) // 8, "big")
+ sig = schnorr_sign(msg, seckey, aux_rand)
+ comment = "message of size %d (added 2022-12)"
+ return (seckey, pubkey, aux_rand, msg, sig, "TRUE", comment % len(msg))
+
+vector15 = lambda : varlen_vector(0)
+vector16 = lambda : varlen_vector(0x11)
+vector17 = lambda : varlen_vector(0x0102030405060708090A0B0C0D0E0F1011)
+vector18 = lambda : varlen_vector(int(100 * "99", 16))
+
vectors = [
vector0(),
vector1(),
@@ -264,7 +278,11 @@ vectors = [
vector11(),
vector12(),
vector13(),
- vector14()
+ vector14(),
+ vector15(),
+ vector16(),
+ vector17(),
+ vector18(),
]
# Converts the byte strings of a test vector into hex strings
diff --git a/bip-0341.mediawiki b/bip-0341.mediawiki
index 8d2af3c..639cec6 100644
--- a/bip-0341.mediawiki
+++ b/bip-0341.mediawiki
@@ -7,7 +7,7 @@
Anthony Towns <aj@erisian.com.au>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0341
- Status: Draft
+ Status: Final
Type: Standards Track
Created: 2020-01-19
License: BSD-3-Clause
@@ -105,7 +105,7 @@ If the parameters take acceptable values, the message is the concatenation of th
** ''nLockTime'' (4): the ''nLockTime'' of the transaction.
** If the ''hash_type & 0x80'' does not equal <code>SIGHASH_ANYONECANPAY</code>:
*** ''sha_prevouts'' (32): the SHA256 of the serialization of all input outpoints.
-*** ''sha_amounts'' (32): the SHA256 of the serialization of all spent output amounts.
+*** ''sha_amounts'' (32): the SHA256 of the serialization of all input amounts.
*** ''sha_scriptpubkeys'' (32): the SHA256 of all spent outputs' ''scriptPubKeys'', serialized as script inside <code>CTxOut</code>.
*** ''sha_sequences'' (32): the SHA256 of the serialization of all input ''nSequence''.
** If ''hash_type & 3'' does not equal <code>SIGHASH_NONE</code> or <code>SIGHASH_SINGLE</code>:
@@ -152,7 +152,7 @@ Satisfying any of these conditions is sufficient to spend the output.
'''Initial steps''' The first step is determining what the internal key and the organization of the rest of the scripts should be. The specifics are likely application dependent, but here are some general guidelines:
* When deciding between scripts with conditionals (<code>OP_IF</code> etc.) and splitting them up into multiple scripts (each corresponding to one execution path through the original script), it is generally preferable to pick the latter.
* When a single condition requires signatures with multiple keys, key aggregation techniques like MuSig can be used to combine them into a single key. The details are out of scope for this document, but note that this may complicate the signing procedure.
-* If one or more of the spending conditions consist of just a single key (after aggregation), the most likely one should be made the internal key. If no such condition exists, it may be worthwhile adding one that consists of an aggregation of all keys participating in all scripts combined; effectively adding an "everyone agrees" branch. If that is inacceptable, pick as internal key a point with unknown discrete logarithm. One example of such a point is ''H = lift_x(0x0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0)'' which is [https://github.com/ElementsProject/secp256k1-zkp/blob/11af7015de624b010424273be3d91f117f172c82/src/modules/rangeproof/main_impl.h#L16 constructed] by taking the hash of the standard uncompressed encoding of the [https://www.secg.org/sec2-v2.pdf secp256k1] base point ''G'' as X coordinate. In order to avoid leaking the information that key path spending is not possible it is recommended to pick a fresh integer ''r'' in the range ''0...n-1'' uniformly at random and use ''H + rG'' as internal key. It is possible to prove that this internal key does not have a known discrete logarithm with respect to ''G'' by revealing ''r'' to a verifier who can then reconstruct how the internal key was created.
+* If one or more of the spending conditions consist of just a single key (after aggregation), the most likely one should be made the internal key. If no such condition exists, it may be worthwhile adding one that consists of an aggregation of all keys participating in all scripts combined; effectively adding an "everyone agrees" branch. If that is inacceptable, pick as internal key a "Nothing Up My Sleeve" (NUMS) point, i.e., a point with unknown discrete logarithm. One example of such a point is ''H = lift_x(0x50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0)'' which is [https://github.com/ElementsProject/secp256k1-zkp/blob/11af7015de624b010424273be3d91f117f172c82/src/modules/rangeproof/main_impl.h#L16 constructed] by taking the hash of the standard uncompressed encoding of the [https://www.secg.org/sec2-v2.pdf secp256k1] base point ''G'' as X coordinate. In order to avoid leaking the information that key path spending is not possible it is recommended to pick a fresh integer ''r'' in the range ''0...n-1'' uniformly at random and use ''H + rG'' as internal key. It is possible to prove that this internal key does not have a known discrete logarithm with respect to ''G'' by revealing ''r'' to a verifier who can then reconstruct how the internal key was created.
* If the spending conditions do not require a script path, the output key should commit to an unspendable script path instead of having no script path. This can be achieved by computing the output key point as ''Q = P + int(hash<sub>TapTweak</sub>(bytes(P)))G''. <ref>'''Why should the output key always have a taproot commitment, even if there is no script path?'''
If the taproot output key is an aggregate of keys, there is the possibility for a malicious party to add a script path without being noticed by the other parties.
This allows to bypass the multiparty policy and to steal the coin.
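A sketch of the recommendation above, constructing an internal key ''H + rG'' with a fresh random ''r'', written against helpers in the style of the BIP 340 reference code (<tt>lift_x</tt>, <tt>point_add</tt>, <tt>point_mul</tt>, <tt>bytes_from_point</tt>, <tt>G</tt>, <tt>n</tt>); treat those names and signatures as assumptions.

<pre>
import secrets

# NUMS x coordinate given above; lift_x, point_add, point_mul, bytes_from_point,
# G and n are assumed to exist in the style of the BIP 340 reference code.
H_X = 0x50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0

def unspendable_internal_key():
    H = lift_x(H_X)                    # point with unknown discrete logarithm
    r = secrets.randbelow(n)           # fresh blinding factor; keep r to prove unspendability
    P = point_add(H, point_mul(G, r))  # internal key H + rG
    return bytes_from_point(P), r
</pre>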
diff --git a/bip-0342.mediawiki b/bip-0342.mediawiki
index bbefcaa..64d07cc 100644
--- a/bip-0342.mediawiki
+++ b/bip-0342.mediawiki
@@ -7,7 +7,7 @@
Anthony Towns <aj@erisian.com.au>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0342
- Status: Draft
+ Status: Final
Type: Standards Track
Created: 2020-01-19
License: BSD-3-Clause
diff --git a/bip-0343.mediawiki b/bip-0343.mediawiki
index 3d2f392..a47edc0 100644
--- a/bip-0343.mediawiki
+++ b/bip-0343.mediawiki
@@ -6,7 +6,7 @@
Michael Folkson <michaelfolkson@gmail.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0343
- Status: Proposed
+ Status: Final
Type: Standards Track
Created: 2021-04-25
License: BSD-3-Clause
diff --git a/bip-0350.mediawiki b/bip-0350.mediawiki
index 9873d80..439b2a2 100644
--- a/bip-0350.mediawiki
+++ b/bip-0350.mediawiki
@@ -5,7 +5,7 @@
Author: Pieter Wuille <pieter@wuille.net>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0350
- Status: Draft
+ Status: Final
Type: Standards Track
Created: 2020-12-16
License: BSD-2-Clause
diff --git a/bip-0370.mediawiki b/bip-0370.mediawiki
index 11b3125..cd448cc 100644
--- a/bip-0370.mediawiki
+++ b/bip-0370.mediawiki
@@ -2,7 +2,7 @@
BIP: 370
Layer: Applications
Title: PSBT Version 2
- Author: Andrew Chow <achow101@gmail.com>
+ Author: Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0370
Status: Draft
@@ -62,7 +62,7 @@ The new global types for PSBT Version 2 are as follows:
| <tt>PSBT_GLOBAL_TX_VERSION = 0x02</tt>
| None
| No key data
-| <tt><32-bit little endian uint version></tt>
+| <tt><32-bit little endian int version></tt>
| The 32-bit little endian signed integer representing the version number of the transaction being created. Note that this is not the same as the PSBT version number specified by the PSBT_GLOBAL_VERSION field.
| 2
| 0
@@ -235,9 +235,9 @@ PSBTv2 introduces new roles and modifies some existing roles.
===Creator===
In PSBTv2, the Creator initializes the PSBT with 0 inputs and 0 outputs.
-The PSBT version number is set to 2. The transaction version number must be set to at least 2. <ref>'''Why does the transaction version number need to be at least 2?''' The transaction version number is part of the validation rules for some features such as OP_CHECKSEQUENCEVERIFY. Since it is backwards compatible, and there are other ways to disable those features (e.g. through sequence numbers), it is easier to require transactions be able to support these features than to try to negotiate the transaction version number.</ref>
+The PSBT version number is set to 2.
The Creator should also set PSBT_GLOBAL_FALLBACK_LOCKTIME.
-If the Creator is not also a Constructor and will be giving the PSBT to others to add inputs and outputs, the PSBT_GLOBAL_TX_MODIFIABLE field must be present and and the Inputs Modifiable and Outputs Modifiable flags set appropriately.
+If the Creator is not also a Constructor and will be giving the PSBT to others to add inputs and outputs, the PSBT_GLOBAL_TX_MODIFIABLE field must be present and the Inputs Modifiable and Outputs Modifiable flags set appropriately; moreover, the transaction version number must be set to at least 2. <ref>'''Why does the transaction version number need to be at least 2?''' The transaction version number is part of the validation rules for some features such as OP_CHECKSEQUENCEVERIFY. Since it is backwards compatible, and there are other ways to disable those features (e.g. through sequence numbers), it is easier to require transactions be able to support these features than to try to negotiate the transaction version number.</ref>
If the Creator is a Constructor and no inputs and outputs will be added by other entities, PSBT_GLOBAL_TX_MODIFIABLE may be omitted.
===Constructor===
@@ -288,7 +288,7 @@ The Extractor should produce a fully valid, network serialized transaction if al
==Backwards Compatibility==
-PSBTv2 shares the same gemeric format as PSBTv0 as defined in BIP 174. Parsers for PSBTv0 should
+PSBTv2 shares the same generic format as PSBTv0 as defined in BIP 174. Parsers for PSBTv0 should
be able to deserialize PSBTv2 with only changes to support the new fields.
However PSBTv2 is incompatible with PSBTv0, and vice versa due to the use of the PSBT_GLOBAL_VERSION.
diff --git a/bip-0371.mediawiki b/bip-0371.mediawiki
index 452584a..45b69f8 100644
--- a/bip-0371.mediawiki
+++ b/bip-0371.mediawiki
@@ -2,7 +2,7 @@
BIP: 371
Layer: Applications
Title: Taproot Fields for PSBT
- Author: Andrew Chow <andrew@achow101.com>
+ Author: Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0371
Status: Draft
@@ -78,7 +78,7 @@ The new per-input types are defined as follows:
| Taproot Key BIP 32 Derivation Path
| <tt>PSBT_IN_TAP_BIP32_DERIVATION = 0x16</tt>
| <tt><32 byte xonlypubkey></tt>
-| A 32 byte X-only public key involved in this input. It may be the internal key, or a key present in a leaf script.
+| A 32 byte X-only public key involved in this input. It may be the output key, the internal key, or a key present in a leaf script.
| <tt><compact size uint number of hashes> <32 byte leaf hash>* <4 byte fingerprint> <32-bit little endian uint path element>*</tt>
| A compact size unsigned integer representing the number of leaf hashes, followed by a list of leaf hashes, followed by the 4 byte master key fingerprint concatenated with the derivation path of the public key. The derivation path is represented as 32-bit little endian unsigned integer indexes concatenated with each other. Public keys are those needed to spend this output. The leaf hashes are of the leaves which involve this public key. The internal key does not have leaf hashes, so can be indicated with a <tt>hashes len</tt> of 0. Finalizers should remove this field after <tt>PSBT_IN_FINAL_SCRIPTWITNESS</tt> is constructed.
|
@@ -142,7 +142,7 @@ The new per-output types are defined as follows:
| Taproot Key BIP 32 Derivation Path
| <tt>PSBT_OUT_TAP_BIP32_DERIVATION = 0x07</tt>
| <tt><32 byte xonlypubkey></tt>
-| A 32 byte X-only public key involved in this output. It may be the internal key, or a key present in a leaf script.
+| A 32 byte X-only public key involved in this output. It may be the output key, the internal key, or a key present in a leaf script.
| <tt><compact size uint number of hashes> <32 byte leaf hash>* <4 byte fingerprint> <32-bit little endian uint path element>*</tt>
| A compact size unsigned integer representing the number of leaf hashes, followed by a list of leaf hashes, followed by the 4 byte master key fingerprint concatenated with the derivation path of the public key. The derivation path is represented as 32-bit little endian unsigned integer indexes concatenated with each other. Public keys are those needed to spend this output. The leaf hashes are of the leaves which involve this public key. The internal key does not have leaf hashes, so can be indicated with a <tt>hashes len</tt> of 0. Finalizers should remove this field after <tt>PSBT_IN_FINAL_SCRIPTWITNESS</tt> is constructed.
|
diff --git a/bip-0380.mediawiki b/bip-0380.mediawiki
index f870734..c457a34 100644
--- a/bip-0380.mediawiki
+++ b/bip-0380.mediawiki
@@ -3,7 +3,7 @@
Layer: Applications
Title: Output Script Descriptors General Operation
Author: Pieter Wuille <pieter@wuille.net>
- Andrew Chow <andrew@achow101.com>
+ Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0380
Status: Draft
@@ -49,6 +49,7 @@ Lastly, the use of common terminology and existing standards allow for Output Sc
Descriptors consist of several types of expressions.
The top level expression is a <tt>SCRIPT</tt>.
This expression may be followed by <tt>#CHECKSUM</tt>, where <tt>CHECKSUM</tt> is an 8 character alphanumeric descriptor checksum.
+Although the checksum is optional for parsing, applications may choose to reject descriptors that do not contain a checksum.
===Script Expressions===
@@ -198,6 +199,63 @@ def descsum_create(s):
</pre>
+==Test Vectors==
+
+The following tests cover the checksum and character set:
+
+* Valid checksum: <tt>raw(deadbeef)#89f8spxm</tt>
+* No checksum: <tt>raw(deadbeef)</tt>
+* Missing checksum: <tt>raw(deadbeef)#</tt>
+* Too long checksum (9 chars): <tt>raw(deadbeef)#89f8spxmx</tt>
+* Too short checksum (7 chars): <tt>raw(deadbeef)#89f8spx</tt>
+* Error in payload: <tt>raw(deedbeef)#89f8spxm</tt>
+* Error in checksum: <tt>raw(deedbeef)##9f8spxm</tt>
+* Invalid characters in payload: <tt>raw(Ü)#00000000</tt>
+
+The following tests cover key expressions:
+
+Valid expressions:
+
+* Compressed public key: <tt>0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600</tt>
+* Uncompressed public key: <tt>04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235</tt>
+* Public key with key origin: <tt>[deadbeef/0h/0h/0h]0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600</tt>
+* Public key with key origin (<tt>'</tt> as hardened indicator): <tt>[deadbeef/0'/0'/0']0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600</tt>
+* Public key with key origin (mixed hardened indicator): <tt>[deadbeef/0'/0h/0']0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600</tt>
+* WIF uncompressed private key <tt>5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss</tt>
+* WIF compressed private key <tt>L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1</tt>
+* Extended public key: <tt>xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL</tt>
+* Extended public key with key origin: <tt>[deadbeef/0h/1h/2h]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL</tt>
+* Extended public key with derivation: <tt>[deadbeef/0h/1h/2h]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/3/4/5</tt>
+* Extended public key with derivation and children: <tt>[deadbeef/0h/1h/2h]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/3/4/5/*</tt>
+* Extended public key with hardened derivation and unhardened children: <tt>xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/3h/4h/5h/*</tt>
+* Extended public key with hardened derivation and children: <tt>xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/3h/4h/5h/*h</tt>
+* Extended public key with key origin, hardened derivation and children: <tt>[deadbeef/0h/1h/2]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/3h/4h/5h/*h</tt>
+* Extended private key: <tt>xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc</tt>
+* Extended private key with key origin: <tt>[deadbeef/0h/1h/2h]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc</tt>
+* Extended private key with derivation: <tt>[deadbeef/0h/1h/2h]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/3/4/5</tt>
+* Extended private key with derivation and children: <tt>[deadbeef/0h/1h/2h]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/3/4/5/*</tt>
+* Extended private key with hardened derivation and unhardened children: <tt>xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/3h/4h/5h/*</tt>
+* Extended private key with hardened derivation and children: <tt>xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/3h/4h/5h/*h</tt>
+* Extended private key with key origin, hardened derivation and children: <tt>[deadbeef/0h/1h/2]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/3h/4h/5h/*h</tt>
+
+Invalid expressions:
+
+* Children indicator in key origin: <tt>[deadbeef/0h/0h/0h/*]0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600</tt>
+* Trailing slash in key origin: <tt>[deadbeef/0h/0h/0h/]0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600</tt>
+* Too short fingerprint: <tt>[deadbef/0h/0h/0h]0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600</tt>
+* Too long fingerprint: <tt>[deadbeeef/0h/0h/0h]0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600</tt>
+* Invalid hardened indicators: <tt>[deadbeef/0f/0f/0f]0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600</tt>
+* Invalid hardened indicators: <tt>[deadbeef/0H/0H/0H]0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600</tt>
+* Invalid hardened indicators: <tt>[deadbeef/-0/-0/-0]0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600</tt>
+* Private key with derivation: <tt>L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1/0</tt>
+* Private key with derivation children: <tt>L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1/*</tt>
+* Derivation index out of range: <tt>xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483648)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483648</tt>
+* Invalid derivation index: <tt>xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/1aa)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/1aa</tt>
+* Multiple key origins: <tt>[aaaaaaaa][aaaaaaaa]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0</tt>
+* Missing key origin start: <tt>aaaaaaaa]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0</tt>
+* Non hex fingerprint: <tt>[gaaaaaaa]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0</tt>
+* Key origin with no public key: <tt>[deadbeef]</tt>
+
==Backwards Compatibility==
Output script descriptors are an entirely new language which is not compatible with any existing software.
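The checksum vectors above can be exercised directly against the reference code quoted earlier in this BIP; a short usage sketch follows (<tt>descsum_create</tt> appears in that code, and <tt>descsum_check</tt> is assumed to be available alongside it).

<pre>
# Usage sketch; descsum_create is shown in the reference code above, and
# descsum_check is assumed to be available alongside it.
assert descsum_create("raw(deadbeef)") == "raw(deadbeef)#89f8spxm"

for bad in [
    "raw(deadbeef)#",           # missing checksum
    "raw(deadbeef)#89f8spxmx",  # too long
    "raw(deadbeef)#89f8spx",    # too short
    "raw(deedbeef)#89f8spxm",   # error in payload
    "raw(deedbeef)##9f8spxm",   # error in checksum
]:
    assert not descsum_check(bad)
</pre>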
diff --git a/bip-0381.mediawiki b/bip-0381.mediawiki
index f439597..4b94278 100644
--- a/bip-0381.mediawiki
+++ b/bip-0381.mediawiki
@@ -3,7 +3,7 @@
Layer: Applications
Title: Non-Segwit Output Script Descriptors
Author: Pieter Wuille <pieter@wuille.net>
- Andrew Chow <andrew@achow101.com>
+ Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0381
Status: Draft
@@ -70,7 +70,51 @@ OP_HASH160 <SCRIPT_hash160> OP_EQUAL
==Test Vectors==
-TBD
+Valid descriptors followed by the scripts they produce. Descriptors involving derived child keys will have the 0th, 1st, and 2nd scripts listed.
+
+* <tt>pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)</tt>
+** <tt>2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac</tt>
+* <tt>pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)</tt>
+** <tt>2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac</tt>
+* <tt>pkh([deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)</tt>
+** <tt>76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac</tt>
+* <tt>pkh([deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)</tt>
+** <tt>76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac</tt>
+* <tt>pkh([deadbeef/1/2h/3/4h]03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)</tt>
+** <tt>76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac</tt>
+* <tt>pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)</tt>
+** <tt>4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac</tt>
+* <tt>pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)</tt>
+** <tt>4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac</tt>
+* <tt>pkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)</tt>
+** <tt>76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac</tt>
+* <tt>pkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)</tt>
+** <tt>76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac</tt>
+* <tt>sh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))</tt>
+** <tt>a9141857af51a5e516552b3086430fd8ce55f7c1a52487</tt>
+* <tt>sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))</tt>
+** <tt>a9141857af51a5e516552b3086430fd8ce55f7c1a52487</tt>
+* <tt>sh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))</tt>
+** <tt>a9141a31ad23bf49c247dd531a623c2ef57da3c400c587</tt>
+* <tt>sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))</tt>
+** <tt>a9141a31ad23bf49c247dd531a623c2ef57da3c400c587</tt>
+* <tt>pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0)</tt>
+** <tt>76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac</tt>
+* <tt>pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0)</tt>
+** <tt>76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac</tt>
+* <tt>pkh([bd16bee5/2147483647h]xpub69H7F5dQzmVd3vPuLKtcXJziMEQByuDidnX3YdwgtNsecY5HRGtAAQC5mXTt4dsv9RzyjgDjAQs9VGVV6ydYCHnprc9vvaA5YtqWyL6hyds/0)</tt>
+** <tt>76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac</tt>
+* <tt>pk(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)</tt>
+** <tt>210379e45b3cf75f9c5f9befd8e9506fb962f6a9d185ac87001ec44a8d3df8d4a9e3ac</tt>
+* <tt>pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)</tt>
+** <tt>210379e45b3cf75f9c5f9befd8e9506fb962f6a9d185ac87001ec44a8d3df8d4a9e3ac</tt>
+
+Invalid descriptors
+
+* <tt>pk()</tt> only accepts key expressions: <tt>pk(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))</tt>
+* <tt>pkh()</tt> only accepts key expressions: <tt>pkh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))</tt>
+* <tt>sh()</tt> only accepts script expressions: <tt>sh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)</tt>
+* <tt>sh()</tt> is top level only: <tt>sh(sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))</tt>
==Backwards Compatibility==
diff --git a/bip-0382.mediawiki b/bip-0382.mediawiki
index 768079e..942f62c 100644
--- a/bip-0382.mediawiki
+++ b/bip-0382.mediawiki
@@ -3,7 +3,7 @@
Layer: Applications
Title: Segwit Output Script Descriptors
Author: Pieter Wuille <pieter@wuille.net>
- Andrew Chow <andrew@achow101.com>
+ Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0382
Status: Draft
@@ -57,7 +57,52 @@ OP_0 <SCRIPT_sha256>
==Test Vectors==
-TBD
+Valid descriptors followed by the scripts they produce. Descriptors involving derived child keys will have the 0th, 1st, and 2nd scripts listed.
+
+* <tt>wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)</tt>
+** <tt>00149a1c78a507689f6f54b847ad1cef1e614ee23f1e</tt>
+* <tt>wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)</tt>
+** <tt>00149a1c78a507689f6f54b847ad1cef1e614ee23f1e</tt>
+* <tt>wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/0)</tt>
+** <tt>0014326b2249e3a25d5dc60935f044ee835d090ba859</tt>
+* <tt>wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)</tt>
+** <tt>0014326b2249e3a25d5dc60935f044ee835d090ba859</tt>
+** <tt>0014af0bd98abc2f2cae66e36896a39ffe2d32984fb7</tt>
+** <tt>00141fa798efd1cbf95cebf912c031b8a4a6e9fb9f27</tt>
+* <tt>sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))</tt>
+** <tt>a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87</tt>
+** <tt>a914bed59fc0024fae941d6e20a3b44a109ae740129287</tt>
+** <tt>a9148483aa1116eb9c05c482a72bada4b1db24af654387</tt>
+* <tt>sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*h))</tt>
+** <tt>a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87</tt>
+** <tt>a914bed59fc0024fae941d6e20a3b44a109ae740129287</tt>
+** <tt>a9148483aa1116eb9c05c482a72bada4b1db24af654387</tt>
+* <tt>wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))</tt>
+** <tt>0020338e023079b91c58571b20e602d7805fb808c22473cbc391a41b1bd3a192e75b</tt>
+* <tt>wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))</tt>
+** <tt>0020338e023079b91c58571b20e602d7805fb808c22473cbc391a41b1bd3a192e75b</tt>
+* <tt>wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))</tt>
+** <tt>00202e271faa2325c199d25d22e1ead982e45b64eeb4f31e73dbdf41bd4b5fec23fa</tt>
+* <tt>wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))</tt>
+** <tt>00202e271faa2325c199d25d22e1ead982e45b64eeb4f31e73dbdf41bd4b5fec23fa</tt>
+* <tt>sh(wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))</tt>
+** <tt>a914b61b92e2ca21bac1e72a3ab859a742982bea960a87</tt>
+* <tt>sh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))</tt>
+** <tt>a914b61b92e2ca21bac1e72a3ab859a742982bea960a87</tt>
+
+Invalid descriptors with descriptions
+
+* Uncompressed public key in <tt>wpkh()</tt>: <tt>wpkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)</tt>
+* Uncompressed public key in <tt>wpkh()</tt>: <tt>sh(wpkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss))</tt>
+* Uncompressed public key in <tt>wpkh()</tt>: <tt>wpkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)</tt>
+* Uncompressed public key in <tt>wpkh()</tt>: <tt>sh(wpkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235))</tt>
+* Uncompressed public keys under <tt>wsh()</tt>: <tt>wsh(pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss))</tt>
+* Uncompressed public keys under <tt>wsh()</tt>: <tt>wsh(pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235))</tt>
+* <tt>wpkh()</tt> nested in <tt>wsh()</tt>: <tt>wsh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))</tt>
+* <tt>wsh()</tt> nested in <tt>wsh()</tt>: <tt>wsh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))</tt>
+* <tt>wsh()</tt> nested in <tt>wsh()</tt>: <tt>sh(wsh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))))</tt>
+* Script in <tt>wpkh()</tt>: <tt>wpkh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))</tt>
+* Key in <tt>wsh()</tt>: <tt>wsh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)</tt>
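+
+For readers reproducing the vectors above, a minimal sketch (not part of the BIP text; helper names are hypothetical, and it assumes a <tt>hashlib</tt> build with <tt>ripemd160</tt>) of the segwit and P2SH templates they encode:
+
+<pre>
+import hashlib
+
+def hash160(data: bytes) -> bytes:
+    # RIPEMD160(SHA256(data))
+    return hashlib.new('ripemd160', hashlib.sha256(data).digest()).digest()
+
+def wpkh_script(pubkey: bytes) -> bytes:
+    # wpkh(KEY): OP_0 <20-byte key hash>  (compressed keys only)
+    return b'\x00\x14' + hash160(pubkey)
+
+def wsh_script(witness_script: bytes) -> bytes:
+    # wsh(SCRIPT): OP_0 <32-byte SHA256 of the witness script>
+    return b'\x00\x20' + hashlib.sha256(witness_script).digest()
+
+def sh_script(redeem_script: bytes) -> bytes:
+    # sh(SCRIPT): OP_HASH160 <20-byte script hash> OP_EQUAL
+    return b'\xa9\x14' + hash160(redeem_script) + b'\x87'
+</pre>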
==Backwards Compatibility==
diff --git a/bip-0383.mediawiki b/bip-0383.mediawiki
index 92e86e3..66e2f16 100644
--- a/bip-0383.mediawiki
+++ b/bip-0383.mediawiki
@@ -3,7 +3,7 @@
Layer: Applications
Title: Multisig Output Script Descriptors
Author: Pieter Wuille <pieter@wuille.net>
- Andrew Chow <andrew@achow101.com>
+ Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0383
Status: Draft
@@ -65,7 +65,37 @@ This changes the keys in lockstep and allows for output scripts to be indexed in
==Test Vectors==
-TBD
+Valid descriptors followed by the scripts they produce. Descriptors involving derived child keys will have the 0th, 1st, and 2nd scripts listed.
+
+* <tt>multi(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)</tt>
+** <tt>512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae</tt>
+* <tt>multi(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)</tt>
+** <tt>512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae</tt>
+* <tt>sortedmulti(1,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)</tt>
+** <tt>512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae</tt>
+* <tt>sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))</tt>
+** <tt>a91445a9a622a8b0a1269944be477640eedc447bbd8487</tt>
+* <tt>sortedmulti(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/0/*)</tt>
+** <tt>5221025d5fc65ebb8d44a5274b53bac21ff8307fec2334a32df05553459f8b1f7fe1b62102fbd47cc8034098f0e6a94c6aeee8528abf0a2153a5d8e46d325b7284c046784652ae</tt>
+** <tt>52210264fd4d1f5dea8ded94c61e9641309349b62f27fbffe807291f664e286bfbe6472103f4ece6dfccfa37b211eb3d0af4d0c61dba9ef698622dc17eecdf764beeb005a652ae</tt>
+** <tt>5221022ccabda84c30bad578b13c89eb3b9544ce149787e5b538175b1d1ba259cbb83321024d902e1a2fc7a8755ab5b694c575fce742c48d9ff192e63df5193e4c7afe1f9c52ae</tt>
+* <tt>wsh(multi(2,xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))</tt>
+** <tt>0020b92623201f3bb7c3771d45b2ad1d0351ea8fbf8cfe0a0e570264e1075fa1948f</tt>
+** <tt>002036a08bbe4923af41cf4316817c93b8d37e2f635dd25cfff06bd50df6ae7ea203</tt>
+** <tt>0020a96e7ab4607ca6b261bfe3245ffda9c746b28d3f59e83d34820ec0e2b36c139c</tt>
+* <tt>sh(wsh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232)))</tt>
+** <tt>a9147fc63e13dc25e8a95a3cee3d9a714ac3afd96f1e87</tt>
+* <tt>wsh(multi(20,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9,KzRedjSwMggebB3VufhbzpYJnvHfHe9kPJSjCU5QpJdAW3NSZxYS,Kyjtp5858xL7JfeV4PNRCKy2t6XvgqNNepArGY9F9F1SSPqNEMs3,L2D4RLHPiHBidkHS8ftx11jJk1hGFELvxh8LoxNQheaGT58dKenW,KyLPZdwY4td98bKkXqEXTEBX3vwEYTQo1yyLjX2jKXA63GBpmSjv))</tt>
+** <tt>0020376bd8344b8b6ebe504ff85ef743eaa1aa9272178223bcb6887e9378efb341ac</tt>
+* <tt>sh(wsh(multi(20,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9,KzRedjSwMggebB3VufhbzpYJnvHfHe9kPJSjCU5QpJdAW3NSZxYS,Kyjtp5858xL7JfeV4PNRCKy2t6XvgqNNepArGY9F9F1SSPqNEMs3,L2D4RLHPiHBidkHS8ftx11jJk1hGFELvxh8LoxNQheaGT58dKenW,KyLPZdwY4td98bKkXqEXTEBX3vwEYTQo1yyLjX2jKXA63GBpmSjv)))</tt>
+** <tt>a914c2c9c510e9d7f92fd6131e94803a8d34a8ef675e87</tt>
+
+Invalid descriptors
+
+* More than 15 keys in P2SH multisig: <tt>sh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232))</tt>
+* Invalid threshold: <tt>multi(a,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)</tt>
+* Threshold of 0: <tt>multi(0,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)</tt>
+* Threshold larger than keys: <tt>multi(3,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)</tt>
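+
+A minimal sketch (illustrative only; the helper name is hypothetical) of the <tt>multi()</tt>/<tt>sortedmulti()</tt> script the vectors above encode, limited to the case where the small-number opcodes apply:
+
+<pre>
+def multi_script(k: int, pubkeys: list, sort: bool = False) -> bytes:
+    # multi()/sortedmulti(): OP_k <push each key> OP_n OP_CHECKMULTISIG.
+    # sortedmulti() sorts the serialized keys lexicographically first.
+    if sort:
+        pubkeys = sorted(pubkeys)
+    n = len(pubkeys)
+    assert 1 <= k <= n <= 16, "OP_1..OP_16 encoding only; larger n needs a pushdata"
+    script = bytes([0x50 + k])
+    for key in pubkeys:
+        script += bytes([len(key)]) + key
+    return script + bytes([0x50 + n, 0xae])
+</pre>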
==Backwards Compatibility==
diff --git a/bip-0384.mediawiki b/bip-0384.mediawiki
index e735d74..02d6f76 100644
--- a/bip-0384.mediawiki
+++ b/bip-0384.mediawiki
@@ -3,7 +3,7 @@
Layer: Applications
Title: combo() Output Script Descriptors
Author: Pieter Wuille <pieter@wuille.net>
- Andrew Chow <andrew@achow101.com>
+ Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0384
Status: Draft
@@ -35,7 +35,38 @@ If the key is/has a compressed public key, then P2WPKH and P2SH-P2WPKH scripts a
==Test Vectors==
-TBD
+Valid descriptors followed by the scripts they produce. Descriptors involving derived child keys will have the 0th and 1st scripts in additional sub-bullets.
+
+* <tt>combo(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)</tt>
+** <tt>2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac</tt>
+** <tt>76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac</tt>
+** <tt>00149a1c78a507689f6f54b847ad1cef1e614ee23f1e</tt>
+** <tt>a91484ab21b1b2fd065d4504ff693d832434b6108d7b87</tt>
+* <tt>combo(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)</tt>
+** <tt>4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac</tt>
+** <tt>76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac</tt>
+* <tt>combo([01234567]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)</tt>
+** <tt>2102d2b36900396c9282fa14628566582f206a5dd0bcc8d5e892611806cafb0301f0ac</tt>
+** <tt>76a91431a507b815593dfc51ffc7245ae7e5aee304246e88ac</tt>
+** <tt>001431a507b815593dfc51ffc7245ae7e5aee304246e</tt>
+** <tt>a9142aafb926eb247cb18240a7f4c07983ad1f37922687</tt>
+* <tt>combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)</tt>
+** Child 0
+*** <tt>2102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e076ac</tt>
+*** <tt>76a914f90e3178ca25f2c808dc76624032d352fdbdfaf288ac</tt>
+*** <tt>0014f90e3178ca25f2c808dc76624032d352fdbdfaf2</tt>
+*** <tt>a91473e39884cb71ae4e5ac9739e9225026c99763e6687</tt>
+** Child 1
+*** <tt>21032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ecac</tt>
+*** <tt>76a914a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b788ac</tt>
+*** <tt>0014a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b7</tt>
+*** <tt>a91473e39884cb71ae4e5ac9739e9225026c99763e6687</tt>
+
+Invalid descriptors
+
+* <tt>combo()</tt> in <tt>sh</tt>: <tt>sh(combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))</tt>
+* <tt>combo()</tt> in <tt>wsh</tt>: <tt>wsh(combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))</tt>
+* Script in <tt>combo()</tt>: <tt>combo(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))</tt>
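+
+A self-contained sketch (illustrative only; function names are hypothetical, and it assumes a <tt>hashlib</tt> build with <tt>ripemd160</tt>) of the script set <tt>combo()</tt> expands to, matching the bullet counts above:
+
+<pre>
+import hashlib
+
+def hash160(data: bytes) -> bytes:
+    return hashlib.new('ripemd160', hashlib.sha256(data).digest()).digest()
+
+def combo_scripts(pubkey: bytes) -> list:
+    # combo(KEY): P2PK and P2PKH always; P2WPKH and P2SH-P2WPKH only for
+    # compressed (33-byte) public keys.
+    scripts = [
+        bytes([len(pubkey)]) + pubkey + b'\xac',          # pk
+        b'\x76\xa9\x14' + hash160(pubkey) + b'\x88\xac',  # pkh
+    ]
+    if len(pubkey) == 33:
+        wpkh = b'\x00\x14' + hash160(pubkey)              # wpkh
+        scripts.append(wpkh)
+        scripts.append(b'\xa9\x14' + hash160(wpkh) + b'\x87')  # sh(wpkh)
+    return scripts
+</pre>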
==Backwards Compatibility==
diff --git a/bip-0385.mediawiki b/bip-0385.mediawiki
index 5c46d75..3e922b3 100644
--- a/bip-0385.mediawiki
+++ b/bip-0385.mediawiki
@@ -3,7 +3,7 @@
Layer: Applications
Title: raw() and addr() Output Script Descriptors
Author: Pieter Wuille <pieter@wuille.net>
- Andrew Chow <andrew@achow101.com>
+ Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0385
Status: Draft
@@ -44,7 +44,25 @@ The output script produced by this descriptor is the output script produced by t
==Test Vectors==
-TBD
+Valid descriptors followed by the scripts they produce.
+
+* <tt>raw(deadbeef)</tt>
+** <tt>deadbeef</tt>
+* <tt>raw(512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae)</tt>
+** <tt>512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae</tt>
+* <tt>raw(a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87)</tt>
+** <tt>a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87</tt>
+* <tt>addr(3PUNyaW7M55oKWJ3kDukwk9bsKvryra15j)</tt>
+** <tt>a914eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee87</tt>
+
+Invalid descriptors
+
+* Non-hex script: <tt>raw(asdf)</tt>
+* Invalid address: <tt>addr(asdf)</tt>
+* <tt>raw</tt> nested in <tt>sh</tt>: <tt>sh(raw(deadbeef))</tt>
+* <tt>raw</tt> nested in <tt>wsh</tt>: <tt>wsh(raw(deadbeef))</tt>
+* <tt>addr</tt> nested in <tt>sh</tt>: <tt>sh(addr(3PUNyaW7M55oKWJ3kDukwk9bsKvryra15j))</tt>
+* <tt>addr</tt> nested in <tt>wsh</tt>: <tt>wsh(addr(3PUNyaW7M55oKWJ3kDukwk9bsKvryra15j))</tt>
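+
+<tt>raw()</tt> simply emits the given hex as the output script; <tt>addr()</tt> decodes the address and emits the corresponding script. A sketch of the base58 P2SH case (illustrative only; function names are hypothetical):
+
+<pre>
+import hashlib
+
+B58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
+
+def base58check_decode(s: str) -> bytes:
+    # Returns the payload (version byte + data) after verifying the 4-byte checksum.
+    num = 0
+    for c in s:
+        num = num * 58 + B58.index(c)
+    raw = num.to_bytes((num.bit_length() + 7) // 8, 'big')
+    raw = b'\x00' * (len(s) - len(s.lstrip('1'))) + raw   # leading '1's are zero bytes
+    payload, checksum = raw[:-4], raw[-4:]
+    assert hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4] == checksum
+    return payload
+
+def addr_p2sh_script(address: str) -> bytes:
+    # addr() with a mainnet base58 P2SH address: OP_HASH160 <20-byte hash> OP_EQUAL
+    payload = base58check_decode(address)
+    assert payload[0] == 0x05 and len(payload) == 21
+    return b'\xa9\x14' + payload[1:] + b'\x87'
+</pre>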
==Backwards Compatibility==
diff --git a/bip-0386.mediawiki b/bip-0386.mediawiki
index d90e801..759887d 100644
--- a/bip-0386.mediawiki
+++ b/bip-0386.mediawiki
@@ -3,7 +3,7 @@
Layer: Applications
Title: tr() Output Script Descriptors
Author: Pieter Wuille <pieter@wuille.net>
- Andrew Chow <andrew@achow101.com>
+ Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0386
Status: Draft
@@ -84,7 +84,28 @@ An additional key expression is defined only for use within a <tt>tr()</tt> desc
==Test Vectors==
-TBD
+Valid descriptors followed by the scripts they produce. Descriptors involving derived child keys will have the 0th, 1st, and 2nd scripts listed.
+
+* <tt>tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)</tt>
+** <tt>512077aab6e066f8a7419c5ab714c12c67d25007ed55a43cadcacb4d7a970a093f11</tt>
+* <tt>tr(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)</tt>
+** <tt>512077aab6e066f8a7419c5ab714c12c67d25007ed55a43cadcacb4d7a970a093f11</tt>
+* <tt>tr(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/0/*,pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/1/*))</tt>
+** <tt>512078bc707124daa551b65af74de2ec128b7525e10f374dc67b64e00ce0ab8b3e12</tt>
+** <tt>512001f0a02a17808c20134b78faab80ef93ffba82261ccef0a2314f5d62b6438f11</tt>
+** <tt>512021024954fcec88237a9386fce80ef2ced5f1e91b422b26c59ccfc174c8d1ad25</tt>
+* <tt>tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,pk(669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0))</tt>
+** <tt>512017cf18db381d836d8923b1bdb246cfcd818da1a9f0e6e7907f187f0b2f937754</tt>
+* <tt>tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,{pk(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/0),{{pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL),pk(02df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e076)},pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)}})</tt>
+** <tt>512071fff39599a7b78bc02623cbe814efebf1a404f5d8ad34ea80f213bd8943f574</tt>
+
+Invalid Descriptors
+
+* Uncompressed private key: <tt>tr(5kyzdueo39z3fprtux2qbbwgnnp5ztd7yyr2sc1j299sbcnwjss)</tt>
+* Uncompressed public key: <tt>tr(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)</tt>
+* <tt>tr()</tt> nested in <tt>wsh</tt>: <tt>wsh(tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))</tt>
+* <tt>tr()</tt> nested in <tt>sh</tt>: <tt>sh(tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))</tt>
+* <tt>pkh()</tt> nested in <tt>tr</tt>: <tt>tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd, pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))</tt>
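+
+For completeness, the witness program the <tt>tr()</tt> vectors above encode is just OP_1 followed by the 32-byte x-only output key; computing that output key requires the BIP 341 TapTweak and secp256k1 point arithmetic, which is out of scope for this sketch (illustrative only; names are hypothetical):
+
+<pre>
+import hashlib
+
+def tagged_hash(tag: str, data: bytes) -> bytes:
+    # BIP 340 tagged hash: SHA256(SHA256(tag) || SHA256(tag) || data)
+    t = hashlib.sha256(tag.encode()).digest()
+    return hashlib.sha256(t + t + data).digest()
+
+def taproot_script(output_key_xonly: bytes) -> bytes:
+    # tr(): segwit v1 output, OP_1 <32-byte x-only output key>
+    assert len(output_key_xonly) == 32
+    return bytes([0x51, 0x20]) + output_key_xonly
+
+# The output key is the internal key tweaked by
+# t = tagged_hash("TapTweak", internal_key || merkle_root); the point addition
+# itself needs secp256k1 arithmetic (see the BIP 340/341 reference code).
+</pre>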
==Backwards Compatibility==
diff --git a/bip-0389.mediawiki b/bip-0389.mediawiki
new file mode 100644
index 0000000..500d7e3
--- /dev/null
+++ b/bip-0389.mediawiki
@@ -0,0 +1,109 @@
+<pre>
+ BIP: 389
+ Layer: Applications
+ Title: Multipath Descriptor Key Expressions
+ Author: Ava Chow <me@achow101.com>
+ Comments-Summary: No comments yet.
+ Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0389
+ Status: Draft
+ Type: Informational
+ Created: 2022-07-26
+ License: BSD-2-Clause
+</pre>
+
+==Abstract==
+
+This document specifies a modification to Key Expressions of Descriptors that are described in BIP 380.
+This modification allows Key Expressions to indicate BIP 32 derivation path steps that can have multiple values.
+
+==Copyright==
+
+This BIP is licensed under the BSD 2-clause license.
+
+==Motivation==
+
+Descriptors can describe the scripts that are used in a wallet, but wallets often require at least two descriptors for all of the scripts that they watch for.
+Wallets typically have one descriptor for producing receiving addresses, and the other for change addresses.
+These descriptors are often extremely similar - they produce the same types of scripts, derive keys from the same master key, and use derivation paths that are almost identical.
+The only difference is in the derivation path, where a single step differs between the two descriptors.
+Thus it is useful to have a notation to represent both descriptors as a single descriptor where one of the derivation steps is a pair of values.
+
+==Specification==
+
+For extended keys and their derivations paths in a Key Expression, BIP 380 states:
+
+* <tt>xpub</tt> encoded extended public key or <tt>xprv</tt> encoded extended private key (as defined in BIP 32)
+** Followed by zero or more <tt>/NUM</tt> or <tt>/NUMh</tt> path elements indicating BIP 32 derivation steps to be taken after the given extended key.
+** Optionally followed by a single <tt>/*</tt> or <tt>/*h</tt> final step to denote all direct unhardened or hardened children.
+
+This is modified to state:
+
+* <tt>xpub</tt> encoded extended public key or <tt>xprv</tt> encoded extended private key (as defined in BIP 32)
+** Followed by zero or more <tt>/NUM</tt> (may be followed by <tt>h</tt>, <tt>H</tt>, or <tt>'</tt> to indicate a hardened step) path elements indicating BIP 32 derivation steps to be taken after the given extended key.
+** Followed by zero or one <tt>/<NUM;NUM</tt> (each <tt>NUM</tt> may be followed by <tt>h</tt>, <tt>H</tt>, or <tt>'</tt> to indicate a hardened step) path element indicating a tuple of BIP 32 derivation steps to be taken after the given extended key.
+*** Followed by zero or more <tt>;NUM</tt> (may be followed by <tt>h</tt>, <tt>H</tt>, or <tt>'</tt> to indicate a hardened step) additional tuple values of BIP 32 derivation steps
+*** Followed by a single <tt>></tt>
+** Followed by zero or more <tt>/NUM</tt> (may be followed by <tt>h</tt>, <tt>H</tt>, or <tt>'</tt> to indicate a hardened step) path elements indicating BIP 32 derivation steps to be taken after the given extended key.
+** Optionally followed by a single <tt>/*</tt> (may be followed by <tt>h</tt>, <tt>H</tt>, or <tt>'</tt> to indicate a hardened step) final step to denote all direct unhardened or hardened children.
+
+When a <tt>/<NUM;NUM;...;NUM></tt> is encountered, parsers should account for the presence of multiple descriptors: the first descriptor uses the first <tt>NUM</tt>, the second descriptor uses the second <tt>NUM</tt>, and so on, until each <tt>NUM</tt> is accounted for in the production of public keys, scripts, and addresses, as well as in descriptor import and export operations.
+Descriptors that contain multiple Key Expressions that each have a <tt>/<NUM;NUM;...;NUM></tt> must have tuples of exactly the same length so that they are derived in lockstep in the same way that <tt>/*</tt> paths in multiple Key Expressions are handled.
+
+The common use case for this is to represent descriptors for producing receiving and change addresses.
+When interpreting for this use case, wallets should use the first descriptor for producing receiving addresses, and the second descriptor for producing change addresses.
+For this use case, the element will commonly be the value <tt>/<0;1></tt>.
+
+Note that only one <tt>/<NUM;NUM;...;NUM></tt> specifier is allowed in a Key Expression.
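+
+A minimal expansion sketch (illustrative only, not the reference implementation; it does no further validation such as rejecting multipath specifiers inside a key origin):
+
+<pre>
+import re
+
+def expand_multipath(desc: str) -> list:
+    # Expand /<NUM;NUM;...;NUM> specifiers into the single-path descriptors
+    # they represent, deriving all tuples in lockstep.
+    tuples = [t.split(';') for t in re.findall(r'/<([^>]*)>', desc)]
+    if not tuples:
+        return [desc]
+    n = len(tuples[0])
+    if any(len(t) != n or '' in t for t in tuples):
+        raise ValueError('mismatched or empty multipath specifiers')
+    expanded = []
+    for i in range(n):
+        d = desc
+        for t in tuples:
+            d = re.sub(r'/<[^>]*>', '/' + t[i], d, count=1)
+        expanded.append(d)
+    return expanded
+
+# expand_multipath("pk(xpub.../<0;1>)") == ["pk(xpub.../0)", "pk(xpub.../1)"]
+</pre>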
+
+==Test Vectors==
+
+Valid multipath descriptors followed by the descriptors they expand into as sub-bullets.
+
+* <tt>pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/<0;1>)</tt>
+** <tt>pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)</tt>
+** <tt>pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/1)</tt>
+* <tt>pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/<2147483647h;0>/0)</tt>
+** <tt>pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647h/0)</tt>
+** <tt>pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/0/0)</tt>
+* <tt>wpkh([ffffffff/13h]xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/<1;3>/2/*)</tt>
+** <tt>wpkh([ffffffff/13h]xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)</tt>
+** <tt>wpkh([ffffffff/13h]xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/3/2/*)</tt>
+* <tt>multi(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/<1;2>/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/<3;4>/0/*)</tt>
+** <tt>multi(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/1/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/3/0/*)</tt>
+** <tt>multi(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/2/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/4/0/*)</tt>
+* <tt>pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/<0;1;2>)</tt>
+** <tt>pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/0)</tt>
+** <tt>pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/1)</tt>
+** <tt>pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2)</tt>
+* <tt>sh(multi(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/<1;2;3>/0/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/*,xpub661MyMwAqRbcGDZQUKLqmWodYLcoBQnQH33yYkkF3jjxeLvY8qr2wWGEWkiKFaaQfJCoi3HeEq3Dc5DptfbCyjD38fNhSqtKc1UHaP4ba3t/0/0/<3;4;5>/*))</tt>
+** <tt>sh(multi(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/1/0/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/*,xpub661MyMwAqRbcGDZQUKLqmWodYLcoBQnQH33yYkkF3jjxeLvY8qr2wWGEWkiKFaaQfJCoi3HeEq3Dc5DptfbCyjD38fNhSqtKc1UHaP4ba3t/0/0/3/*))</tt>
+** <tt>sh(multi(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/2/0/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/*,xpub661MyMwAqRbcGDZQUKLqmWodYLcoBQnQH33yYkkF3jjxeLvY8qr2wWGEWkiKFaaQfJCoi3HeEq3Dc5DptfbCyjD38fNhSqtKc1UHaP4ba3t/0/0/4/*))</tt>
+** <tt>sh(multi(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/3/0/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/*,xpub661MyMwAqRbcGDZQUKLqmWodYLcoBQnQH33yYkkF3jjxeLvY8qr2wWGEWkiKFaaQfJCoi3HeEq3Dc5DptfbCyjD38fNhSqtKc1UHaP4ba3t/0/0/5/*))</tt>
+
+Invalid descriptors
+
+* Multiple multipath specifiers: <tt>pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/<0;1>/<2;3>)</tt>
+* Multipath specifier in origin: <tt>pkh([deadbeef/<0;1>]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/0)</tt>
+* Multipath specifiers of mismatched lengths: <tt>tr(xpub661MyMwAqRbcF3yVrV2KyYetLMYA5mCbv4BhrKwUrFE9LZM6JRR1AEt8Jq4V4C8LwtTke6YEEdCZqgXp85YRk2j74EfJKhe3QybQ9kcUjs4/<6;7;8;9>/*,{pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/<1;2;3>/0/*),pk(xpub661MyMwAqRbcGDZQUKLqmWodYLcoBQnQH33yYkkF3jjxeLvY8qr2wWGEWkiKFaaQfJCoi3HeEq3Dc5DptfbCyjD38fNhSqtKc1UHaP4ba3t/0/0/<3;4;5>/*)})</tt>
+* Multipath specifiers of mismatched lengths: <tt>sh(multi(2,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/<1;2;3>/0/*,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0/*,xprv9s21ZrQH143K3jUwNHoqQNrtzJnJmx4Yup8NkNLdVQCymYbPbJXnPhwkfTfxZfptcs3rLAPUXS39oDLgrNKQGwbGsEmJJ8BU3RzQuvShEG4/0/0/<3;4>/*))</tt>
+* Empty multipath specifier: <tt>wpkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/<>/*)</tt>
+* Missing multipath start: <tt>wpkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/0>/*)</tt>
+* Missing multipath end: <tt>wpkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/<0/*)</tt>
+* Missing index in multipath specifier: <tt>wpkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/<0;>/*)</tt>
+
+==Backwards Compatibility==
+
+This is an addition to the Key Expressions defined in BIP 380.
+Key Expressions using the format described in BIP 380 are compatible with this modification and parsers that implement this will still be able to parse such descriptors.
+However, as this is an addition to Key Expressions, older parsers will not be able to understand such descriptors.
+
+This modification to Key Expressions uses two new characters: <tt><</tt> and <tt>;</tt>.
+These are part of the descriptor character set and so are covered by the checksum algorithm.
+As these are previously unused characters, old parsers will not accidentally mistake them for indicating something else.
+
+This proposal is in contrast to similar proposals such as BIP 88 which allow for multiple derivation indexes in a single element.
+This limitation exists in order to reduce the number of descriptors that are expanded, avoid confusion about how to expand the descriptor, and avoid having expanded descriptors that users are not expecting.
+
+==Reference Implementation==
+
+https://github.com/bitcoin/bitcoin/pull/22838
diff --git a/scripts/buildtable.pl b/scripts/buildtable.pl
index 53a126c..292f1ee 100755
--- a/scripts/buildtable.pl
+++ b/scripts/buildtable.pl
@@ -89,7 +89,7 @@ my %DefinedLicenses = (
);
my %GrandfatheredPD = map { $_ => undef } qw(9 36 37 38 42 49 50 60 65 67 69 74 80 81 83 90 99 105 107 109 111 112 113 114 122 124 125 126 130 131 132 133 140 141 142 143 144 146 147 150 151 152);
my %TolerateMissingLicense = map { $_ => undef } qw(1 10 11 12 13 14 15 16 21 31 33 34 35 39 43 44 45 47 61 64 68 70 71 72 73 101 102 106 120 121);
-my %TolerateTitleTooLong = map { $_ => undef } qw(39 44 45 47 49 60 67 68 69 73 74 75 80 81 99 105 106 109 113 122 126 131 143 145 147 173);
+my %TolerateTitleTooLong = map { $_ => undef } qw(39 44 45 47 49 60 67 68 69 73 74 75 80 81 99 105 106 109 113 122 126 131 143 145 147 173 327);
my %emails;
@@ -125,7 +125,7 @@ while (++$bipnum <= $topbip) {
} elsif ($field eq 'Title') {
$title = $val;
my $title_len = length($title);
- die "$fn has too-long TItle ($title_len > 44 char max)" if $title_len > 44 and not exists $TolerateTitleTooLong{$bipnum};
+ die "$fn has too-long Title ($title_len > 44 char max)" if $title_len > 44 and not exists $TolerateTitleTooLong{$bipnum};
} elsif ($field eq 'Author') {
$val =~ m/^(\S[^<@>]*\S) \<([^@>]*\@[\w.-]+\.\w+)\>$/ or die "Malformed Author line in $fn";
my ($authorname, $authoremail) = ($1, $2);
diff --git a/scripts/diffcheck.sh b/scripts/diffcheck.sh
new file mode 100755
index 0000000..4e4c459
--- /dev/null
+++ b/scripts/diffcheck.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+diff README.mediawiki /tmp/table.mediawiki | grep '^[<>] |' >/tmp/after.diff || true
+if git checkout HEAD^ && scripts/buildtable.pl >/tmp/table.mediawiki 2>/dev/null; then
+ diff README.mediawiki /tmp/table.mediawiki | grep '^[<>] |' >/tmp/before.diff || true
+ newdiff=$(diff -s /tmp/before.diff /tmp/after.diff -u | grep '^+')
+ if [ -n "$newdiff" ]; then
+ echo "$newdiff"
+ exit 1
+ fi
+else
+ echo 'Cannot build previous commit table for comparison'
+fi