-rw-r--r--  .github/workflows/github-action-checks.yml  | 19
-rw-r--r--  .travis.yml  | 7
-rw-r--r--  README.mediawiki  | 64
-rw-r--r--  bip-0011.mediawiki  | 2
-rw-r--r--  bip-0012.mediawiki  | 2
-rw-r--r--  bip-0070.mediawiki  | 2
-rw-r--r--  bip-0084.mediawiki  | 2
-rw-r--r--  bip-0093.mediawiki  | 599
-rw-r--r--  bip-0118.mediawiki  | 44
-rw-r--r--  bip-0151.mediawiki  | 3
-rw-r--r--  bip-0158.mediawiki  | 6
-rw-r--r--  bip-0174.mediawiki  | 28
-rw-r--r--  bip-0176.mediawiki  | 14
-rw-r--r--  bip-0300.mediawiki  | 229
-rw-r--r--  bip-0300/m1-cli.png  | bin 164710 -> 184284 bytes
-rw-r--r--  bip-0300/m1-gui.jpg  | bin 92185 -> 90712 bytes
-rw-r--r--  bip-0301.mediawiki  | 14
-rw-r--r--  bip-0324.mediawiki  | 590
-rw-r--r--  bip-0324/ellswift_decode_test_vectors.csv  | 77
-rw-r--r--  bip-0324/garbage_terminator.png  | bin 0 -> 267163 bytes
-rw-r--r--  bip-0324/gen_test_vectors.py  | 418
-rw-r--r--  bip-0324/packet_encoding_test_vectors.csv  | 8
-rw-r--r--  bip-0324/reference.py  | 649
-rw-r--r--  bip-0324/run_test_vectors.py  | 69
-rw-r--r--  bip-0324/secp256k1_test_vectors.py  | 52
-rw-r--r--  bip-0324/test_sage_decoding.py  | 78
-rw-r--r--  bip-0324/xswiftec_inv_test_vectors.csv  | 33
-rw-r--r--  bip-0324/xswiftec_test_vectors.csv  | 33
-rw-r--r--  bip-0327.mediawiki  | 829
-rw-r--r--  bip-0327/gen_vectors_helper.py  | 184
-rw-r--r--  bip-0327/reference.py  | 880
-rwxr-xr-x  bip-0327/tests.sh  | 8
-rw-r--r--  bip-0327/vectors/det_sign_vectors.json  | 144
-rw-r--r--  bip-0327/vectors/key_agg_vectors.json  | 88
-rw-r--r--  bip-0327/vectors/key_sort_vectors.json  | 18
-rw-r--r--  bip-0327/vectors/nonce_agg_vectors.json  | 51
-rw-r--r--  bip-0327/vectors/nonce_gen_vectors.json  | 44
-rw-r--r--  bip-0327/vectors/sig_agg_vectors.json  | 151
-rw-r--r--  bip-0327/vectors/sign_verify_vectors.json  | 212
-rw-r--r--  bip-0327/vectors/tweak_vectors.json  | 84
-rw-r--r--  bip-0329.mediawiki  | 145
-rw-r--r--  bip-0330.mediawiki  | 200
-rw-r--r--  bip-0330/bisection.png  | bin 60787 -> 0 bytes
-rw-r--r--  bip-0330/recon_scheme_merged.png  | bin 113169 -> 42425 bytes
-rw-r--r--  bip-0340.mediawiki  | 63
-rw-r--r--  bip-0340/reference.py  | 9
-rw-r--r--  bip-0340/test-vectors.csv  | 4
-rw-r--r--  bip-0340/test-vectors.py  | 20
-rw-r--r--  bip-0341.mediawiki  | 25
-rw-r--r--  bip-0342.mediawiki  | 2
-rw-r--r--  bip-0343.mediawiki  | 2
-rw-r--r--  bip-0350.mediawiki  | 2
-rw-r--r--  bip-0351.mediawiki  | 263
-rw-r--r--  bip-0370.mediawiki  | 26
-rw-r--r--  bip-0371.mediawiki  | 4
-rw-r--r--  bip-0372.mediawiki  | 191
-rw-r--r--  bip-0380.mediawiki  | 1
-rwxr-xr-x  scripts/buildtable.pl  | 6
-rwxr-xr-x  scripts/diffcheck.sh  | 13
59 files changed, 6366 insertions, 345 deletions
diff --git a/.github/workflows/github-action-checks.yml b/.github/workflows/github-action-checks.yml
new file mode 100644
index 0000000..bfc014b
--- /dev/null
+++ b/.github/workflows/github-action-checks.yml
@@ -0,0 +1,19 @@
+name: GitHub Actions Check
+run-name: ${{ github.actor }} Checks 🚀
+on: [push, pull_request]
+jobs:
+  Link-Format-Checks:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - run: scripts/link-format-chk.sh
+  Build-Table-Checks:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - run: scripts/buildtable.pl >/tmp/table.mediawiki || exit 1
+  Diff-Checks:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - run: scripts/diffcheck.sh
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 70d339a..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-os: linux
-language: generic
-script:
-  - scripts/link-format-chk.sh
-  - scripts/buildtable.pl >/tmp/table.mediawiki || exit 1
-  - diff README.mediawiki /tmp/table.mediawiki | grep '^[<>] |' >/tmp/after.diff || true
-  - if git checkout HEAD^ && scripts/buildtable.pl >/tmp/table.mediawiki 2>/dev/null; then diff README.mediawiki /tmp/table.mediawiki | grep '^[<>] |' >/tmp/before.diff || true; newdiff=$(diff -s /tmp/before.diff /tmp/after.diff -u | grep '^+'); if [ -n "$newdiff" ]; then echo "$newdiff"; exit 1; fi; else echo 'Cannot build previous commit table for comparison'; fi
diff --git a/README.mediawiki b/README.mediawiki
index 9a311d5..96b8df3 100644
--- a/README.mediawiki
+++ b/README.mediawiki
@@ -440,7 +440,7 @@ Those proposing changes should consider that ultimately consent may rest with th
| Derivation scheme for P2WPKH based accounts
| Pavol Rusnak
| Informational
-| Draft
+| Final
|-
| [[bip-0085.mediawiki|85]]
| Applications
@@ -484,6 +484,13 @@ Those proposing changes should consider that ultimately consent may rest with th
| Standard
| Final
|-
+| [[bip-0093.mediawiki|93]]
+| Applications
+| codex32: Checksummed SSSS-aware BIP32 seeds
+| Leon Olsson Curr, Pearlwort Sneed
+| Informational
+| Draft
+|-
| [[bip-0098.mediawiki|98]]
| Consensus (soft fork)
| Fast Merkle Trees
@@ -825,7 +832,7 @@ Those proposing changes should consider that ultimately consent may rest with th
| Peer-to-Peer Communication Encryption
| Jonas Schnelli
| Standard
-| Withdrawn
+| Replaced
|- style="background-color: #cfffcf"
| [[bip-0152.mediawiki|152]]
| Peer Services
@@ -980,6 +987,13 @@ Those proposing changes should consider that ultimately consent may rest with th
| Karl-Johan Alm
| Standard
| Draft
+|-
+| [[bip-0324.mediawiki|324]]
+| Peer Services
+| Version 2 P2P Encrypted Transport Protocol
+| Dhruv Mehta, Tim Ruffing, Jonas Schnelli, Pieter Wuille
+| Standard
+| Draft
|- style="background-color: #ffffcf"
| [[bip-0325.mediawiki|325]]
| Applications
@@ -990,11 +1004,25 @@ Those proposing changes should consider that ultimately consent may rest with th
|-
| [[bip-0326.mediawiki|326]]
| Applications
-| Anti-fee-sniping protection in taproot transactions
+| Anti-fee-sniping in taproot transactions
| Chris Belcher
| Informational
| Draft
|-
+| [[bip-0327.mediawiki|327]]
+|
+| MuSig2 for BIP340-compatible Multi-Signatures
+| Jonas Nick, Tim Ruffing, Elliott Jin
+| Informational
+| Draft
+|-
+| [[bip-0329.mediawiki|329]]
+| Applications
+| Wallet Labels Export Format
+| Craig Raw
+| Informational
+| Draft
+|-
| [[bip-0330.mediawiki|330]]
| Peer Services
| Transaction announcements reconciliation
@@ -1015,40 +1043,47 @@ Those proposing changes should consider that ultimately consent may rest with th
| Suhas Daftuar
| Standard
| Draft
-|-
+|- style="background-color: #cfffcf"
| [[bip-0340.mediawiki|340]]
|
| Schnorr Signatures for secp256k1
| Pieter Wuille, Jonas Nick, Tim Ruffing
| Standard
-| Draft
-|-
+| Final
+|- style="background-color: #cfffcf"
| [[bip-0341.mediawiki|341]]
| Consensus (soft fork)
| Taproot: SegWit version 1 spending rules
| Pieter Wuille, Jonas Nick, Anthony Towns
| Standard
-| Draft
-|-
+| Final
+|- style="background-color: #cfffcf"
| [[bip-0342.mediawiki|342]]
| Consensus (soft fork)
| Validation of Taproot Scripts
| Pieter Wuille, Jonas Nick, Anthony Towns
| Standard
-| Draft
-|- style="background-color: #ffffcf"
+| Final
+|- style="background-color: #cfffcf"
| [[bip-0343.mediawiki|343]]
| Consensus (soft fork)
| Mandatory activation of taproot deployment
| Shinobius, Michael Folkson
| Standard
-| Proposed
+| Final
|-
| [[bip-0350.mediawiki|350]]
| Applications
| Bech32m format for v1+ witness addresses
| Pieter Wuille
| Standard
+| Final
+|-
+| [[bip-0351.mediawiki|351]]
+| Applications
+| Private Payments
+| Alfred Hodler, Clark Moody
+| Informational
| Draft
|-
| [[bip-0370.mediawiki|370]]
@@ -1065,6 +1100,13 @@ Those proposing changes should consider that ultimately consent may rest with th
| Standard
| Draft
|-
+| [[bip-0372.mediawiki|372]]
+| Applications
+| Pay-to-contract tweak fields for PSBT
+| Maxim Orlovsky
+| Standard
+| Draft
+|-
| [[bip-0380.mediawiki|380]]
| Applications
| Output Script Descriptors General Operation
diff --git a/bip-0011.mediawiki b/bip-0011.mediawiki
index 8375f55..7e9e1f6 100644
--- a/bip-0011.mediawiki
+++ b/bip-0011.mediawiki
@@ -54,7 +54,7 @@ A weaker argument is OP_CHECKMULTISIG should not be used because it pops one too
OP_CHECKMULTISIG is already supported by old clients and miners as a non-standard transaction type.
-https://github.com/gavinandresen/bitcoin-git/tree/op_eval
+https://github.com/gavinandresen/bitcoin-git/tree/77f21f1583deb89bf3fffe80fe9b181fedb1dd60
== Post History ==
diff --git a/bip-0012.mediawiki b/bip-0012.mediawiki
index 9cb3795..70069d6 100644
--- a/bip-0012.mediawiki
+++ b/bip-0012.mediawiki
@@ -75,7 +75,7 @@ Example of a transaction that must fail for both old and new miners/clients:
==Reference Implementation==
-https://github.com/gavinandresen/bitcoin-git/tree/op_eval
+https://github.com/gavinandresen/bitcoin-git/tree/77f21f1583deb89bf3fffe80fe9b181fedb1dd60
==See Also==
diff --git a/bip-0070.mediawiki b/bip-0070.mediawiki
index 28349ee..fce6023 100644
--- a/bip-0070.mediawiki
+++ b/bip-0070.mediawiki
@@ -314,7 +314,7 @@ http://datatracker.ietf.org/wg/jose/
Wikipedia's page on Invoices: http://en.wikipedia.org/wiki/Invoice
especially the list of Electronic Invoice standards
-sipa's payment protocol proposal: https://gist.github.com/1237788
+sipa's payment protocol proposal: https://gist.github.com/sipa/1237788
ThomasV's "Signed Aliases" proposal : http://ecdsa.org/bitcoin_URIs.html
diff --git a/bip-0084.mediawiki b/bip-0084.mediawiki
index dc5a05d..7f20217 100644
--- a/bip-0084.mediawiki
+++ b/bip-0084.mediawiki
@@ -5,7 +5,7 @@
Author: Pavol Rusnak <stick@satoshilabs.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0084
- Status: Draft
+ Status: Final
Type: Informational
Created: 2017-12-28
License: CC0-1.0
diff --git a/bip-0093.mediawiki b/bip-0093.mediawiki
new file mode 100644
index 0000000..da349fd
--- /dev/null
+++ b/bip-0093.mediawiki
@@ -0,0 +1,599 @@
+<pre>
+ BIP: 93
+ Layer: Applications
+ Title: codex32: Checksummed SSSS-aware BIP32 seeds
+ Author: Leon Olsson Curr and Pearlwort Sneed <pearlwort@wpsoftware.net>
+ Andrew Poelstra <andrew.poelstra@gmail.com>
+ Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0093
+ Status: Draft
+ Type: Informational
+ Created: 2023-02-13
+ License: BSD-3-Clause
+ Post-History: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2023-February/021469.html
+</pre>
+
+==Introduction==
+
+===Abstract===
+
+This document describes a standard for backing up and restoring the master seed of a
+[https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP-0032] hierarchical deterministic wallet, using Shamir's secret sharing.
+It includes an encoding format, a BCH error-correcting checksum, and algorithms for share generation and secret recovery.
+Secret data can be split into up to 31 shares.
+A minimum threshold of shares, which can be between 1 and 9, is needed to recover the secret; with fewer than the threshold, no information about the secret is recoverable.
+
+===Copyright===
+
+This document is licensed under the 3-clause BSD license.
+
+===Motivation===
+
+BIP-0032 master seed data is the source entropy used to derive all private keys in an HD wallet.
+Safely storing this secret data is the hardest and most important part of self-custody.
+However, there is a tension between security, which demands limiting the number of backups, and resilience, which demands widely replicated backups.
+Encrypting the seed does not change this fundamental tradeoff, since it leaves essentially the same problem of how to back up the encryption key(s).
+
+To allow users freedom to make this tradeoff, we use Shamir's secret sharing, which guarantees that any number of shares less than the threshold leaks no information about the secret.
+This approach allows increasing safety by widely distributing the generated shares, while also providing security against the compromise of one or more shares (as long as fewer than the threshold have been compromised).
+
+[https://github.com/satoshilabs/slips/blob/master/slip-0039.md SLIP-0039] has essentially the same motivations as this standard.
+However, unlike SLIP-0039,
+
+* this standard aims to be simple enough for hand computation
+* we use the bech32 alphabet rather than a word list, resulting in fixed-length compact encodings
+* we do not support multi-level secret sharing (splitting of shares), although it is technically possible and may be added in a future BIP
+* because of the need to support hand computation, we '''do not''' support passphrases or key hardening
+
+Users who demand a higher level of security for particular secrets, or have a general distrust in digital electronic devices, have the option of using hand computation to back up and restore secret data in an interoperable manner.
+In particular, all computations can be done with simple lookup tables.
+'''It is therefore possible to compute and verify checksums, and to split and recover seeds, entirely using pen and paper.'''
+For long-lived rarely-used seeds, the ability to hand-verify checksums has a significant benefit even for users who do not care to do any other part of this process by hand.
+It means that they can verify the integrity (against non-malicious tampering) of their shares regularly, say, on an annual basis, without needing to continually expose secret data to new hardware.
+
+The ability to compute properties by hand comes from our choice of a small field and our use of linear error correcting codes.
+It does not come with any reduction in security, as long as users use high-quality randomness.
+Note that hand computation is optional, the particular details of hand computation are outside the scope of this standard, and implementers do not need to be concerned with this possibility.
+
+[https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki BIP-0039] serves the same purpose as this standard: encoding master seeds for storage by users.
+However, BIP-0039 has no error-correcting ability, cannot sensibly be extended to support secret sharing, has no support for versioning or other metadata, and has many technical design decisions that make implementation and interoperability difficult (for example, the use of SHA-512 to derive seeds, or the use of 11-bit words).
+
+==Specification==
+
+===codex32===
+
+A codex32 string is similar to a bech32 string defined in [https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki BIP-0173].
+It reuses the base-32 character set from BIP-0173, and consists of:
+
+* A human-readable part, which is the string "ms" (or "MS").
+* A separator, which is always "1".
+* A data part which is in turn subdivided into:
+** A threshold parameter, which MUST be a single digit between "2" and "9", or the digit "0".
+*** If the threshold parameter is "0" then the share index, defined below, MUST have a value of "s" (or "S").
+** An identifier consisting of 4 bech32 characters.
+** A share index, which is any bech32 character. Note that a share index value of "s" (or "S") is special and denotes the unshared secret (see section "Unshared Secret").
+** A payload which is a sequence of up to 74 bech32 characters. (However, see '''Long codex32 Strings''' below for an exception to this limit.)
+** A checksum which consists of 13 bech32 characters as described below.
+
+As with bech32 strings, a codex32 string MUST be entirely uppercase or entirely lowercase.
+For presentation, lowercase is usually preferable, but uppercase SHOULD be used for handwritten codex32 strings.
+If a codex32 string is encoded in a QR code, it SHOULD use the uppercase form, as this is encoded more compactly.
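+
+As a non-normative illustration, the following Python sketch splits a codex32 string into the parts listed above. The helper name <code>parse_codex32</code> is illustrative only, and the 15-character checksum of long codex32 strings (see below) is not handled.
+
+<source lang="python">
+def parse_codex32(s):
+    # A codex32 string must be entirely uppercase or entirely lowercase.
+    if s != s.lower() and s != s.upper():
+        raise ValueError("mixed-case codex32 string")
+    s = s.lower()
+    hrp, sep, data = s.partition("1")
+    # Structural minimum: threshold + identifier + index + checksum.
+    if hrp != "ms" or sep != "1" or len(data) < 1 + 4 + 1 + 13:
+        raise ValueError("not a codex32 string")
+    return {
+        "threshold": data[0],     # "0", or "2" through "9"
+        "identifier": data[1:5],  # 4 bech32 characters
+        "share_index": data[5],   # "s" denotes the unshared secret
+        "payload": data[6:-13],   # up to 74 characters
+        "checksum": data[-13:],   # 13 characters (15 for long strings)
+    }
+</source>
+
+For example, applied to the Test vector 1 string below, this yields threshold <code>0</code>, identifier <code>test</code>, and share index <code>s</code>.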
+
+===Checksum===
+
+The last thirteen characters of the data part form a checksum and contain no information.
+Valid strings MUST pass the criteria for validity specified by the Python 3 code snippet below.
+The function <code>ms32_verify_checksum</code> must return true when its argument is the data part as a list of integers representing the characters converted using the bech32 character table from BIP-0173.
+
+To construct a valid checksum given the data-part characters (excluding the checksum), the <code>ms32_create_checksum</code> function can be used.
+
+<source lang="python">
+MS32_CONST = 0x10ce0795c2fd1e62a
+
+def ms32_polymod(values):
+    GEN = [
+        0x19dc500ce73fde210,
+        0x1bfae00def77fe529,
+        0x1fbd920fffe7bee52,
+        0x1739640bdeee3fdad,
+        0x07729a039cfc75f5a,
+    ]
+    residue = 0x23181b3
+    for v in values:
+        b = (residue >> 60)
+        residue = (residue & 0x0fffffffffffffff) << 5 ^ v
+        for i in range(5):
+            residue ^= GEN[i] if ((b >> i) & 1) else 0
+    return residue
+
+def ms32_verify_checksum(data):
+    if len(data) >= 96: # See Long codex32 Strings
+        return ms32_verify_long_checksum(data)
+    if len(data) <= 93:
+        return ms32_polymod(data) == MS32_CONST
+    return False
+
+def ms32_create_checksum(data):
+    if len(data) > 80: # See Long codex32 Strings
+        return ms32_create_long_checksum(data)
+    values = data
+    polymod = ms32_polymod(values + [0] * 13) ^ MS32_CONST
+    return [(polymod >> 5 * (12 - i)) & 31 for i in range(13)]
+</source>
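+
+The following non-normative snippet shows how the functions above might be exercised on an actual string. The <code>CHARSET</code> constant is the bech32 character set from BIP-0173, the helper names are illustrative only, and the example string is Test vector 1 from below.
+
+<source lang="python">
+# Bech32 character set from BIP-0173; a character's value is its position here.
+CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
+
+def ms32_decode_data(s):
+    # Convert the data part (everything after "ms1") to a list of integers.
+    return [CHARSET.find(c) for c in s.lower().split("1", 1)[1]]
+
+def ms32_encode(data):
+    # Re-attach the (lowercase) human-readable part and separator.
+    return "ms1" + "".join(CHARSET[d] for d in data)
+
+# Verify Test vector 1, then rebuild its checksum from the data characters
+# that precede it.
+s = "ms10testsxxxxxxxxxxxxxxxxxxxxxxxxxx4nzvca9cmczlw"
+data = ms32_decode_data(s)
+assert ms32_verify_checksum(data)
+assert ms32_encode(data[:-13] + ms32_create_checksum(data[:-13])) == s
+</source>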
+
+===Error Correction===
+
+A codex32 string without a valid checksum MUST NOT be used.
+The checksum is designed to be an error correcting code that can correct up to 4 character substitutions, up to 8 unreadable characters (called erasures), or up to 13 consecutive erasures.
+Implementations SHOULD provide the user with a corrected valid codex32 string if possible.
+However, implementations SHOULD NOT automatically proceed with a corrected codex32 string without user confirmation of the corrected string, either by prompting the user, or returning a corrected string in an error message and allowing the user to repeat their action.
+We do not specify how an implementation should implement error correction. However, we recommend that:
+
+* Implementations make suggestions to substitute non-bech32 characters with bech32 characters in some situations, such as replacing "B" with "8", "O" with "0", "I" with "l", etc.
+* Implementations interpret "?" as an erasure.
+* Implementations optionally interpret other non-bech32 characters, or characters with incorrect case, as erasures.
+* If a string with 8 or fewer erasures can have those erasures filled in to make a valid codex32 string, then the implementation suggests such a string as a correction.
+* If a string consisting of valid bech32 characters in the proper case can be made valid by substituting 4 or fewer characters, then the implementation suggests such a string as a correction.
+
+===Unshared Secret===
+
+When the share index of a valid codex32 string (converted to lowercase) is the letter "s", we call the string a codex32 secret.
+The payload in a codex32 secret is a direct encoding of a BIP-0032 HD master seed.
+
+The master seed is decoded by converting the payload to bytes:
+
+* Translate the characters to 5-bit values using the bech32 character table from BIP-0173, most significant bit first.
+* Re-arrange those bits into groups of 8 bits. Any incomplete group at the end MUST be 4 bits or less, and is discarded.
+
+Note that unlike the decoding process in BIP-0173, we do NOT require that the incomplete group be all zeros.
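+
+As a non-normative illustration, the following sketch performs this conversion, taking the payload as a list of 5-bit integers obtained via the bech32 character table.
+
+<source lang="python">
+def payload_to_seed(payload_values):
+    # Concatenate the 5-bit groups, most significant bit first...
+    acc = bits = 0
+    seed = []
+    for v in payload_values:
+        acc = (acc << 5) | v
+        bits += 5
+        # ...and peel off complete bytes as they become available.
+        while bits >= 8:
+            bits -= 8
+            seed.append((acc >> bits) & 0xff)
+    # Any incomplete group at the end must be 4 bits or less; its value is
+    # discarded and (unlike BIP-0173) is NOT required to be zero.
+    if bits > 4:
+        raise ValueError("invalid payload length")
+    return bytes(seed)
+</source>
+
+Applied to the payload of Test vector 1 below (26 characters of <code>x</code>, each with value 6), this yields the master secret <code>318c6318c6318c6318c6318c6318c631</code> shown there.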
+
+For an unshared secret, the threshold parameter (the first character of the data part) is ignored (beyond the fact it must be a digit for the codex32 string to be valid).
+We recommend using the digit "0" for the threshold parameter in this case.
+The 4 character identifier also has no effect beyond aiding users in distinguishing between multiple different master seeds in cases where they have more than one.
+
+===Recovering Master Seed===
+
+When the share index of a valid codex32 string (converted to lowercase) is not the letter "s", we call the string a codex32 share.
+The first character of the data part indicates the threshold of the share, and it is required to be a non-"0" digit.
+
+In order to recover a master seed, one needs a set of valid codex32 shares such that:
+
+* All shares have the same threshold value, the same identifier, and the same length.
+* All of the share index values are distinct.
+* The number of codex32 shares is exactly equal to the (common) threshold value.
+
+If all the above conditions are satisfied, the <code>ms32_recover</code> function will return a codex32 secret when its argument is the list of codex32 shares with each share represented as a list of integers representing the characters converted using the bech32 character table from BIP-0173.
+
+<source lang="python">
+bech32_inv = [
+    0, 1, 20, 24, 10, 8, 12, 29, 5, 11, 4, 9, 6, 28, 26, 31,
+    22, 18, 17, 23, 2, 25, 16, 19, 3, 21, 14, 30, 13, 7, 27, 15,
+]
+
+def bech32_mul(a, b):
+    res = 0
+    for i in range(5):
+        res ^= a if ((b >> i) & 1) else 0
+        a *= 2
+        a ^= 41 if (32 <= a) else 0
+    return res
+
+def bech32_lagrange(l, x):
+    n = 1
+    c = []
+    for i in l:
+        n = bech32_mul(n, i ^ x)
+        m = 1
+        for j in l:
+            m = bech32_mul(m, (x if i == j else i) ^ j)
+        c.append(m)
+    return [bech32_mul(n, bech32_inv[i]) for i in c]
+
+def ms32_interpolate(l, x):
+    w = bech32_lagrange([s[5] for s in l], x)
+    res = []
+    for i in range(len(l[0])):
+        n = 0
+        for j in range(len(l)):
+            n ^= bech32_mul(w[j], l[j][i])
+        res.append(n)
+    return res
+
+def ms32_recover(l):
+    return ms32_interpolate(l, 16)
+</source>
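+
+As a non-normative illustration, recovery can be exercised against Test vector 2 below, reusing the illustrative <code>ms32_decode_data</code> and <code>ms32_encode</code> helpers from the Checksum section.
+
+<source lang="python">
+# Shares with index "a" and "c" from Test vector 2 (threshold 2).
+share_a = "MS12NAMEA320ZYXWVUTSRQPNMLKJHGFEDCAXRPP870HKKQRM"
+share_c = "MS12NAMECACDEFGHJKLMNPQRSTUVWXYZ023FTR2GDZMPY6PN"
+
+shares = [ms32_decode_data(share_a), ms32_decode_data(share_c)]
+secret = ms32_recover(shares)        # interpolate at the index of "s" (16)
+assert ms32_verify_checksum(secret)  # interpolation preserves valid checksums
+# Matches the secret share with index "s" given in Test vector 2.
+assert ms32_encode(secret) == "ms12names6xqguzttxkeqnjsjzv4jv3nz5k3kwgsphuh6evw"
+</source>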
+
+===Generating Shares===
+
+If we already have ''t'' valid codex32 strings such that:
+
+* All strings have the same threshold value ''t'', the same identifier, and the same length
+* All of the share index values are distinct
+
+Then we can derive additional shares with the <code>ms32_interpolate</code> function by passing it a list of exactly ''t'' of these codex32 strings, together with a fresh share index distinct from all of the existing share indexes.
+The newly derived share will have the provided share index.
+
+Once a user has generated ''n'' codex32 shares, they may discard the codex32 secret (if it exists).
+The ''n'' shares form a ''t'' of ''n'' Shamir's secret sharing scheme of a codex32 secret.
+
+There are two ways to create an initial set of ''t'' valid codex32 strings, depending on whether the user already has an existing master seed to split.
+
+====For a fresh master seed====
+
+In the case that the user wishes to generate a fresh master seed, the user generates random initial shares, as follows:
+
+# Choose a bitsize, between 128 and 512, which must be a multiple of 8.
+# Choose a threshold value ''t'' between 2 and 9, inclusive
+# Choose a 4 bech32 character identifier
+#* We do not define how to choose the identifier, beyond noting that it SHOULD be distinct for every master seed the user may need to disambiguate.
+# ''t'' many times, generate a random share by:
+## Take the next available letter from the bech32 alphabet, in alphabetical order, as <code>a</code>, <code>c</code>, <code>d</code>, ..., to be the share index
+## Set the first nine characters to be the prefix <code>ms1</code>, the threshold value ''t'', the 4-character identifier, and then the share index
+## Choose the next ceil(''bitlength / 5'') characters uniformly at random
+## Generate a valid checksum in accordance with the Checksum section, and append this to the resulting share
+
+The result will be ''t'' distinct shares, all with the same initial 8 characters, and a distinct share index as the 9th character.
+
+With this set of ''t'' codex32 shares, new shares can be derived as discussed above. This process generates a fresh master seed, whose value can be retrieved by running the recovery process on any ''t'' of these shares.
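+
+A non-normative Python sketch of this procedure is given below. It assumes <code>ms32_create_checksum</code> and the illustrative <code>CHARSET</code> constant from the Checksum section, and uses the <code>secrets</code> module as a stand-in for a high-quality randomness source.
+
+<source lang="python">
+import secrets
+
+def generate_fresh_shares(t, identifier, bitsize):
+    # Steps 1-3: parameters; the identifier must itself be 4 bech32 characters.
+    assert 2 <= t <= 9 and 128 <= bitsize <= 512 and bitsize % 8 == 0
+    assert len(identifier) == 4 and all(c in CHARSET for c in identifier)
+    payload_len = (bitsize + 4) // 5  # ceil(bitsize / 5)
+    shares = []
+    # Step 4: share indexes taken from the bech32 letters in alphabetical
+    # order, skipping "s" (reserved for the unshared secret).
+    for index in "acdefghjklmnpqrtuvwxyz"[:t]:
+        data = [CHARSET.find(c) for c in str(t) + identifier + index]
+        data += [secrets.randbelow(32) for _ in range(payload_len)]
+        data += ms32_create_checksum(data)  # long checksum for long payloads
+        shares.append("ms1" + "".join(CHARSET[d] for d in data))
+    return shares
+</source>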
+
+====For an existing master seed====
+
+Before generating shares for an existing master seed, it first must be converted into a codex32 secret, as described above.
+The conversion process consists of:
+
+# Choose a threshold value ''t'' between 2 and 9, inclusive
+# Choose a 4 bech32 character identifier
+#* We do not define how to choose the identifier, beyond noting that it SHOULD be distinct for every master seed the user may need to disambiguate.
+# Set the share index to <code>s</code>
+# Set the payload to a bech32 encoding of the master seed, padded with arbitrary bits
+# Generate a valid checksum in accordance with the Checksum section
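+
+A non-normative sketch of these conversion steps follows. It assumes <code>ms32_create_checksum</code> and the illustrative <code>CHARSET</code> constant from the Checksum section, and chooses zero padding bits, although the specification allows arbitrary padding.
+
+<source lang="python">
+def encode_codex32_secret(master_seed, t, identifier):
+    # Header: threshold, identifier, and the share index "s".
+    data = [CHARSET.find(c) for c in str(t) + identifier + "s"]
+    # Bech32-encode the seed bytes, most significant bit first.
+    acc = bits = 0
+    for byte in master_seed:
+        acc = (acc << 8) | byte
+        bits += 8
+        while bits >= 5:
+            bits -= 5
+            data.append((acc >> bits) & 31)
+    if bits:
+        # Pad the final group; zero bits are used here, but the
+        # specification allows arbitrary padding bits.
+        data.append((acc << (5 - bits)) & 31)
+    data += ms32_create_checksum(data)
+    return "ms1" + "".join(CHARSET[d] for d in data)
+</source>
+
+With ''t''=3, identifier <code>cash</code>, and the master secret of Test vector 3 below, this reproduces the secret share with index <code>s</code> shown there, since that test vector also uses zero padding bits.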
+
+Along with the codex32 secret, the user must generate ''t''-1 other codex32 shares, each with the same threshold value, the same identifier, and a distinct share index.
+These shares should be generated as described in the "fresh master seed" section.
+
+The codex32 secret and the ''t''-1 codex32 shares form a set of ''t'' valid codex32 strings from which additional shares can be derived as described above.
+
+===Long codex32 Strings===
+
+The 13 character checksum design only supports up to 80 data characters.
+Excluding the threshold, identifier and index characters, this limits the payload to 74 characters or 46 bytes.
+While this is enough to support the 32-byte advised size of BIP-0032 master seeds, BIP-0032 allows seeds to be up to 64 bytes in size.
+We define a long codex32 string format to support these longer seeds by defining an alternative checksum.
+
+<source lang="python">
+MS32_LONG_CONST = 0x43381e570bf4798ab26
+
+def ms32_long_polymod(values):
+    GEN = [
+        0x3d59d273535ea62d897,
+        0x7a9becb6361c6c51507,
+        0x543f9b7e6c38d8a2a0e,
+        0x0c577eaeccf1990d13c,
+        0x1887f74f8dc71b10651,
+    ]
+    residue = 0x23181b3
+    for v in values:
+        b = (residue >> 70)
+        residue = (residue & 0x3fffffffffffffffff) << 5 ^ v
+        for i in range(5):
+            residue ^= GEN[i] if ((b >> i) & 1) else 0
+    return residue
+
+def ms32_verify_long_checksum(data):
+    return ms32_long_polymod(data) == MS32_LONG_CONST
+
+def ms32_create_long_checksum(data):
+    values = data
+    polymod = ms32_long_polymod(values + [0] * 15) ^ MS32_LONG_CONST
+    return [(polymod >> 5 * (14 - i)) & 31 for i in range(15)]
+</source>
+
+A long codex32 string follows the same specification as a regular codex32 string with the following changes.
+
+* The payload is a sequence of between 75 and 103 bech32 characters.
+* The checksum consists of 15 bech32 characters as defined above.
+
+A codex32 string with a data part of 94 or 95 characters is never legal: a regular codex32 string is limited to 93 data characters, and a long codex32 string has at least 96.
+
+Generation of long shares and recovery of the master seed from long shares proceeds in exactly the same way as for regular shares with the <code>ms32_interpolate</code> function.
+
+The long checksum is designed to be an error correcting code that can correct up to 4 character substitutions, up to 8 unreadable characters (called erasures), or up to 15 consecutive erasures.
+As with regular checksums we do not specify how an implementation should implement error correction, and all our recommendations for error correction of regular codex32 strings also apply to long codex32 strings.
+
+==Rationale==
+
+This scheme is based on the observation that the Lagrange interpolation of valid codewords in a BCH code will always be a valid codeword.
+This means that derived shares will always have a valid checksum, and a sufficient threshold of shares with valid checksums will derive a secret with a valid checksum.
+
+The header system is also compatible with Lagrange interpolation, meaning all derived shares will have the same identifier and will have the appropriate share index.
+This fact allows the header data to be covered by the checksum.
+
+The checksum size and identifier size have been chosen so that the encoding of 128-bit seeds and shares fits within 48 characters.
+This is a standard size for many common seed storage formats, which has been popularized by the 12 four-letter word format of the BIP-0039 mnemonic.
+
+The 13 character checksum is adequate to correct 4 errors in up to 93 characters (80 characters of data and 13 characters of the checksum).
+We can correct up to 8 erasures (errors with known locations), and up to 13 consecutive errors (burst errors).
+Beyond that, our code is guaranteed to detect up to 8 errors.
+More generally, any number of random errors will be detected with overwhelming (1 - 2^-65) probability. However, the checksum does not protect against maliciously constructed errors.
+These parameters are slightly better than those of the checksum used in SLIP-0039.
+
+For 256-bit seeds and shares our strings are 74 characters, which fits into the 96 character format of the 24 four-letter word format of the BIP-0039 mnemonic, with plenty of room to spare.
+
+A longer checksum is needed to support up to 512-bit seeds, the longest seed length specified in BIP-0032, as the 13 character checksum isn't adequate for more than 80 data characters.
+While we could use the 15 character checksum for both cases, we prefer to keep the strings as short as possible for the more common cases of 128-bit and 256-bit master seeds.
+We only guarantee to correct 4 characters no matter how long the string is.
+Longer strings mean more chances for transcription errors, so shorter strings are better.
+
+The longest data part using the regular 13 character checksum is 93 characters and corresponds to a 400-bit secret.
+At this length, the prefix <code>MS1</code> is not covered by the checksum.
+This is acceptable because the checksum scheme itself requires you to know that the <code>MS1</code> prefix is being used in the first place.
+If the prefix is damaged and a user is guessing that the data might be using this scheme, then the user can enter the available data explicitly using the suspected <code>MS1</code> prefix.
+
+===Not BIP-0039 Entropy===
+
+Instead of encoding a BIP-0032 master seed, an alternative would be to encode BIP-0039 entropy.
+However this alternative approach is fraught with difficulties.
+
+One approach would be to encode the BIP-0039 entropy along with the BIP-0039 checksum data.
+This data can directly be recovered from the BIP-0039 mnemonic, and the process can be reversed if one knows the target language.
+However, for a 128-bit seed, there is a 4 bit checksum yielding 132 bits of data that needs to be encoded.
+This exceeds the 130 bits of room that we have for storing 128-bit seeds.
+We would have to compromise on the 48 character size, or the size of the headers, or the size of the checksum in order to add room for an additional character of data.
+
+This approach would also eliminate our shortcut of generating a fresh master secret directly from random shares.
+One would be required to first generate BIP-0039 entropy, and then add a BIP-0039 checksum, before adding a Codex32 checksum and then generating the other shares.
+In particular, this process could no longer be performed by hand, since it is effectively impossible to compute a BIP-0039 checksum by hand.
+
+An alternative approach is to discard the BIP-0039 checksum, since it is inadequate for error correction anyway, and rely on the Codex32 checksum.
+However, this approach ends up eliminating the benefits of BIP-0039 compatibility.
+While it is now possible to hand generate fresh shares, it is impossible to recover compatible BIP-0039 words by hand because, again, the BIP-0039 checksum is not hand computable.
+The only way of generating the compatible BIP-0039 mnemonic is to use wallet software.
+But if wallet software is needed to support this approach to decoding entropy, we may as well bypass all of the overhead of BIP-0039 and directly encode the entropy of a BIP-0032 master seed, which is what we do in our Codex32 proposal.
+
+Beyond the problems above, BIP-0039 does not define a single transformation from entropy to BIP-0032 master seed.
+Instead, every language has its own word list (or word lists), and each choice of word list yields a different transformation from entropy to master seed.
+We would need to encode the choice of word list in our share's meta-data, which takes up even more room, and is difficult to specify due to the ever-evolving choice of word lists.
+
+Alternatively we could standardize on the choice of the English word list, something that is nearly a de facto standard, and simply be incompatible with BIP-0039 wallets of other languages.
+Such a choice also risks users of BIP-0039 recovering their entropy from their language, encoding it in Codex32, and then failing to recover their wallet because the English word list has replaced their language's word list.
+
+The main advantage of this alternative approach would be that wallets could give users an option to switch between backing up their entropy as a BIP-0039 mnemonic and in Codex32 format, but again, only if their language choice happens to be the English word list.
+In practice, we do not expect users to switch back and forth between backup formats; instead they can simply generate a fresh master seed using Codex32.
+
+Seeing little value in BIP-0039 compatibility (which would be English-only), given all the difficulties with BIP-0039 language choice, not to mention the PBKDF2 overhead of using BIP-0039, we think it is best to abandon BIP-0039 and encode BIP-0032 master seeds directly.
+Our approach is semi-convertible with BIP-0039's 512-bit master seeds (in all languages, see Backwards Compatibility) and fully interconvertible with SLIP-39 encoded master seeds or any other encoding of BIP-0032 master seeds.
+
+==Backwards Compatibility==
+
+codex32 is an alternative to BIP-0039 and SLIP-0039.
+It is technically possible to derive the BIP32 master seed from seed words encoded in one of these schemes, and then to encode this seed in codex32.
+For BIP-0039 this process is irreversible, since it involves hashing the original words.
+Furthermore, the resulting seed will be 512 bits long, which may be too large to be safely and conveniently handled.
+
+SLIP-0039 seed words can be reversibly converted to master seeds, so it is possible to interconvert between SLIP-0039 and codex32.
+However, SLIP-0039 '''shares''' cannot be converted to codex32 shares because the two schemes use a different underlying field.
+
+The authors of this BIP do not recommend interconversion.
+Instead, users who wish to switch to codex32 should generate a fresh seed and sweep their coins.
+
+==Reference Implementation==
+
+Our [https://github.com/BlockstreamResearch/codex32 reference implementation repository] contains implementations in Rust and PostScript.
+The inline code in this BIP text can be used as a Python reference.
+
+==Test Vectors==
+
+===Test vector 1===
+
+This example shows the codex32 format, when used without splitting the secret into any shares.
+The payload contains 26 bech32 characters, which corresponds to 130 bits. We truncate the last two bits in order to obtain a 128-bit master seed.
+
+codex32 secret (bech32): <code>ms10testsxxxxxxxxxxxxxxxxxxxxxxxxxx4nzvca9cmczlw</code>
+
+Master secret (hex): <code>318c6318c6318c6318c6318c6318c631</code>
+
+* human-readable part: <code>ms</code>
+* separator: <code>1</code>
+* k value: <code>0</code> (no secret splitting)
+* identifier: <code>test</code>
+* share index: <code>s</code> (the secret)
+* payload: <code>xxxxxxxxxxxxxxxxxxxxxxxxxx</code>
+* checksum: <code>4nzvca9cmczlw</code>
+* master node xprv: <code>xprv9s21ZrQH143K3taPNekMd9oV5K6szJ8ND7vVh6fxicRUMDcChr3bFFzuxY8qP3xFFBL6DWc2uEYCfBFZ2nFWbAqKPhtCLRjgv78EZJDEfpL</code>
+
+===Test vector 2===
+
+This example shows generating a new master seed using "random" codex32 shares, as well as deriving an additional codex32 share, using ''k''=2 and an identifier of <code>NAME</code>.
+Although codex32 strings are canonically all lowercase, it's also valid to use all uppercase.
+
+Share with index <code>A</code>: <code>MS12NAMEA320ZYXWVUTSRQPNMLKJHGFEDCAXRPP870HKKQRM</code>
+
+Share with index <code>C</code>: <code>MS12NAMECACDEFGHJKLMNPQRSTUVWXYZ023FTR2GDZMPY6PN</code>
+
+* Derived share with index <code>D</code>: <code>MS12NAMEDLL4F8JLH4E5VDVULDLFXU2JHDNLSM97XVENRXEG</code>
+* Secret share with index <code>S</code>: <code>MS12NAMES6XQGUZTTXKEQNJSJZV4JV3NZ5K3KWGSPHUH6EVW</code>
+* Master secret (hex): <code>d1808e096b35b209ca12132b264662a5</code>
+* master node xprv: <code>xprv9s21ZrQH143K2NkobdHxXeyFDqE44nJYvzLFtsriatJNWMNKznGoGgW5UMTL4fyWtajnMYb5gEc2CgaKhmsKeskoi9eTimpRv2N11THhPTU</code>
+
+Note that per BIP-0173, the lowercase form is used when determining a character's value for checksum purposes.
+In particular, given an all uppercase codex32 string, we still use lowercase <code>ms</code> as the human-readable part during checksum construction.
+
+===Test vector 3===
+
+This example shows splitting an existing 128-bit master seed into "random" codex32 shares, using ''k''=3 and an identifier of <code>cash</code>.
+We appended two zero bits in order to obtain 26 bech32 characters (130 bits of data) from the 128-bit master seed.
+
+Master secret (hex): <code>ffeeddccbbaa99887766554433221100</code>
+
+Secret share with index <code>s</code>: <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qqjzqud4m0d6nln</code>
+
+Share with index <code>a</code>: <code>ms13casha320zyxwvutsrqpnmlkjhgfedca2a8d0zehn8a0t</code>
+
+Share with index <code>c</code>: <code>ms13cashcacdefghjklmnpqrstuvwxyz023949xq35my48dr</code>
+
+* Derived share with index <code>d</code>: <code>ms13cashd0wsedstcdcts64cd7wvy4m90lm28w4ffupqs7rm</code>
+* Derived share with index <code>e</code>: <code>ms13casheekgpemxzshcrmqhaydlp6yhms3ws7320xyxsar9</code>
+* Derived share with index <code>f</code>: <code>ms13cashf8jh6sdrkpyrsp5ut94pj8ktehhw2hfvyrj48704</code>
+* master node xprv: <code>xprv9s21ZrQH143K266qUcrDyYJrSG7KA3A7sE5UHndYRkFzsPQ6xwUhEGK1rNuyyA57Vkc1Ma6a8boVqcKqGNximmAe9L65WsYNcNitKRPnABd</code>
+
+Any three of the five shares among <code>acdef</code> can be used to recover the secret.
+
+Note that the choice to append two zero bits was arbitrary, and any of the following four secret shares would have been valid choices.
+However, each choice would have resulted in a different set of derived shares.
+
+* <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qqjzqud4m0d6nln</code>
+* <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qpte35dvzkjpt0r</code>
+* <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qzfatvdwq5692k6</code>
+* <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qrsx6ydhed97jx2</code>
+
+===Test vector 4===
+
+This example shows converting a 256-bit secret into a codex32 secret, without splitting the secret into any shares.
+We appended four zero bits in order to obtain 52 bech32 characters (260 bits of data) from the 256-bit secret.
+
+256-bit secret (hex): <code>ffeeddccbbaa99887766554433221100ffeeddccbbaa99887766554433221100</code>
+
+* codex32 secret: <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqqtum9pgv99ycma</code>
+* master node xprv: <code>xprv9s21ZrQH143K3s41UCWxXTsU4TRrhkpD1t21QJETan3hjo8DP5LFdFcB5eaFtV8x6Y9aZotQyP8KByUjgLTbXCUjfu2iosTbMv98g8EQoqr</code>
+
+Note that the choice to append four zero bits was arbitrary, and any of the following sixteen codex32 secrets would have been valid:
+
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqqtum9pgv99ycma</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqpj82dp34u6lqtd</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqzsrs4pnh7jmpj5</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqrfcpap2w8dqezy</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqy5tdvphn6znrf0</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyq9dsuypw2ragmel</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqx05xupvgp4v6qx</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyq8k0h5p43c2hzsk</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqgum7hplmjtr8ks</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqf9q0lpxzt5clxq</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyq28y48pyqfuu7le</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqt7ly0paesr8x0f</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqvrvg7pqydv5uyz</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqd6hekpea5n0y5j</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqwcnrwpmlkmt9dt</code>
+* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyq0pgjxpzx0ysaam</code>
+
+===Test vector 5===
+
+This example shows generating a new 512-bit master seed using "random" codex32 characters and appending a checksum.
+The payload contains 103 bech32 characters, which corresponds to 515 bits. The last three bits are discarded when converting to a 512-bit master seed.
+
+This is an example of a '''Long codex32 String'''.
+
+* Secret share with index <code>S</code>: <code>MS100C8VSM32ZXFGUHPCHTLUPZRY9X8GF2TVDW0S3JN54KHCE6MUA7LQPZYGSFJD6AN074RXVCEMLH8WU3TK925ACDEFGHJKLMNPQRSTUVWXY06FHPV80UNDVARHRAK</code>
+* Master secret (hex): <code>dc5423251cb87175ff8110c8531d0952d8d73e1194e95b5f19d6f9df7c01111104c9baecdfea8cccc677fb9ddc8aec5553b86e528bcadfdcc201c17c638c47e9</code>
+* master node xprv: <code>xprv9s21ZrQH143K4UYT4rP3TZVKKbmRVmfRqTx9mG2xCy2JYipZbkLV8rwvBXsUbEv9KQiUD7oED1Wyi9evZzUn2rqK9skRgPkNaAzyw3YrpJN</code>
+
+===Invalid test vectors===
+
+These examples have incorrect checksums.
+
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxve740yyge2ghq</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxve740yyge2ghp</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxlk3yepcstwr</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxx6pgnv7jnpcsp</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxx0cpvr7n4geq</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxm5252y7d3lr</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxrd9sukzl05ej</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxc55srw5jrm0</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxgc7rwhtudwc</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxx4gy22afwghvs</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe8yfm0</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxvm597d</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxme084q0vpht7pe0</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxme084q0vpht7pew</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxqyadsp3nywm8a</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxzvg7ar4hgaejk</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxcznau0advgxqe</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxch3jrc6j5040j</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx52gxl6ppv40mcv</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx7g4g2nhhle8fk</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx63m45uj8ss4x8</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxy4r708q7kg65x</code>
+
+These examples use the wrong checksum for their given data sizes.
+
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxurfvwmdcmymdufv</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxcsyppjkd8lz4hx3</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxu6hwvl5p0l9xf3c</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxwqey9rfs6smenxa</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxv70wkzrjr4ntqet</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx3hmlrmpa4zl0v</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrfggf88znkaup</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxpt7l4aycv9qzj</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxus27z9xtyxyw3</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxcwm4re8fs78vn</code>
+
+These examples have improper lengths.
+They are either too short, too long, or would decode to a byte sequence with an incomplete group greater than 4 bits.
+
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxw0a4c70rfefn4</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxk4pavy5n46nea</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxx9lrwar5zwng4w</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxr335l5tv88js3</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxvu7q9nz8p7dj68v</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxpq6k542scdxndq3</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxkmfw6jm270mz6ej</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxzhddxw99w7xws</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxx42cux6um92rz</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxarja5kqukdhy9</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxky0ua3ha84qk8</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx9eheesxadh2n2n9</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx9llwmgesfulcj2z</code>
+* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx02ev7caq6n9fgkf</code>
+
+This example uses a "0" threshold with a non-"s" index.
+
+* <code>ms10fauxxxxxxxxxxxxxxxxxxxxxxxxxxxx0z26tfn0ulw3p</code>
+
+This example has a threshold that is not a digit.
+
+* <code>ms1fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxda3kr3s0s2swg</code>
+
+These examples do not begin with the required "ms" or "MS" prefix and/or are missing the "1" separator.
+
+* <code>0fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>ms0fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>m10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>s10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>0fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxhkd4f70m8lgws</code>
+* <code>10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxhkd4f70m8lgws</code>
+* <code>m10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxx8t28z74x8hs4l</code>
+* <code>s10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxh9d0fhnvfyx3x</code>
+
+These examples all incorrectly mix upper and lower case characters.
+
+* <code>Ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>mS10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>MS10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>ms10FAUXsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>ms10fauxSxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
+* <code>ms10fauxsXXXXXXXXXXXXXXXXXXXXXXXXXXuqxkk05lyf3x2</code>
+* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxUQXKK05LYF3X2</code>
+
+==Appendix==
+
+===Mathematical Companion===
+
+Below we use the bech32 character set to denote values in GF[32].
+In bech32, the letter <code>Q</code> denotes zero and the letter <code>P</code> denotes one.
+The digits <code>0</code> and <code>2</code> through <code>9</code> do ''not'' denote their numeric values.
+They are simply elements of GF[32].
+
+The generating polynomial for our BCH code is as follows.
+
+We extend GF[32] to GF[1024] by adjoining a primitive cube root of unity, <code>ζ</code>, satisfying <code>ζ^2 = ζ + P</code>.
+
+We select <code>β := G ζ</code> which has order 93, and construct the product <code>(x - β^i)</code> for <code>i</code> in <code>{17, 20, 46, 49, 52, 77, 78, 79, 80, 81, 82, 83, 84}</code>.
+The resulting polynomial is our generating polynomial for our 13 character checksum:
+
+ x^13 + E x^12 + M x^11 + 3 x^10 + G x^9 + Q x^8 + E x^7 + E x^6 + E x^5 + L x^4 + M x^3 + C x^2 + S x + S
+
+For our long checksum, we select <code>γ := E + X ζ</code>, which has order 1023, and construct the product <code>(x - γ^i)</code> for <code>i</code> in <code>{32, 64, 96, 895, 927, 959, 991, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026}</code>.
+The resulting polynomial is our generating polynomial for our 15 character checksum for long strings:
+
+ x^15 + 0 x^14 + 2 x^13 + E x^12 + 6 x^11 + F x^10 + E x^9 + 4 x^8 + X x^7 + H x^6 + 4 x^5 + X x^4 + 9 x^3 + K x^2 + Y x^1 + H
+
+(Reminder: the character <code>0</code> does ''not'' denote the zero of the field.)
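+
+As a non-normative sanity check, the GF[32] tables given in the share-recovery code above can be verified against each other in Python, using the illustrative <code>CHARSET</code> constant from the Checksum section.
+
+<source lang="python">
+# "Q" denotes zero and "P" denotes one in the bech32 labelling of GF[32].
+assert CHARSET.index("q") == 0 and CHARSET.index("p") == 1
+
+# Multiplying by P (one) is the identity, and every nonzero element times its
+# entry in bech32_inv gives P, as required of a finite field.
+for a in range(32):
+    assert bech32_mul(a, 1) == a
+for a in range(1, 32):
+    assert bech32_mul(a, bech32_inv[a]) == 1
+</source>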
diff --git a/bip-0118.mediawiki b/bip-0118.mediawiki
index a3a690b..93e0578 100644
--- a/bip-0118.mediawiki
+++ b/bip-0118.mediawiki
@@ -73,7 +73,7 @@ To convert a 33-byte BIP 118 public key for use with [[bip-0340.mediawiki|BIP 34
==== Signature message ====
-The function ''SigMsg118(hash_type, ext_flag)'' computes the message being signed as a byte array, analogously to ''SigMsg(hash_type, ext_flag)'' defined in [[bip-0341.mediawiki|BIP 341]], ''SigExt118(hash_type,key_version)'' computes the extension, similarly to [[bip-0342.mediawiki|BIP 342]].
+We define the functions ''Msg118(hash_type)'' and ''Ext118(hash_type)'' which compute the message being signed as a byte array.
The parameter ''hash_type'' is an 8-bit unsigned value, reusing values defined in [[bip-0341.mediawiki|BIP 341]], with the addition that the values <code>0x41</code>, <code>0x42</code>, <code>0x43</code>, <code>0xc1</code>, <code>0xc2</code>, and <code>0xc3</code> are also valid for BIP 118 public keys.
@@ -82,64 +82,56 @@ We define the following constants using bits 6 and 7 of <code>hash_type</code>:
* <code>SIGHASH_ANYPREVOUT = 0x40</code>
* <code>SIGHASH_ANYPREVOUTANYSCRIPT = 0xc0</code>
-As per [[bip-0341.mediawiki|BIP 341]], the parameter ''ext_flag'' is an integer in the range 0-127, used for indicating that extensions are added at the end of the message. The parameter ''key_version'' is an 8-bit unsigned value (an integer in the range 0-255) used for committing to the public key version.
-
The following restrictions apply and cause validation failure if violated:
* Using any undefined ''hash_type'' (not ''0x00'', ''0x01'', ''0x02'', ''0x03'', ''0x41'', ''0x42'', ''0x43'', ''0x81'', ''0x82'', ''0x83'', ''0xc1'', ''0xc2'', or ''0xc3'').
* Using <code>SIGHASH_SINGLE</code> without a "corresponding output" (an output with the same index as the input being verified).
-If these restrictions aren't violated, ''SigMsg118(hash_type,ext_flag)'' evaluates to the concatenation of the following data, in order (with byte size of each item listed in parentheses). Numerical values in 2, 4, or 8-byte items are encoded in little-endian.
+If these restrictions are not violated, ''Msg118(hash_type)'' evaluates as follows.
+
+If ''hash_type & 0x40 == 0'', then ''Msg118(hash_type) = SigMsg(hash_type, 1)'', where ''SigMsg'' is as defined in [[bip-0341.mediawiki|BIP 341]].
+
+If ''hash_type & 0x40 != 0'', then ''Msg118(hash_type)'' is the concatenation of the following data, in order (with byte size of each item listed in parentheses). Numerical values in 2, 4, or 8-byte items are encoded in little-endian.
* Control:
** ''hash_type'' (1).
* Transaction data:
** ''nVersion'' (4): the ''nVersion'' of the transaction.
** ''nLockTime'' (4): the ''nLockTime'' of the transaction.
-** If ''hash_type & 0xc0'' is zero:
-*** ''sha_prevouts'' (32): the SHA256 of the serialization of all input outpoints.
-*** ''sha_amounts'' (32): the SHA256 of the serialization of all spent output amounts.
-*** ''sha_scriptpubkeys'' (32): the SHA256 of the serialization of all spent output ''scriptPubKey''s.
-*** ''sha_sequences'' (32): the SHA256 of the serialization of all input ''nSequence''.
** If ''hash_type & 3'' does not equal <code>SIGHASH_NONE</code> or <code>SIGHASH_SINGLE</code>:
*** ''sha_outputs'' (32): the SHA256 of the serialization of all outputs in <code>CTxOut</code> format.
* Data about this input:
-** ''spend_type'' (1): equal to ''(ext_flag * 2) + annex_present'', where ''annex_present'' is 0 if no annex is present, or 1 otherwise (the original witness stack has two or more witness elements, and the first byte of the last element is ''0x50'')
-** If ''hash_type & 0xc0'' is non-zero:
-*** If ''hash_type & 0xc0'' is <code>SIGHASH_ANYONECANPAY</code>:
-**** ''outpoint'' (36): the <code>COutPoint</code> of this input (32-byte hash + 4-byte little-endian).
-*** If ''hash_type & 0xc0'' is <code>SIGHASH_ANYONECANPAY</code> or <code>SIGHASH_ANYPREVOUT</code>:
-**** ''amount'' (8): value of the previous output spent by this input.
-**** ''scriptPubKey'' (35): ''scriptPubKey'' of the previous output spent by this input, serialized as script inside <code>CTxOut</code>. Its size is always 35 bytes.
-*** ''nSequence'' (4): ''nSequence'' of this input.
-** If ''hash_type & 0xc0'' is zero:
-*** ''input_index'' (4): index of this input in the transaction input vector. Index of the first input is 0.
+** ''spend_type'' (1): equal to 2 if no annex is present, or 3 otherwise (the original witness stack has two or more witness elements, and the first byte of the last element is ''0x50'')
+** If ''hash_type & 0xc0'' is <code>SIGHASH_ANYPREVOUT</code>:
+*** ''amount'' (8): value of the previous output spent by this input.
+*** ''scriptPubKey'' (35): ''scriptPubKey'' of the previous output spent by this input, serialized as script inside <code>CTxOut</code>. Its size is always 35 bytes.
+** ''nSequence'' (4): ''nSequence'' of this input.
** If an annex is present (the lowest bit of ''spend_type'' is set):
*** ''sha_annex'' (32): the SHA256 of ''(compact_size(size of annex) || annex)'', where ''annex'' includes the mandatory ''0x50'' prefix.
* Data about this output:
** If ''hash_type & 3'' equals <code>SIGHASH_SINGLE</code>:
*** ''sha_single_output'' (32): the SHA256 of the corresponding output in <code>CTxOut</code> format.
-Similarly, ''SigExt118(hash_type,key_version)'' evaluates to the concatenation of:
+Similarly, ''Ext118(hash_type)'' evaluates to the concatenation of the following data, in order:
* Extension:
** If ''hash_type & 0xc0'' is not <code>SIGHASH_ANYPREVOUTANYSCRIPT</codE>:
*** ''tapleaf_hash'' (32): the tapleaf hash as defined in [[bip-0341.mediawiki|BIP 341]]
-** ''key_version'' (1).
+** ''key_version'' (1): a constant value ''0x01'' representing that this is a signature for a BIP 118 public key.
** ''codesep_pos'' (4): the opcode position of the last executed <code>OP_CODESEPARATOR</code> before the currently executed signature opcode, with the value in little endian (or ''0xffffffff'' if none executed). The first opcode in a script has a position of 0. A multi-byte push opcode is counted as one opcode, regardless of the size of data being pushed.
-Note that if ''hash_type & 0x40'' is zero, ''SigMsg118(hash_type,ext_flag) == SigMsg(hash_type,ext_flag)'', and ''SigExt118(hash_type,0x00) == ext'' (where ''ext'' is the message extension as defined in [[bip-0342.mediawiki|BIP 342]]).
-
To verify a signature ''sig'' for a BIP 118 public key ''p'':
-* If the ''sig'' is 64 bytes long, return ''Verify(p, hash<sub>TapSigHash</sub>(0x00 || SigMsg118(0x00, 1) || SigExt118(0x00, 0x01), sig)'', where ''Verify'' is defined in [[bip-0340.mediawiki|BIP 340]].
-* If the ''sig'' is 65 bytes long, return ''sig[64] &ne; 0x00 and Verify(p, hash<sub>TapSighash</sub>(0x00 || SigMsg118(sig[64], 1) || SigExt118(sig[64], 0x01), sig[0:64])''.
+* If the ''sig'' is 64 bytes long, return ''Verify(p, hash<sub>TapSigHash</sub>(0x00 || Msg118(0x00) || Ext118(0x00)), sig)''
+* If the ''sig'' is 65 bytes long, return ''sig[64] &ne; 0x00 and Verify(p, hash<sub>TapSighash</sub>(0x00 || Msg118(sig[64]) || Ext118(sig[64])), sig[0:64])''.
* Otherwise, fail.
+''Verify'' is as defined in [[bip-0340.mediawiki|BIP 340]].
+
The key differences from [[bip-0342.mediawiki|BIP 342]] signature verification are:
* In all cases, <code>key_version</code> is set to the constant value <code>0x01</code> instead of <code>0x00</code>.<ref>'''Why change key_version?''' Changing <code>key_version</code> ensures that if the same private key is used to generate both a [[bip-0342.mediawiki|BIP 342]] key and a BIP 118 public key, that a signature for the [[bip-0342.mediawiki|BIP 342]] key is not also valid for the BIP 118 public key (and vice-versa).</ref>
* If <code>SIGHASH_ANYPREVOUT</code> is set, the digest is calculated as if <code>SIGHASH_ANYONECANPAY</code> was set, except <code>outpoint</code> is not included in the digest.
-* If <code>SIGHASH_ANYPREVOUTANYSCRIPT</code> is set, the digest is calculated as if <code>SIGHASH_ANYONECANPAY</code> was set, except <code>outpoint</code>, <code>scriptPubKey</code> and <code>tapleaf_hash</code> are not included in the digest.
+* If <code>SIGHASH_ANYPREVOUTANYSCRIPT</code> is set, the digest is calculated as if <code>SIGHASH_ANYONECANPAY</code> was set, except <code>outpoint</code>, <code>amount</code>, <code>scriptPubKey</code> and <code>tapleaf_hash</code> are not included in the digest.
== Security ==
diff --git a/bip-0151.mediawiki b/bip-0151.mediawiki
index 005c552..793c244 100644
--- a/bip-0151.mediawiki
+++ b/bip-0151.mediawiki
@@ -5,10 +5,11 @@
Author: Jonas Schnelli <dev@jonasschnelli.ch>
Comments-Summary: Controversial; some recommendation, and some discouragement
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0151
- Status: Withdrawn
+ Status: Replaced
Type: Standards Track
Created: 2016-03-23
License: PD
+ Superseded-By: 324
</pre>
== Abstract ==
diff --git a/bip-0158.mediawiki b/bip-0158.mediawiki
index 484e674..8887d32 100644
--- a/bip-0158.mediawiki
+++ b/bip-0158.mediawiki
@@ -273,10 +273,8 @@ This BIP defines one initial filter type:
The basic filter is designed to contain everything that a light client needs to
sync a regular Bitcoin wallet. A basic filter MUST contain exactly the
following items for each transaction in a block:
-* The previous output script (the script being spent) for each input, except
- for the coinbase transaction.
-* The scriptPubKey of each output, aside from all <code>OP_RETURN</code> output
- scripts.
+* The previous output script (the script being spent) for each input, except for the coinbase transaction.
+* The scriptPubKey of each output, aside from all <code>OP_RETURN</code> output scripts.
Any "nil" items MUST NOT be included into the final set of filter elements.
diff --git a/bip-0174.mediawiki b/bip-0174.mediawiki
index 67f1c69..f4c0807 100644
--- a/bip-0174.mediawiki
+++ b/bip-0174.mediawiki
@@ -120,7 +120,7 @@ The currently defined global types are as follows:
| <tt>PSBT_GLOBAL_TX_VERSION = 0x02</tt>
| None
| No key data
-| <tt><32-bit little endian uint version></tt>
+| <tt><32-bit little endian int version></tt>
| The 32-bit little endian signed integer representing the version number of the transaction being created. Note that this is not the same as the PSBT version number specified by the PSBT_GLOBAL_VERSION field.
| 2
| 0
@@ -410,7 +410,7 @@ The currently defined per-input types are defined as follows:
| <tt>PSBT_IN_REQUIRED_HEIGHT_LOCKTIME = 0x12</tt>
| None
| No key data
-| <tt><32-bit uiht locktime></tt>
+| <tt><32-bit uint locktime></tt>
| 32 bit unsigned little endian integer less than 500000000 representing the minimum block height that this input requires to be set as the transaction's lock time.
|
| 0
@@ -453,7 +453,7 @@ The currently defined per-input types are defined as follows:
| Taproot Key BIP 32 Derivation Path
| <tt>PSBT_IN_TAP_BIP32_DERIVATION = 0x16</tt>
| <tt><32 byte xonlypubkey></tt>
-| A 32 byte X-only public key involved in this input. It may be the internal key, or a key present in a leaf script.
+| A 32 byte X-only public key involved in this input. It may be the output key, the internal key, or a key present in a leaf script.
| <tt><compact size uint number of hashes> <32 byte leaf hash>* <4 byte fingerprint> <32-bit little endian uint path element>*</tt>
| A compact size unsigned integer representing the number of leaf hashes, followed by a list of leaf hashes, followed by the 4 byte master key fingerprint concatenated with the derivation path of the public key. The derivation path is represented as 32-bit little endian unsigned integer indexes concatenated with each other. Public keys are those needed to spend this output. The leaf hashes are of the leaves which involve this public key. The internal key does not have leaf hashes, so can be indicated with a <tt>hashes len</tt> of 0. Finalizers should remove this field after <tt>PSBT_IN_FINAL_SCRIPTWITNESS</tt> is constructed.
|
@@ -591,7 +591,7 @@ determine which outputs are change outputs and verify that the change is returni
| Taproot Key BIP 32 Derivation Path
| <tt>PSBT_OUT_TAP_BIP32_DERIVATION = 0x07</tt>
| <tt><32 byte xonlypubkey></tt>
-| A 32 byte X-only public key involved in this output. It may be the internal key, or a key present in a leaf script.
+| A 32 byte X-only public key involved in this output. It may be the output key, the internal key, or a key present in a leaf script.
| <tt><compact size uint number of hashes> <32 byte leaf hash>* <4 byte fingerprint> <32-bit little endian uint path element>*</tt>
| A compact size unsigned integer representing the number of leaf hashes, followed by a list of leaf hashes, followed by the 4 byte master key fingerprint concatenated with the derivation path of the public key. The derivation path is represented as 32-bit little endian unsigned integer indexes concatenated with each other. Public keys are those needed to spend this output. The leaf hashes are of the leaves which involve this public key. The internal key does not have leaf hashes, so can be indicated with a <tt>hashes len</tt> of 0. Finalizers should remove this field after <tt>PSBT_IN_FINAL_SCRIPTWITNESS</tt> is constructed.
|
@@ -676,7 +676,7 @@ The Signer must only accept a PSBT.
The Signer must only use the UTXOs provided in the PSBT to produce signatures for inputs.
Before signing a non-witness input, the Signer must verify that the TXID of the non-witness UTXO matches the TXID specified in the unsigned transaction.
Before signing a witness input, the Signer must verify that the witnessScript (if provided) matches the hash specified in the UTXO or the redeemScript, and the redeemScript (if provided) matches the hash in the UTXO.
-The Signer may choose to fail to sign a segwit input if a non-witness UTXO is not provided. <ref>'''Why would non-witness UTXOs be provided for segwit inputs?''' The sighash algorithm for Segwit specified in BIP 173 is known to have an issue where an attacker could trick a user to sending Bitcoin to fees if they are able to convince the user to sign a malicious transaction multiple times. This is possible because the amounts in <tt>PSBT_IN_WITNESS_UTXO</tt> of other segwit inputs can be modified without effecting the signature for a particular input. In order to prevent this kind of attack, many wallets are requiring that the full previous transaction (i.e. <tt>PSBT_IN_NON_WITNESS_UTXO</tt>) be provided to ensure that the amounts of other inputs are not being tampered with.</ref>
+The Signer may choose to fail to sign a segwit input if a non-witness UTXO is not provided. <ref>'''Why would non-witness UTXOs be provided for segwit inputs?''' The sighash algorithm for Segwit specified in BIP 143 is known to have an issue where an attacker could trick a user into sending Bitcoin to fees if they are able to convince the user to sign a malicious transaction multiple times. This is possible because the amounts in <tt>PSBT_IN_WITNESS_UTXO</tt> of other segwit inputs can be modified without affecting the signature for a particular input. In order to prevent this kind of attack, many wallets require that the full previous transaction (i.e. <tt>PSBT_IN_NON_WITNESS_UTXO</tt>) be provided to ensure that the amounts of other inputs are not being tampered with.</ref>
The Signer should not need any additional data sources, as all necessary information is provided in the PSBT format.
The Signer must only add data to a PSBT.
Any signatures created by the Signer must be added as a "Partial Signature" key-value pair for the respective input it relates to.
@@ -718,15 +718,8 @@ sign_non_witness(script_code, i):
if IsMine(key) and IsAcceptable(sighash_type):
sign(non_witness_sighash(script_code, i, input))
-for input,i in enumerate(psbt.inputs):
- if non_witness_utxo.exists:
- assert(sha256d(non_witness_utxo) == psbt.tx.input[i].prevout.hash)
- if redeemScript.exists:
- assert(non_witness_utxo.vout[psbt.tx.input[i].prevout.n].scriptPubKey == P2SH(redeemScript))
- sign_non_witness(redeemScript, i)
- else:
- sign_non_witness(non_witness_utxo.vout[psbt.tx.input[i].prevout.n].scriptPubKey, i)
- else if witness_utxo.exists:
+for input, i in enumerate(psbt.inputs):
+ if witness_utxo.exists:
if redeemScript.exists:
assert(witness_utxo.scriptPubKey == P2SH(redeemScript))
script = redeemScript
@@ -737,6 +730,13 @@ for input,i in enumerate(psbt.inputs):
else if IsP2WSH(script):
assert(script == P2WSH(witnessScript))
sign_witness(witnessScript, i)
+ else if non_witness_utxo.exists:
+ assert(sha256d(non_witness_utxo) == psbt.tx.input[i].prevout.hash)
+ if redeemScript.exists:
+ assert(non_witness_utxo.vout[psbt.tx.input[i].prevout.n].scriptPubKey == P2SH(redeemScript))
+ sign_non_witness(redeemScript, i)
+ else:
+ sign_non_witness(non_witness_utxo.vout[psbt.tx.input[i].prevout.n].scriptPubKey, i)
else:
assert False
</pre>
diff --git a/bip-0176.mediawiki b/bip-0176.mediawiki
index 8a49bfa..60311c4 100644
--- a/bip-0176.mediawiki
+++ b/bip-0176.mediawiki
@@ -28,7 +28,7 @@ Potential benefits of utilizing "bits" include:
== Specification ==
Definition: 1 bit = 100 satoshis.
-Plural of "bit" is "bits". The terms "bit" and "bits" are not proper nouns and thus should not be capitalized unless used at the start of a sentence, etc.
+Plural of "bit" is "bits." The terms "bit" and "bits" are not proper nouns and thus should not be capitalized unless used at the start of a sentence, etc.
All bitcoin-denominated items are encouraged to also show the denomination in bits, either as the default or as an option.
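+
+For illustration only (not part of the specification), the definition above implies the following conversions:
+
+<pre>
+SATS_PER_BIT = 100
+SATS_PER_BTC = 100_000_000           # 1 BTC = 100,000,000 satoshis
+
+def btc_to_bits(btc):
+    return btc * SATS_PER_BTC / SATS_PER_BIT
+
+assert btc_to_bits(1) == 1_000_000   # 1 BTC is displayed as 1,000,000 bits
+assert 250 * SATS_PER_BIT == 25_000  # 250 bits = 25,000 satoshis
+</pre>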
@@ -37,16 +37,16 @@ As bitcoin grows in price versus fiat currencies, it's important to give users t
Existing terms used in bitcoin such as satoshi, milli-bitcoin (mBTC) and bitcoin (BTC) do not conflict as they operate at different orders of magnitude.
-The term micro-bitcoin (µBTC) can continue to exist in tandem with the term "bits".
+The term micro-bitcoin (µBTC) can continue to exist in tandem with the term "bits."
== Backwards Compatibility ==
-Software such as the Bitcoin Core GUI currently use the µBTC denomination and can continue to do so. There is no obligation to switch to "bits".
+Software such as the Bitcoin Core GUI currently uses the µBTC denomination and can continue to do so. There is no obligation to switch to "bits."
The term "bit" has many different definitions, but the ones of particular note are these:
-* 1 bit = 1/8 dollar (e.g. That candy cost me 2 bits)
-* bit meaning some amount of data (e.g. The first bit of the version field is 0)
-* bit meaning strength of a cryptographic algorithm (e.g. 256-bit ECDSA is used in Bitcoin)
+* 1 bit = 1/8 dollar (e.g., that candy cost me 2 bits {or 1/4 dollar})
+* bit meaning some amount of data (e.g., the first bit of the version field is 0)
+* bit meaning strength of a cryptographic algorithm (e.g., 256-bit ECDSA is used in Bitcoin)
The first is a bit dated and isn't likely to confuse people dealing with Bitcoin. The second and third are computer science terms and context should be sufficient to figure out what the user of the word means.
@@ -54,4 +54,4 @@ The first is a bit dated and isn't likely to confuse people dealing with Bitcoin
This BIP is licensed under the BSD 2-clause license.
== Credit ==
-It's hard to ascertain exactly who invented the term "bits", but the term has been around for a while and the author of this BIP does not take any credit for inventing the term. \ No newline at end of file
+It's hard to ascertain exactly who invented the term "bits," but the term has been around for a while and the author of this BIP does not take any credit for inventing the term.
diff --git a/bip-0300.mediawiki b/bip-0300.mediawiki
index 3cecd85..20d1936 100644
--- a/bip-0300.mediawiki
+++ b/bip-0300.mediawiki
@@ -15,7 +15,7 @@
==Abstract==
-In Bip300, txns are not signed via cryptographic key. Instead, they are "signed" by the accumulation of hashpower over time.
+In Bip300, txns are not signed via cryptographic key. Instead, they are "signed" by hashpower, over time -- like a big multisig, 13150-of-26300, where each block is a new "signature".
Bip300 emphasizes slow, transparent, auditable transactions which are easy for honest users to get right and very hard for dishonest users to abuse. The chief design goal for Bip300 is ''partitioning'' -- users may safely ignore Bip300 txns if they want to (or Bip300 entirely).
@@ -27,7 +27,7 @@ See [http://www.drivechain.info/ this site] for more information.
As Reid Hoffman [https://blockstream.com/2015/01/13/en-reid-hoffman-on-the-future-of-the-bitcoin-ecosystem/ wrote in 2014]: "Sidechains allow developers to add features and functionality to the Bitcoin universe without actually modifying the Bitcoin Core code...Consequently, innovation can occur faster, in more flexible and distributed ways, without losing the synergies of a common platform with a single currency."
-Coins such as Namecoin, Monero, ZCash, and Sia, offer features that Bitcoiners cannot access -- not without selling their BTC to invest in a rival monetary unit. According to [https://coinmarketcap.com/charts/#dominance-percentage coinmarketcap.com], there is now more value *outside* the BTC protocol than within it. According to [https://cryptofees.info/ cryptofees.info], 10x more txn fees are paid outside the BTC protocol, than within it.
+Today, coins such as Namecoin, Monero, ZCash, and Sia offer features that Bitcoiners cannot access -- not without selling their BTC to invest in a rival monetary unit. According to [https://coinmarketcap.com/charts/#dominance-percentage coinmarketcap.com], there is now more value *outside* the BTC protocol than within it. According to [https://cryptofees.info/ cryptofees.info], 15x more txn fees are paid outside the BTC protocol than within it.
Software improvements to Bitcoin rely on developer consensus -- BTC will pass on a good idea if it is even slightly controversial. Development is slow: we are now averaging one major feature every 5 years.
@@ -37,7 +37,9 @@ BTC can copy every useful technology, as soon as it is invented; scamcoins lose
==Specification==
-Bip300 allows for six new blockchain messages:
+===Overview===
+
+Bip300 allows for six new blockchain messages (these have consensus significance):
* M1. "Propose New Sidechain"
* M2. "ACK Proposal"
@@ -48,22 +50,12 @@ Bip300 allows for six new blockchain messages:
Nodes organize those messages into two caches:
-* D1. "Escrow_DB" -- tracks the 256 Hashrate Escrows (Escrows slots that a sidechain can live in).
-* D2. "Withdrawal_DB" -- tracks the withdrawal-Bundles (coins leaving a Sidechain).
-
-We will cover:
-
-# Adding Sidechains (D1, M1, M2)
-# Approving Withdrawals (D2, M3, M4)
-# Depositing and Withdrawing (M5, M6)
-
-=== Adding Sidechains (D1, M1, M2) ===
-
-
-==== D1 -- "Escrow_DB" ====
+* D1. "The Sidechain List", which tracks the 256 Hashrate Escrows (Escrows are slots that a sidechain can live in).
+* D2. "The Withdrawal List", which tracks the withdrawal-Bundles (coins leaving a Sidechain).
-The table below enumerates the new database fields, their size in bytes, and their purpose. A sidechain designer is free to choose any value for these.
+==== D1 (The Sidechain List) ====
+D1 is a list of active sidechains. D1 is updated via M1 and M2.
{| class="wikitable"
|- style="font-weight:bold; text-align:center; vertical-align:middle;"
@@ -133,16 +125,61 @@ The table below enumerates the new database fields, their size in bytes, and the
| Of the CTIP, the second element of the pair: the Index. See #11 above.
|}
-D1 is updated via M1 and M2.
+==== D2 (The Withdrawal List) ====
-==== M1 -- "Propose New Sidechain" ====
+D2 lists withdrawal-attempts. If these attempts succeed, they will pay coins "from" a Bip300-locked UTXO to new UTXOs controlled by the withdrawing user. Each attempt pays out many users, so we call these withdrawal-attempts "Bundles".
-Examples:
+D2 is driven by M3, M4, M5, and M6. Those messages enforce the following principles:
-<img src="bip-0300/m1-gui.jpg?raw=true" align="middle"></img>
+# The Bundles have a canonical order (first come first serve).
+# From one block to the next, every "Blocks Remaining" field decreases by 1.
+# When "Blocks Remaining" reaches zero the Bundle is removed.
+# From one block to the next, the value in "ACKs" may either increase or decrease, by a maximum of 1 (see M4).
+# If a Bundle's "ACKs" reach 13150 or greater, it "succeeds" and its corresponding M6 message can be included in a block.
+# If the M6 of a Bundle is paid out, it is also removed.
+# If a Bundle cannot possibly succeed (13150 - "ACKs" > "Blocks Remaining"), it is removed immediately.
-<img src="bip-0300/m1-cli.png?raw=true" align="middle"></img>
+
+{| class="wikitable"
+! Field No.
+! Label
+! Type
+! Description / Purpose
+|-
+| 1
+| Sidechain Number
+| uint8_t
+| Links the withdrawal-request to a specific hashrate escrow.
+|-
+| 2
+| Bundle Hash
+| uint256
+| A withdrawal attempt. Specifically, it is a "blinded transaction id" (ie, the double-Sha256 of a txn that has had two fields zeroed out, see M6) of a txn which could withdraw funds from a sidechain.
+|-
+| 3
+| ACKs (Work Score)
+| uint16_t
+| The current ACK-counter, which is the total number of ACKs (the PoW that has been used to validate the Bundle).
+|-
+| 4
+| Blocks Remaining (Age)
+| uint16_t
+| The number of blocks which this Bundle has remaining to accumulate ACKs
+|}
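+
+The rules above can be summarized in a non-normative Python sketch. The names here (<code>BundleEntry</code>, <code>advance_d2_one_block</code>, the vote encoding) are hypothetical and chosen only for illustration; the starting values come from the M3 rules below.
+
+<pre>
+ACK_THRESHOLD = 13150
+
+class BundleEntry:
+    def __init__(self, sidechain_number, bundle_hash):
+        self.sidechain_number = sidechain_number
+        self.bundle_hash = bundle_hash
+        self.acks = 1                   # an M3 creates the entry with 1 ACK...
+        self.blocks_remaining = 26299   # ...and 26,299 blocks to accumulate more
+
+def advance_d2_one_block(d2, m4_votes, paid_out_hashes):
+    """d2: ordered list of BundleEntry (first come, first served).
+    m4_votes: {sidechain_number: bundle_hash to upvote, or "abstain", or "alarm"}."""
+    next_d2 = []
+    for entry in d2:
+        if entry.bundle_hash in paid_out_hashes:        # its M6 was paid out: remove
+            continue
+        vote = m4_votes.get(entry.sidechain_number, "abstain")
+        if vote == entry.bundle_hash:
+            entry.acks += 1                             # at most +1 per block
+        elif vote != "abstain":                         # upvoting another bundle, or "alarm"
+            entry.acks = max(0, entry.acks - 1)         # downvotes everything else
+        entry.blocks_remaining -= 1
+        if entry.blocks_remaining <= 0:
+            continue                                    # expired: remove
+        if ACK_THRESHOLD - entry.acks > entry.blocks_remaining:
+            continue                                    # can no longer possibly succeed: remove
+        next_d2.append(entry)
+    return next_d2
+</pre>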
+
+=== The Six New Bip300 Messages ===
+
+First, how are new sidechains created?
+
+They are first proposed (with M1), and later acked (with M2). This process resembles Bip9 soft fork activation.
+
+==== M1 -- Propose Sidechain ====
M1 is a coinbase OP Return output containing the following:
@@ -159,170 +196,136 @@ M1 is a coinbase OP Return output containing the following:
32-byte hashID1
20-byte hashID2
-M1 is used in conjunction with M2.
-
-==== M2 -- "ACK Sidechain Proposal" ====
+===== Examples =====
- 1-byte - OP_RETURN (0x6a)
- 4-byte - Message header (0xD6E1C5BF)
- 32-byte - sha256D hash of sidechain's serialization
+<img src="bip-0300/m1-gui.jpg?raw=true" align="middle"></img>
-==== M1/M2 Validation Rules ====
+<img src="bip-0300/m1-cli.png?raw=true" align="middle"></img>
-# Any miner can propose a new sidechain at any time. This procedure resembles BIP 9 soft fork activation: the network must see a properly-formatted M1, followed by "acknowledgment" of the sidechain in 90% of the following 2016 blocks.
-# It is possible to "overwrite" a sidechain. This requires more ACKs -- 50% of the following 26300 blocks must contain an M2. The possibility of overwrite, does not change the security assumptions (because we already assume that users perform extra-protocolic validation at a rate of 1 bit per 26300 blocks).
+==== M2 -- ACK Sidechain Proposal ====
+M2 is a coinbase OP Return output containing the following:
-=== Approving Withdrawals (D2, M3, M4) ===
+ 1-byte - OP_RETURN (0x6a)
+ 4-byte - Message header (0xD6E1C5BF)
+ 32-byte - sha256D hash of sidechain's serialization
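+
+For illustration, the output above could be assembled as follows (non-normative sketch; the exact push-opcode encoding of the OP_RETURN payload is not shown here):
+
+<pre>
+import hashlib
+
+def sha256d(data: bytes) -> bytes:
+    return hashlib.sha256(hashlib.sha256(data).digest()).digest()
+
+def build_m2_payload(sidechain_serialization: bytes) -> bytes:
+    # 0x6a = OP_RETURN, followed by the 4-byte message header and the 32-byte hash
+    return bytes([0x6a]) + bytes.fromhex("D6E1C5BF") + sha256d(sidechain_serialization)
+</pre>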
-Withdrawals in Bip300 (ie, "M6"), are very significant. So, we will first discuss how these are approved/rejected -- a process involving M3, M4, and D2.
+===== Notes =====
-==== What are Bundles? ====
+The new M1/M2 validation rules are:
-All Bip300 withdrawals take the form of “Bundles” (formerly known as “WT^s”) -- named because they "bundle up" many individual withdrawal-requests into one single rare layer1 transaction.
+# Any miner can propose a new sidechain (M1) at any time. This procedure resembles BIP 9 soft fork activation: the network must see a properly-formatted M1, followed by "acknowledgment" of the sidechain (M2) in 90% of the following 2016 blocks.
+# Bip300 comes with only 256 sidechain-slots. If all are used, it is possible to "overwrite" a sidechain. This requires vastly more M2 ACKs -- 50% of the following 26300 blocks must contain an M2. The possibility of overwrite does not change the Bip300 security assumptions (because we already assume that the sidechain is vulnerable to miners, at a rate of 1 catastrophe per 13150 blocks).
-This bundle either pays all of the withdrawals out, or else it fails (and pays nothing out). Bip300 / layer 1 does not assemble Bundles (the sidechain developer does this in a manner of their choosing).
-Bundles are identified by a 32-byte hash, which aspires to be the TxID of M6. Unfortunately, the Bundle-hash and M6-TxID cannot match exactly, since the first input to M6 is a CTIP which is constantly changing. So, we must accomplish a task, which is conceptually similar to AnyPrevOut (BIP 118). We define a "blinded TxID" as a way of hashing a txn, in which some bytes are first overwritten with zeros. In our case, these bytes are the first input and the first output.
-D2 controls Bundles, and is driven by M3, M4, M5, and M6.
+==== Notes on Withdrawing Coins ====
+Bip300 withdrawals ("M6") are very significant.
-==== D2 -- "Withdrawal_DB" ====
+For an M6 to be valid, it must be first "prepped" by one M3 and then 13,150+ M4s. M3 and M4 are about "Bundles".
+===== What are Bundles? =====
-{| class="wikitable"
-! Field No.
-! Label
-! Type
-! Description / Purpose
-|-
-| 1
-| Sidechain Number
-| uint8_t
-| Links the withdrawal-request to a specific hashrate escrow.
-|-
-| 2
-| Bundle Hash
-| uint256
-| A withdrawal attempt. Specifically, it is a "blinded transaction id" (ie, the double-Sha256 of a txn that has had two fields zeroed out, see M6) of a txn which could withdraw funds from a sidechain.
-|-
-| 3
-| ACKs (Work Score)
-| uint16_t
-| The current ACK-counter, which is the total number of ACKs (the PoW that has been used to validate the Bundle).
-|-
-| 4
-| Blocks Remaining (Age)
-| uint16_t
-| The number of blocks which this Bundle has remaining to accumulate ACKs
-|}
+Sidechain withdrawals take the form of “Bundles” -- named because they "bundle up" many individual withdrawal-requests into a single rare layer1 transaction.
-A hash of D2 exists in each coinbase txn, and has consensus-significance.
+Sidechain full nodes aggregate the withdrawal-requests into a big set. The sidechain calculates what M6 would have to look like to pay all of these withdrawal-requests out. Finally, the sidechain calculates what the hash of this M6 would be. This 32-byte hash identifies the Bundle.
-==== D2 Validation Rules ====
+This 32-byte hash is what miners will be slowly ACKing over 3-6 months, not the M6 itself (nor any sidechain data, of course).
-# The D2 hash commitment must be in each block (unless D2 is blank).
-# The Bundles must be listed in a canonical order (so that the hashes match).
-# From one block to the next, "Age" fields must increase by exactly 1 (ie, Blocks Remaining decreases by 1).
-# Bundles are stored in D2 until they fail (which occurs at "Age" = "MaxAge"), or they succeed (Bundle is paid out).
-# From one block to the next, the value in the ACKs field (ACK-counter) can increase or decrease by a maximum of 1 (see below).
+A bundle either pays all its withdrawals out (via M6), or else it fails (and pays nothing out).
-If a Bundle succeeds (in D2), it "becomes" an M6 message and is included in a block.
+===== Bundle Hash = Blinded TxID of M6 =====
-So, first: how do we add a Bundle to D2?
+The Bundle hash is static as it is being ACKed. Unfortunately, the M6 TxID will be constantly changing -- as users deposit to the sidechain, the input to M6 will change.
-==== M3 -- "Propose Bundle" ====
+To solve this problem, we do something conceptually similar to AnyPrevOut (BIP 118). We define a "blinded TxID" as a way of hashing a txn, in which some bytes are first overwritten with zeros. These are: the first input and the first output. Via the former, a sidechain can accept deposits, even if we are acking a TxID that spends from it later. Via the latter, we can force all of the non-withdrawn coins to be returned to the sidechain (even if we don't yet know how many coins this will be).
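+
+A non-normative sketch of this "blinded TxID" (the <code>tx</code> object and its helpers are hypothetical; only the zeroing of the first input and first output, and the double-SHA256, come from the text above):
+
+<pre>
+import copy, hashlib
+
+def sha256d(data: bytes) -> bytes:
+    return hashlib.sha256(hashlib.sha256(data).digest()).digest()
+
+def blinded_txid(tx) -> bytes:
+    blinded = copy.deepcopy(tx)
+    blinded.inputs[0] = blinded.zeroed_input()    # first input (the ever-changing CTIP) -> zeros
+    blinded.outputs[0] = blinded.zeroed_output()  # first output (change back to the escrow) -> zeros
+    return sha256d(blinded.serialize())           # this is the 32-byte hash that gets ACKed
+</pre>
+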
+==== M3 -- Propose Bundle ====
-Nodes will add an entry to D2 if there is a coinbase output with the following:
+M3 is a coinbase OP Return output containing the following:
1-byte - OP_RETURN (0x6a)
4-byte - Commitment header (0xD45AA943)
32-byte - The Bundle hash, to populate a new D2 entry
+The new validation rules pertaining to M3 are:
-==== M3 Validation Rules ====
-
-# If the network detects a properly-formatted M3, it must add an entry to D2 in the very next block. The starting Blocks Remaining value is 26,299. The starting ACKs count is 1.
+# If the network detects a properly-formatted M3, it must add an entry to D2 in the very next block. The starting "Blocks Remaining" value is 26,299. The starting ACKs count is 1.
# Each block can only contain one M3 per sidechain.
Once a Bundle is in D2, how can we give it enough ACKs to make it valid?
-==== M4 -- "ACK Withdrawal" ====
+==== M4 -- ACK Bundle(s) ====
-From one block to the next, "ACKs" can only change as follows:
+M4 is a coinbase OP Return output containing the following:
-* The ACK-counter of any Bundle can only change by (-1,0,+1).
-* Within a sidechain-group, upvoting one Bundle ("+1") requires you to downvote all other Bundles in that group. However, the minimum ACK-counter is zero.
-* While only one Bundle can be upvoted at once; the whole group can all be unchanged at once ("abstain"), and they can all be downvoted at once ("alarm").
+ 1-byte - OP_RETURN (0x6a)
+ 4-byte - Commitment header (0xD77D1776)
+ 1-byte - Version
+ n-byte - The vector describing the "upvoted" bundle-choice, for each sidechain.
-M4 does not need to be explicitly transmitted. It can simply be inferred from the new state of D2. M4 can therefore be improved over time, without affecting consensus.
+Version 0x01 uses one byte per sidechain, and applies in most cases. Version 0x02 uses two bytes per sidechain and applies in unusual situations where at least one sidechain has more than 256 distinct withdrawal-bundles in progress at one time. Other interesting versions are possible: 0x03 might say "do exactly what was done in the previous block" (which could consume a fixed 6 bytes total, regardless of how many sidechains). 0x04 might say "upvote everyone who is clearly in the lead" (which also would require a mere 6 bytes), and so forth.
-Nonetheless, one option for explicit transmission of M4 is [https://github.com/drivechain-project/mainchain/blob/8901d469975752d799b6a7a61d4e00a9a124028f/src/validation.cpp#L3735-L3790 in our code].
+If a sidechain has no pending bundles, then it is skipped over when M4 is created and parsed.
-Often, M4 does not need to be transmitted at all. If there are n Sidechains and m Withdrawals-per-sidechain, then there are (m+2)^n total candidates for the next D2. So, when m and n are low, all of the possible D2s can be trivially computed in advance.
+The upvote vector will code "abstain" as 0xFF (or 0xFFFF); it will code "alarm" as 0xFE (or 0xFFFE). Otherwise it simply indicates which withdrawal-bundle in the list is the one to be "upvoted". For example, if there are two sidechains, and we wish to upvote the 7th bundle on sidechain #1 plus the 4th bundle on sidechain #2, then the vector would be 0x0704.
-Miners can impose a "soft limit" on m, blocking new withdrawal-attempts until previous ones expire. Even if they fail to do this, a a worst-case scenario of n=200 and m=1,000, honest nodes can communicate the M4 with ~25 KB per block [4+1+1+(200\*(1000+1+1)/8)].
+The M4 message will be invalid (and invalidate the block) if it tries to upvote a Bundle that doesn't exist (for example, trying to upvote the 7th bundle on sidechain #2, when sidechain #2 has only three bundles). If there are no Bundles at all (no one is trying to withdraw from any sidechain), then *any* M4 message present in the coinbase will be invalid. If M4 is NOT present in a block, then it is treated as "abstain".
-Finally, we give Deposits and Withdrawals.
+The ACKed withdrawal will gain one point for its ACK field. Therefore, the ACK-counter of any Bundle can only change by (-1,0,+1).
-=== Deposits and Withdrawals (M5, M6) ===
+Within a sidechain-group, upvoting one Bundle ("+1") requires you to downvote all other Bundles in that group. However, the minimum ACK-counter is zero. Only one Bundle can be upvoted at a time, but the whole group can be left unchanged at once ("abstain"), or downvoted all at once ("alarm").
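+
+A non-normative sketch of the version-0x01 upvote vector described above (the helper names are hypothetical; sidechains with no pending bundles are assumed to be skipped by the caller):
+
+<pre>
+ABSTAIN, ALARM = "abstain", "alarm"
+
+def build_m4_output(votes):
+    """votes: one entry per sidechain with pending bundles, in sidechain order.
+    Each entry is ABSTAIN, ALARM, or the position of the bundle to upvote."""
+    vector = bytearray()
+    for v in votes:
+        if v == ABSTAIN:
+            vector.append(0xFF)
+        elif v == ALARM:
+            vector.append(0xFE)
+        else:
+            vector.append(v)          # which withdrawal-bundle in that sidechain's list to upvote
+    # 0x6a = OP_RETURN, then the 4-byte commitment header, the version byte, and the vector
+    return bytes([0x6a]) + bytes.fromhex("D77D1776") + bytes([0x01]) + bytes(vector)
+</pre>
+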
-Both M5 and M6 are regular Bitcoin txns. They are identified, as Deposits/Withdrawals, when they select one of the special CTIP UTXOs as one of their inputs (see D1).
+Finally, we describe Deposits and Withdrawals.
-All of a sidechain’s coins, are stored in one UTXO. (Deposits/Withdrawals never cause UTXO bloat.) So, each Deposit/Withdrawal must select a CTIP, and generate a new CTIP (this is tracked in D1, above).
+==== M5 -- Deposit BTC to Sidechain ====
-If the from-CTIP-to-CTIP quantity of coins goes '''up''', (ie, if the user is adding coins), then the txn is treated as a Deposit (M5). Else it is treated as a Withdrawal (M6). See [https://github.com/drivechain-project/mainchain/blob/8901d469975752d799b6a7a61d4e00a9a124028f/src/validation.cpp#L668-L781 here].
+Both M5 and M6 are regular Bitcoin txns. They are distinguished from regular txns (non-M5, non-M6 txns) by the fact that they select one of the special Bip300 CTIP UTXOs as one of their inputs (see D1).
+All of a sidechain’s coins are stored in one UTXO, called the "CTIP". Every time a deposit or withdrawal is made, the CTIP changes. Each deposit/withdrawal will select the sidechain’s CTIP and generate a new CTIP. (Deposits/Withdrawals never cause UTXO bloat.) The current CTIP is cached in D1 (above).
-==== M5. "Make a Deposit" -- a transfer of BTC from-main-to-side ====
+If the '''quantity of coins''' in the from-CTIP-to-CTIP transaction goes '''up''' (ie, if the user is adding coins), then the txn is treated as a Deposit (M5). Else it is treated as a Withdrawal (M6). See [https://github.com/drivechain-project/mainchain/blob/e37b008fafe0701b8313993c8b02ba41dc0f8a29/src/validation.cpp#L667-L780 here].
As far as mainchain consensus is concerned, all deposits to a sidechain are always valid.
-==== M6. "Execute Withdrawal" -- a transfer of BTC from-side-to-main ====
+==== M6 -- Withdraw BTC from a Sidechain ====
We come, finally, to the critical matter: where users can take their money *out* of the sidechain.
-In each block, a Bundle in D2 is considered "approved" if its "ACK-counter" value meets the threshold (13,150).
-
+First, M6 must obey the same CTIP rules as M5 (see immediately above).
-The Bundle must meet all these criteria:
+Second, an M6 is only valid for inclusion in a block if its blinded TxID matches an "approved" Bundle hash (ie, one with an ACK score of 13150+). In other words, an M6 can only be included in a block after the 3+ month (13150 block) ceremony.
-# "Be ACKed" -- The "blinded TxID" of this txn must be a member of the "approved candidate" set in the D2 of this block.
-# "Return Change to Account" -- TxOut0 must pay coins back to the sidechain's CTIP.
-# "Return *all* Change to Account" -- Sum of inputs must equal the sum of outputs. No traditional tx fee is possible.
+Third, M6 must meet two accounting criteria, lest it be invalid:
+# "Give change back to Escrow" -- The first output, TxOut0, must be paid back to the sidechain's Bip300 script. In other words, all non-withdrawn coins must be paid back into the sidechain.
+# "No traditional txn fee" -- For this txn, the sum of all inputs must equal the sum of all outputs. No traditional tx fee is possible. (Of course, there is still a txn fee for miners: it is paid via an OP TRUE output in the Bundle.) We want the withdraw-ers to set the fee "inside" the Bundle, and ACK it over 3 months like everything else.
==Backward compatibility==
-As a soft fork, older software will continue to operate without modification. Non-upgraded nodes will see a number of phenomena that they don't understand -- coinbase txns with non-txn data, value accumulating in anyone-can-spend UTXOs for months at a time, and then random amounts leaving the UTXO in single, infrequent bursts. However, these phenomena don't affect them, or the validity of the money that they receive.
+As a soft fork, older software will continue to operate without modification. Non-upgraded nodes will see a number of phenomena that they don't understand -- coinbase txns with non-txn data, value accumulating in anyone-can-spend UTXOs for months at a time, and then random amounts leaving these UTXOs in single, infrequent bursts. However, these phenomena don't affect them, or the validity of the money that they receive.
-( As a nice bonus, note that the sidechains themselves inherit a resistance to hard forks. The only way to guarantee that a sidechain's Bundles will continue to match identically, is to upgrade sidechains via soft forks of themselves. )
+( As a nice bonus, note that the sidechains themselves inherit a resistance to hard forks. The only way to guarantee that all different sidechain-nodes will always report the same Bundle, is to upgrade sidechains via soft forks of themselves. )
==Deployment==
+This BIP will be deployed via UASF-style block height activation. Block height TBD.
-This BIP will be deployed by "version bits" BIP9 with the name "hrescrow" and using bit 4.
-<pre>
-// Deployment of Drivechains (BIPX, BIPY)
-consensus.vDeployments[Consensus::DEPLOYMENT_DRIVECHAINS].bit = 4;
-consensus.vDeployments[Consensus::DEPLOYMENT_DRIVECHAINS].nStartTime = 1642276800; // January 15th, 2022.
-consensus.vDeployments[Consensus::DEPLOYMENT_DRIVECHAINS].nTimeout = 1673812800; // January 15th, 2023.
-</pre>
==Reference Implementation==
+See: https://github.com/drivechain-project/mainchain
-See: https://github.com/DriveNetTESTDRIVE/DriveNet
-
-Also, for interest, see an example sidechain here: https://github.com/drivechain-project/bitcoin/tree/sidechainBMM
+Also, for interest, see an example sidechain here: https://github.com/drivechain-project/sidechains/tree/testchain
==References==
+https://github.com/drivechain-project/mainchain
+https://github.com/drivechain-project/sidechains/tree/testchain
See http://www.drivechain.info/literature/index.html
diff --git a/bip-0300/m1-cli.png b/bip-0300/m1-cli.png
index 2454848..3361b28 100644
--- a/bip-0300/m1-cli.png
+++ b/bip-0300/m1-cli.png
Binary files differ
diff --git a/bip-0300/m1-gui.jpg b/bip-0300/m1-gui.jpg
index edae79c..2b05556 100644
--- a/bip-0300/m1-gui.jpg
+++ b/bip-0300/m1-gui.jpg
Binary files differ
diff --git a/bip-0301.mediawiki b/bip-0301.mediawiki
index 2f6b79e..e1522f8 100644
--- a/bip-0301.mediawiki
+++ b/bip-0301.mediawiki
@@ -161,28 +161,20 @@ As with all previous soft forks, non-upgraded users are indirectly affected, in
==Deployment==
-This BIP will be deployed by "version bits" BIP9 with the name "blindmm" and using bit 4.
-
-<pre>
-// Deployment of Drivechains (BIPX, BIPY)
-consensus.vDeployments[Consensus::DEPLOYMENT_DRIVECHAINS].bit = 4;
-consensus.vDeployments[Consensus::DEPLOYMENT_DRIVECHAINS].nStartTime = 1642276800; // January 15th, 2022.
-consensus.vDeployments[Consensus::DEPLOYMENT_DRIVECHAINS].nTimeout = 1673812800; // January 15th, 2023.
-</pre>
+This BIP will be deployed via UASF-style block height activation. Block height TBD.
==Reference Implementation==
-See: https://github.com/DriveNetTESTDRIVE/DriveNet
+See: https://github.com/drivechain-project/mainchain
-Also, for interest, see an example sidechain here: https://github.com/drivechain-project/bitcoin/tree/sidechainBMM
+Also, for interest, see an example sidechain here: https://github.com/drivechain-project/sidechains/tree/testchain
==References==
* http://www.drivechain.info/literature/index.html
* http://www.truthcoin.info/blog/blind-merged-mining/
-* https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2017-July/014789.html
* http://www.truthcoin.info/images/bmm-outline.txt
diff --git a/bip-0324.mediawiki b/bip-0324.mediawiki
new file mode 100644
index 0000000..ced459b
--- /dev/null
+++ b/bip-0324.mediawiki
@@ -0,0 +1,590 @@
+<pre>
+ BIP: 324
+ Layer: Peer Services
+ Title: Version 2 P2P Encrypted Transport Protocol
+ Author: Dhruv Mehta <dhruv@bip324.com>
+ Tim Ruffing <crypto@timruffing.de>
+ Jonas Schnelli <dev@jonasschnelli.ch>
+ Pieter Wuille <bitcoin-dev@wuille.net>
+ Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0324
+ Status: Draft
+ Type: Standards Track
+ Created: 2019-03-08
+ License: BSD-3-Clause
+ Replaces: 151
+</pre>
+
+== Introduction ==
+
+=== Abstract ===
+This document proposes a new Bitcoin P2P transport protocol, which features opportunistic encryption, a mild bandwidth reduction, and the ability to negotiate upgrades before exchanging application messages.
+
+=== Copyright ===
+This document is licensed under the 3-clause BSD license.
+
+=== Motivation ===
+Bitcoin is a permissionless network whose purpose is to reach consensus over public data. Since all data relayed in the Bitcoin P2P network is inherently public, and the protocol lacks a notion of cryptographic identities, peers talk to each other over unencrypted and unauthenticated connections. Nevertheless, this plaintext nature of the current P2P protocol (referred to as v1 in this document) has severe drawbacks in the presence of attackers:
+
+* While the relayed data itself is public in nature, the associated metadata may reveal private information and hamper privacy of users. For example, a global passive attacker eavesdropping on all Bitcoin P2P connections can trivially identify the source and timing of a transaction.
+* Since connections are unauthenticated, they can be tampered with at a low cost and often even with a low risk of detection. For example, an attacker can alter specific bytes of a connection (such as node flags) on-the-fly without the need to keep any state.
+* The protocol is self-revealing. For example, deep packet inspection can identify a P2P connection trivially because connections start with a fixed sequence of magic bytes. The ability to detect connections enables censorship and facilitates the aforementioned attacks as well as other attacks which require the attacker to control the connections of victims, e.g., eclipse attacks targeted at miners.
+
+This proposal for a new P2P protocol version (v2) aims to improve upon this by raising the costs for performing these attacks substantially, primarily through the use of unauthenticated, opportunistic transport encryption. In addition, the bytestream on the wire is made pseudorandom (i.e., indistinguishable from uniformly random bytes) to a passive eavesdropper.
+
+* Encryption, even when it is unauthenticated and only used when both endpoints support v2, impedes eavesdropping by forcing the attacker to become active: either by performing a persistent man-in-the-middle (MitM) attack, by downgrading connections to v1, or by spinning up their own nodes and getting honest nodes to make connections to them. Active attacks at scale are more resource intensive in general, but in the case of manual, deliberate connections (as opposed to automatic, random ones), they are also in principle detectable: even very basic checks, e.g., operators manually comparing protocol versions and session IDs (as supported by the proposed protocol), will expose the attacker.
+* Tampering, while already an inherently active attack, is costlier if the attacker is forced to maintain the state necessary for a full MitM interception.
+* A pseudorandom bytestream excludes identification techniques based on pattern matching, and makes it easier to shape the bytestream in order to mimic other protocols used on the Internet. This raises the cost of a connection censoring firewall, forcing them to either resort to a full MitM attack, or operate on a more obvious allowlist basis, rather than a blocklist basis.
+
+''' Why encrypt without authentication?'''
+
+As we have argued above, unauthenticated encryption<ref name="what_does_auth_mean">'''What does ''authentication'' mean in this context?''' Unfortunately, the term authentication in the context of secure channel protocols is ambiguous. It can refer to:
+* The encryption scheme guaranteeing that a message obtained via successful decryption was encrypted by someone having access to the (symmetric) encryption key, and not modified after encryption by a third party. The proposal in this document achieves that property through the use of an AEAD.
+* The communication protocol establishing that the communication partner's identity matches who we expect them to be, through some public key mechanism. The proposal in this document does '''not''' include such a mechanism.</ref> provides strictly better security than no encryption. Thus, all connections should use encryption, even if they are unauthenticated.
+
+When it comes to authentication, the situation is not as clear as for encryption. Due to Bitcoin's permissionless nature, authentication will always be restricted to specific scenarios (e.g., connections between peers belonging to the same operator), and whether some form of (possibly partially anonymous) authentication is desired depends on the specific requirements of the involved peers. As a consequence, we believe that authentication should be addressed separately (if desired), and this proposal aims to provide a solid technical basis for future protocol upgrades, including the addition of optional authentication (see [https://github.com/sipa/writeups/tree/main/private-authentication-protocols Private authentication protocols]).
+
+''' Why have a pseudorandom bytestream when traffic analysis is still possible? '''
+
+Traffic analysis, e.g., observing packet lengths and timing, as well as active attacks can still reveal that the Bitcoin v2 P2P protocol is in use. Nevertheless, a pseudorandom bytestream raises the cost of fingerprinting the protocol substantially, and may force some intermediaries to attack any protocol they cannot identify, causing collateral cost.
+
+A pseudorandom bytestream is not self-identifying. Moreover, it is unopinionated and thus a canonical choice for similar protocols. As a result, Bitcoin P2P traffic will be indistinguishable from traffic of other protocols which make the same choice (e.g., [https://gitlab.com/yawning/obfs4 obfs4] and a recently proposed [https://datatracker.ietf.org/doc/draft-cpbs-pseudorandom-ctls/ cTLS extension]). Moreover, traffic shapers and protocol wrappers (for example, making the traffic look like HTTPS or SSH) can further mitigate traffic analysis and active attacks but are out of scope for this proposal.
+
+''' Why not use a secure tunnel protocol? '''
+
+Our goal includes making opportunistic encryption ubiquitously available, as that provides the best defense against large-scale attacks. That implies protecting both the manual, deliberate connections node operators instruct their software to make, and the automatic connections Bitcoin nodes make with each other based on IP addresses obtained via gossip. While encryption per se is already possible with proxy networks or VPN protocols, these are not desirable or applicable for automatic connections at scale:
+* Proxy networks like Tor or I2P introduce a separate address space, independent of network topology, with a very low cost per address making eclipse attacks cheaper. In comparison, clearnet IPv4 and IPv6 networks make obtaining multiple network identities in distinct, well-known network partitions carry a non-trivial cost. Thus, it is not desirable to have a substantial portion of nodes be exclusively connected this way, as this would significantly reduce Eclipse attack costs.<ref name="pure_tor_attack">'''Why is it a bad idea to have nodes exclusively connected over Tor?''' See the [https://arxiv.org/abs/1410.6079 Bitcoin over Tor isn't a Good Idea] paper</ref> Additionally, Tor connections come with significant bandwidth and latency costs that may not be desirable for all network users.
+* VPN protocols like WireGuard or OpenVPN inherently define a private network, which requires manual configuration and therefore is not a realistic avenue for automatic connections.
+
+Thus, to achieve our goal, we need a solution that has minimal costs, works without configuration, and is always enabled – on top of any network layer rather than be part of the network layer.
+
+''' Why not use a general-purpose transport encryption protocol? '''
+
+While it would be possible to rely on an off-the-shelf transport encryption protocol such as TLS or Noise, the specific requirements of the Bitcoin P2P network laid out above make these protocols an unsuitable choice.
+
+The primary requirement which existing protocols fail to meet is a sufficiently modular treatment of encryption and authentication. As we argue above, whether and which form of authentication is desired in the Bitcoin P2P network will depend on the specific requirements of the involved peers (resulting in a mix of authenticated and unauthenticated connections), and thus the question of authentication should be decoupled from encryption. However, native support for a handful of standard authentication scenarios (e.g., using digital signatures and certificates) is at the core of the design of existing general-purpose transport encryption protocols. This focus on authentication would not provide clear benefits for the Bitcoin P2P network but would come with a large amount of additional complexity.
+
+In contrast, our proposal instead aims for a simple modular design that makes it possible to address authentication separately. Our proposal provides a foundation for authentication by exporting a ''session ID'' that uniquely identifies the encrypted channel. After an encrypted channel has been established, the two endpoints are able to use any authentication protocol to confirm that they have the same session ID. (This is sometimes called ''channel binding'' because the session ID binds the encrypted channel to the authentication protocol.) Since in our proposal, any authentication needs to run after an encrypted connection has been established, the price we pay for this modularity is a possibly higher number of roundtrips as opposed to other protocols that perform authentication alongside the Diffie-Hellman key exchange.<ref name="channel_binding_noise_tls">'''Do other protocols not support exporting a session ID?''' While [https://noiseprotocol.org/noise.html#channel-binding Noise] and [https://datatracker.ietf.org/doc/draft-ietf-kitten-tls-channel-bindings-for-tls13/ TLS (as a draft)] offer similar protocol extensions for exporting session IDs, using channel binding for authentication is not at the focus of their design and would not avoid the bulk of additional complexity due to the native support of authentication methods. </ref> However, the resulting increase in connection establishment latency is not a concern for Bitcoin's long-lived connections, [https://www.dsn.kastel.kit.edu/bitcoin/ which typically live for hours or even weeks].
+
+Besides this fundamentally different treatment of authentication, further technical issues arise when applying TLS or Noise to our desired use case:
+
+* Neither offers a pseudorandom bytestream.
+* Neither offers native support for elliptic curve cryptography on the curve secp256k1 as otherwise used in Bitcoin. While using secp256k1 is not strictly necessary, it is the obvious choice for any new asymmetric cryptography in Bitcoin because it minimizes the cryptographic hardness assumptions as well as the dependencies that Bitcoin software will need.
+* Neither offers shapability of the bytestream.
+* Both provide a stream-based interface to the application layer, whereas Bitcoin requires a packet-based interface, resulting in the need for an additional thin layer to perform packet serialization and deserialization.
+
+While existing protocols could be amended to address all of the aforementioned issues, this would negate the benefits of using them as off-the-shelf solution, e.g., the possibility to re-use existing implementations and security analyses.
+
+== Goals ==
+
+This proposal aims to achieve the following properties:
+
+* Confidentiality against passive attacks: A passive attacker having recorded a v2 P2P bytestream (without timing and fragmentation information) must not be able to determine the plaintext being exchanged by the nodes.
+* Observability of active attacks: A session ID identifying the encrypted channel uniquely is derived deterministically from a Diffie-Hellman negotiation. An active man-in-the-middle attacker is forced to incur a risk of being detected as peer operators can compare session IDs manually, or using optional authentication methods possibly introduced in future protocol versions.
+* Pseudorandom bytestream: A passive attacker having recorded a v2 P2P bytestream (without timing information and fragmentation information) must not be able to distinguish it from a uniformly random bytestream.
+* Shapable bytestream: It should be possible to shape the bytestream to increase resistance to traffic analysis (for example, to conceal block propagation) or to aid censorship avoidance.<ref name="shapable_hs_tor_circumvention">'''How can shapability help circumvent fragmentation-pattern based censoring?''' See [https://gitlab.torproject.org/legacy/trac/-/issues/20348#note_2229522 this Tor issue] as an example.</ref>
+* Forward secrecy: An eavesdropping attacker who compromises a peer's session secrets should not be able to decrypt past session traffic, except for the latest few packets.
+* Upgradability: The proposal provides an upgrade path using transport versioning which can be used to add features like authentication, PQC handshake upgrade, etc. in the future.
+* Compatibility: v2 clients will allow inbound v1 connections to minimize risk of network partitions.
+* Low overhead: the introduction of a new P2P transport protocol should not substantially increase computational cost or bandwidth for nodes that implement it, compared to the current protocol.
+
+== Specification ==
+
+The specification consists of three parts:
+
+* The '''Transport layer''' concerns how to set up an encrypted connection between two nodes, capable of transporting application-level messages between them.
+* The '''Application layer''' concerns how to encode Bitcoin P2P messages and commands for transport by the Transport Layer.
+* The '''Signaling''' concerns how v2 nodes advertise their support for the v2 protocol to potential peers.
+
+=== Transport layer specification ===
+
+In this section, we define the encryption protocol for messages between peers.
+
+==== Overview and design ====
+
+We first give an informal overview of the entire protocol flow and packet encryption.
+
+'''Protocol flow overview'''
+
+Given a newly established connection (typically TCP/IP) between two v2 P2P nodes, there are 3 phases the connection goes through. The first starts immediately, i.e. there are no v1 messages or any other bytes exchanged on the link beforehand. The two parties are called the '''initiator''' (who established the connection) and the '''responder''' (who accepted the connection).
+
+# The '''Key exchange phase''', where nodes exchange data to establish shared secrets.
+#* The initiator:
+#** Generates a random ephemeral secp256k1 private key and sends a corresponding 64-byte ElligatorSwift<ref name="ellswift_paper">'''What is ElligatorSwift and why use it?''' The [https://eprint.iacr.org/2022/759.pdf SwiftEC paper] describes a method called ElligatorSwift which allows encoding elliptic curve points in a way that is indistinguishable from a uniformly distributed bitstream. While a random 256-bit string has about 50% chance of being a valid X coordinate on the secp256k1 curve, every 512-bit string is a valid ElligatorSwift encoding of a curve point, making the encoded point indistinguishable from random when using an encoder that can sample uniformly.</ref><ref name="ellswift_perf">'''How fast is ElligatorSwift?''' Our benchmarks show that ElligatorSwift encoded ECDH is about 50% more expensive than unencoded ECDH. Given the fast performance of ECDH and the low frequency of new connections, we found the performance trade-off acceptable for the pseudorandom bytestream and future censorship resistance it can enable.</ref>-encoded public key to the responder.
+#** May send up to 4095<ref name="why_4095_garbage">'''How was the limit of 4095 bytes garbage chosen?''' It is a balance between having sufficient freedom to hide information, and allowing it to be large enough so that the necessary 64 bytes of public key is small compared to it on the one hand, and bandwidth waste on the other hand.</ref> bytes of arbitrary data after their public key, called '''garbage''', providing a form of shapability and avoiding a recognizable pattern of exactly 64 bytes.<ref name="why_garbage">'''Why does the affordance for garbage exist in the protocol?''' The garbage strings after the public keys are needed for shapability of the handshake. Neither peer can send decoy packets before having received at least the other peer's public key, i.e., neither peer can send more than 64 bytes before having received 64 bytes.</ref>
+#* The responder:
+#** Waits until one byte is received which does not match the 12 bytes consisting of the network magic followed by "version\x00". If the first 12 bytes do match, the connection is treated as using the v1 protocol instead.<ref name="why_no_prefix_check">'''What if a v2 initiator's public key starts accidentally with these 12 bytes?''' This is so unlikely (probability of ''2<sup>-96</sup>'') to happen randomly in the v2 protocol that the initiator does not need to specifically avoid it.</ref><ref>Bitcoin Core versions <=0.4.0 and >=22.0 ignore valid P2P messages that are received prior to a VERSION message. Bitcoin Core versions between 0.4.0 and 22.0 assign a misbehavior score to the peer upon receiving such messages. v2 clients implementing this proposal will interpret any message other than VERSION received as the first message to be the initiation of a v2 connection, and will result in disconnection for v1 initiators that send any message type other than VERSION as the first message. We are not aware of any implementations where this could pose a problem.</ref>
+#** Similarly generates a random ephemeral private key and sends a corresponding 64-byte ElligatorSwift-encoded public key to the initiator.
+#** Similarly may send up to 4095 bytes of garbage data after their public key.
+#* Both parties:
+#** Receive (the remainder of) the full 64-byte public key from the other side.
+#** Use X-only<ref name="xonly_ecdh">'''Why use X-only ECDH?''' Using only the X coordinate provides the same security as using a full encoding of the secret curve point but allows for more efficient implementation by avoiding the need for square roots to compute Y coordinates.</ref> ECDH to compute a shared secret from their private key and the exchanged public keys<ref name="why_ecdh_pubkeys">'''Why is the shared secret computation a function of the exact 64-byte public encodings sent?''' This makes sure that an attacker cannot modify the public key encoding used without modifying the rest of the stream. If a third party wants the ability to modify stream bytes, they need to perform a full MitM attack on the connection.</ref>, and deterministically derive from the secret 4 '''encryption keys''' (two in each direction: one for packet lengths, one for content encryption), a '''session id''', and two 16-byte '''garbage terminators'''<ref>'''What length is sufficient for garbage terminators?''' The length of the garbage terminators determines the probability of accidental termination of a legitimate v2 connection due to garbage bytes (sent prior to ECDH) inadvertently including the terminator. 16 byte terminators with 4095 bytes of garbage yield a negligible probability of such collision which is likely orders of magnitude lower than random connection failure on the Internet.</ref><ref>'''What does a garbage terminator in the wild look like?''' <div>[[File:bip-0324/garbage_terminator.png|none|256px|A garbage terminator model TX-v2 in the wild... sent by the responder]]</div>
+</ref> (one in each direction) using HKDF-SHA256.
+#** Send their 16-byte garbage terminator<ref name="why_garbage_term">'''Why does the protocol need a garbage terminator?''' While it is in principle possible to use the garbage authentication packet directly as a terminator (scan until a valid authentication packet follows), this would be significantly slower than just scanning for a fixed byte sequence, as it would require recomputing a Poly1305 tag after every received byte.</ref> followed by a '''garbage authentication packet'''<ref name="why_garbage_auth">'''Why does the protocol require a garbage authentication packet?''' Without the garbage authentication packet, the garbage would be modifiable by a third party without consequences. We want to force any active attacker to have to maintain a full protocol state. In addition, such malleability without the consequence of connection termination could enable protocol fingerprinting.</ref>, an '''encrypted packet''' (see further) with arbitrary '''contents''', and '''associated data''' equal to the garbage.
+#** Receive up to 4111 bytes, stopping when encountering the garbage terminator.
+#** Receive an encrypted packet, verify that it decrypts correctly with associated data set to the garbage received, and then ignore its contents.
+#* At this point, both parties have the same keys, and all further communication proceeds in the form of encrypted packets. Packets have an '''ignore bit''', which makes them '''decoy packets''' if set. Decoy packets are to be ignored by the receiver apart from verifying they decrypt correctly. Either peer may send such decoy packets at any point after this. These form the primary shapability mechanism in the protocol. How and when to use them is out of scope for this document.
+# The '''Version negotiation phase''', where parties negotiate what transport version they will use, as well as data defined by that version.<ref name="example_versions">'''What features could be added in future protocol versions?''' Examples of features that could be added in future versions include post-quantum cryptography upgrades to the handshake, and optional authentication.</ref>
+#* The responder:
+#** Sends a '''version packet''' with empty content, to indicate support for the v2 P2P protocol proposed by this document. Any other value for content is reserved for future versions.
+#* The initiator:
+#** Receives a packet, ignores its contents. The idea is that features added by future versions get negotiated based on what is supported by both parties. Since there is just one version so far, the contents here can simply be ignored. But in the future, receiving a non-empty contents here may trigger other behavior; we defer specifying the encoding for such version content until there is a need for it.<ref name="version_negotiation">'''How will future versions encode version numbers in the version packet?''' Future versions could, for example, specify that the contents of the version packet is to be interpreted as an integer version number (with empty representing 0), and if the minimum of both numbers is N, that being interpreted as choosing a "v2.N" protocol version. Alternatively, certain bytes of the version packet contents could be interpreted as a bitvector of optional features.</ref>
+#** Sends a '''version packet''' with empty content as well, to indicate support for the v2 P2P protocol.
+#* The responder:
+#** Receives a packet, ignores its contents.
+# The '''Application phase''', where the packets exchanged have contents to be interpreted as application data.
+#* Whenever either peer has a message to send, it sends a packet with that application message as '''contents'''.
+
+To avoid the recognizable pattern of first messages being at least 64 bytes, a future backwards-compatible upgrade to this protocol may allow both peers to send their public key + garbage + garbage terminator in multiple rounds, slicing those bytes up into messages arbitrarily, as long as progress is guaranteed.<ref name="handshake_progress">'''How can progress be guaranteed in a backwards-compatible way?''' In order to guarantee progress, it must be ensured that no deadlock occurs, i.e., no state is reached in which each party waits for the other party indefinitely. For example, any upgrade that adheres to the following conditions will guarantee progress:
+
+* The initiator must start by sending at least as many bytes as necessary to mismatch the 12-byte magic/version prefix.
+* The responder must start sending after having received at least one byte that mismatches that 12-byte prefix.
+* As soon as either party has received the other peer's garbage terminator, or has received 4095 bytes of garbage, they must send their own garbage terminator. (When either of these conditions is met, the other party has nothing to respond with anymore that would be needed to guarantee progress otherwise.)
+* Whenever either party receives any nonzero number of bytes, while not having sent their garbage terminator completely yet, they must send at least one byte in response without waiting for more bytes.
+* After either party has sent their garbage terminator, they must also send the garbage authentication packet without waiting for more bytes, and transition to the version negotiation phase.
+
+Since the protocol as specified here adheres to these conditions, any upgrade which also adheres to these conditions will be backwards-compatible.</ref>
+
+Note that the version negotiation phase does not need to wait for the key exchange phase to complete; version packets can be sent immediately after sending the garbage authentication packet. So the first two phases, jointly called '''the handshake''', comprise just 1.5 roundtrips:
+
+* the initiator sends public key + garbage
+* the responder sends public key + garbage + garbage terminator + garbage authentication packet + version packet
+* the initiator sends garbage terminator + garbage authentication packet + version packet
+
+'''Packet encryption overview'''
+
+All data on the wire after the garbage terminators takes the form of encrypted packets. Every packet encodes an encrypted variable-length byte array, called the '''contents''', as well as an '''ignore bit''' as mentioned before. The total size of a packet is 20 bytes plus the length of its contents.
+
+Each packet consists of:
+* A 3-byte encrypted '''length''' field, encoding the length of the '''contents''' (between ''0'' and ''2<sup>24</sup>-1''<ref name="max_packet_length">'''Is ''2<sup>24</sup>-1'' bytes sufficient as maximum content size?''' The current Bitcoin P2P protocol has no messages which support more than 4000000 bytes of application payload. By supporting up to ''2<sup>24</sup>-1'' we can accommodate future evolutions needing more than 4 times that value. Hypothetical protocol changes that have even more data to exchange than that should probably use multiple separate messages anyway, because of the per-peer receive buffer sizes involved, and the inability to start processing a message before it is fully received. Of course, future versions of the transport protocol could change the size of the length field, if this were really needed.</ref>, inclusive).
+* An authenticated encryption of the '''plaintext''', which consists of:
+** A 1-byte '''header''', which consists of transport-layer protocol flags. Currently, only the highest bit is defined as the '''ignore bit'''. The other bits are ignored, but this may change in future versions<ref>'''Why is the header a part of the plaintext and not included alongside the length field?''' The packet length field is the minimum information that must be available before we can leverage the standard RFC8439 AEAD. Keeping any other data, including metadata like the header, inside the content encryption makes it easier to reason about the protocol security w.r.t. data being used before it is authenticated. If the ignore bit were not part of the content, another mechanism would be needed to authenticate it; for example, it could be fed as AAD to the AEAD cipher. We feel the complexity of such an approach outweighs the benefit of saving one byte per message.</ref>.
+** The variable-length '''contents'''.
+
+The encryption of the plaintext uses '''[https://en.wikipedia.org/wiki/ChaCha20-Poly1305 ChaCha20Poly1305]'''<ref name="why_chacha20">'''Why is ChaCha20Poly1305 chosen as the basis for packet encryption?''' It is a very widely used authenticated encryption cipher (used among others in SSH, TLS 1.2, TLS 1.3, [https://en.wikipedia.org/wiki/QUIC QUIC], Noise, and [https://www.wireguard.com/protocol/ WireGuard]; in the latter it is currently even the only supported cipher), with very good performance in general purpose software implementations. While AES-based ciphers (including the winners in the [https://competitions.cr.yp.to/caesar.html CAESAR] competition in non-lightweight categories) perform significantly better on systems with AES hardware acceleration, they are also significantly slower in pure software implementations. We choose to optimize for the weakest hardware.</ref>, an [https://en.wikipedia.org/wiki/Authenticated_encryption authenticated encryption with associated data] (AEAD) cipher specified in [https://datatracker.ietf.org/doc/html/rfc8439 RFC 8439]. Every packet's plaintext is treated as a separate AEAD message, with a different nonce for each.
+
+The length must be dealt with specially, as it is needed to determine packet boundaries before the whole packet is received and authenticated. As we want a stream that is pseudorandom to a passive attacker, it still needs encryption. We use unauthenticated<ref name="why_no_len_auth">'''Why is the length encryption not separately authenticated?''' Informally, the relevant security goal we aim for is to hide the number of packets and their lengths (i.e., the packet boundaries) against a passive attacker that receives the bytestream without timing or fragmentation information. (A formal definition can be found for example in [https://himsen.github.io/pdf/thesis.pdf Hansen 2016 (Definition 22)] under the name "boundary hiding against chosen-plaintext attacks (BH-CPA)".) However, we do not aim to hide packet boundaries against active attackers because active attackers can always exploit the fact that the Bitcoin P2P protocol is largely query-response based: they can trickle the bytes on the stream one-by-one unmodified and observe when a response comes (see [https://himsen.github.io/pdf/thesis.pdf Hansen 2016 (Section 3.9)] for an in-depth discussion). With that in mind, we accept that an active (non-MitM) attacker is able to figure out some information about packet boundaries by flipping certain bits in the unauthenticated length field, and observing the other side disconnecting immediately or later. Thus, we choose to use unauthenticated encryption for the length data, which is sufficient to achieve boundary hiding against passive attackers, and saves 16 bytes of bandwidth per packet.</ref> '''ChaCha20''' encryption for this, with an independent key. Note that the plaintext length is still implicitly authenticated by the encryption of the plaintext, but this can only be verified after receiving the whole packet. This design is inspired by that of the ChaCha20Poly1305 cipher suite in [http://bxr.su/OpenBSD/usr.bin/ssh/PROTOCOL.chacha20poly1305 OpenSSH].<ref name="openssl_changes">'''How does packet encryption differ from the OpenSSH design?''' The differences are:
+* The length field is only 3 bytes instead of 4, as that is sufficient for our purposes.
+* Length encryption keeps drawing pseudorandom bytes from the same ChaCha20 cipher for multiple packets, rather than incrementing the nonce for every packet.
+* The Poly1305 authentication tag only covers the encrypted plaintext, and not the encrypted length field. This means that plaintext encryption uses the standard ChaCha20Poly1305 construction without any modifications, maximizing applicability of analysis and review of that cipher. The length encryption can be seen as a separate layer, using a separate key, and thus cannot affect any of the confidentiality or integrity guarantees of the plaintext encryption. On the other hand, this change w.r.t. OpenSSH also does not worsen any properties, as incorrect lengths will still trigger authentication failure for the overall packet (the plaintext length is implicitly authenticated by ChaCha20Poly1305).
+* The encryption ciphers are rekeyed every 224<ref name="rekey_interval">'''How was the rekeying interval 224 chosen?''' Assuming a node sends only ping messages every 20 minutes (the timeout interval for post-[https://github.com/bitcoin/bips/blob/master/bip-0031.mediawiki BIP31] connections) on a connection, the node will transmit 224 packets in about 3.11 days. This means ''soft rekeying'' after a fixed number of packets automatically translates to an upper bound on the time interval between rekeyings, while being much simpler to coordinate than an actual time-based rekeying regime. At the same time, doing it once every 224 messages is sufficiently infrequent that it has only negligible impact on performance. Furthermore, 224 times 3 bytes (the number of bytes consumed by each length encryption) is 672, which is a multiple of 64 minus 32. This means that at the end of 224 length encryptions, exactly 32 bytes of keystream data remain that can be used as the next key.</ref> messages, in order to provide forward security.
+</ref> Because only fixed-length chunks (3-byte length fields) are encrypted, we do not need to treat all length chunks as separate messages. Instead, a single cipher (with the same nonce) is used for multiple consecutive length fields. This avoids wasting 61 pseudorandom bytes per packet, and makes the cost of having a separate cipher for length encryption negligible.<ref name="ok_to_batch">'''Is it acceptable to use a less standard construction for length encryption?''' The fact that multiple (non-overlapping) bytes generated by a single ChaCha20 cipher are used for the encryption of multiple consecutive length fields is uncommon. We feel the performance cost gained by this deviation is worth it (especially for small packets, which are very common in Bitcoin's P2P protocol), given the low guarantees that are feasible for length encryption in the first place, and the result is still sufficient to provide pseudorandomness from the view of passive attackers. For plaintext encryption, we independently use a very standard construction, as the stakes for confidentiality and integrity there are much higher.</ref>
+
+In order to provide forward security<ref name="rekey">'''What value does forward security provide?''' Re-keying ensures [https://eprint.iacr.org/2001/035.pdf forward secrecy within a session], i.e., an attacker compromising the current session secrets cannot derive past encryption keys in the same session.</ref><ref>'''Why have a cipher with forward secrecy but no periodical refresh of the ECDH key exchange?''' Our cipher ratchets encryption keys forward in order to protect messages encrypted under ''past'' encryption keys. In contrast, re-performing ECDH key exchange would protect messages encrypted under ''future'' encryption keys, i.e., it would re-establish security after the attacker had compromised one of the peers ''temporarily'' (e.g., the attacker obtains a memory dump). We do not believe protecting against that is a priority: an attacker that, for whatever reason, is capable of an attack that reveals encryption keys (or other session secrets) of a peer once is likely capable of performing the same attack again after peers have re-performed the ECDH key exchange. Thus, we do not believe the benefits of re-performing key exchange outweigh the additional complexity that comes with the necessary coordination between the peers. We note that the initiator could choose to close and re-open the entire connection to force a refresh of the ECDH key exchange, but that introduces other issues: a connection slot needs to be kept open at the responder side, it is not cryptographically guaranteed that really the same initiator will use it, and the observable TCP reset and handshake may create a detectable pattern.</ref>, the encryption keys for both plaintext and length encryption are cycled every 224 messages, by switching to a new key that is generated by the key stream using the old key.
+
+==== Handshake: key exchange and version negotiation ====
+
+Next we specify the handshake of a connection in detail.
+
+As explained before, these messages are sent to set up the connection:
+
+<pre>
+ ----------------------------------------------------------------------------------------------------
+ | Initiator Responder |
+ | |
+ | x, ellswift_X = ellswift_create(initiating=True) |
+ | |
+ | --- ellswift_X + initiator_garbage (initiator_garbage_len bytes; max 4095) ---> |
+ | |
+ | y, ellswift_Y = ellswift_create(initiating=False) |
+ | ecdh_secret = v2_ecdh( |
+ | y, ellswift_X, ellswift_Y, initiating=False) |
+ | v2_initialize(initiator, ecdh_secret, initiating=False) |
+ | |
+ | <-- ellswift_Y + responder_garbage (responder_garbage_len bytes; max 4095) + |
+ | responder_garbage_terminator (16 bytes) + |
+ | v2_enc_packet(initiator, b'', aad=responder_garbage) + |
+ | v2_enc_packet(initiator, RESPONDER_TRANSPORT_VERSION) --- |
+ | |
+ | ecdh_secret = v2_ecdh(x, ellswift_Y, ellswift_X, initiating=True) |
+ | v2_initialize(responder, ecdh_secret, initiating=True) |
+ | |
+ | --- initiator_garbage_terminator (16 bytes) + |
+ | v2_enc_packet(responder, b'', aad=initiator_garbage) + |
+ | v2_enc_packet(responder, INITIATOR_TRANSPORT_VERSION) ---> |
+ | |
+ ----------------------------------------------------------------------------------------------------
+</pre>
+
+===== Shared secret computation =====
+
+The peers derive their shared secret by hashing the result of X-only ECDH together with the exact 64-byte public key encodings sent over the wire.
+
+<pre>
+def v2_ecdh(priv, ellswift_theirs, ellswift_ours, initiating):
+ ecdh_point_x32 = ellswift_ecdh_xonly(ellswift_theirs, priv)
+ if initiating:
+ # Initiating, place our public key encoding first.
+ return sha256_tagged("bip324_ellswift_xonly_ecdh", ellswift_ours + ellswift_theirs + ecdh_point_x32)
+ else:
+ # Responding, place their public key encoding first.
+ return sha256_tagged("bip324_ellswift_xonly_ecdh", ellswift_theirs + ellswift_ours + ecdh_point_x32)
+</pre>
+
+Here, <code>sha256_tagged(tag, x)</code> returns a tagged hash value <code>SHA256(SHA256(tag) || SHA256(tag) || x)</code> as in [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki#specification BIP340].
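+
+For illustration, a minimal Python sketch of this tagged hash (assuming the tag is passed as a text string, as in the pseudocode above) could look like:
+
+<pre>
+import hashlib
+
+def sha256_tagged(tag, x):
+    # BIP340-style tagged hash: SHA256(SHA256(tag) || SHA256(tag) || x).
+    tag_hash = hashlib.sha256(tag.encode()).digest()
+    return hashlib.sha256(tag_hash + tag_hash + x).digest()
+</pre>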
+
+===== ElligatorSwift encoding of curve X coordinates =====
+
+The functions <code>ellswift_create</code> and <code>ellswift_ecdh_xonly</code> encapsulate the construction of ElligatorSwift-encoded public keys, and the computation of X-only ECDH with
+ElligatorSwift-encoded public keys.
+
+First we define a constant:
+* Let ''c = 0xa2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f852''.<ref name="sqrt_minus3">'''What is the ''c'' constant used in ''XSwiftEC''?''' The algorithm requires a constant ''&radic;-3 (mod p)''; in other words, a number ''c'' such that ''-c<sup>2</sup> mod p = 3''. There are two solutions to this equation, one which is itself a square modulo ''p'', and its negation. We choose the square one.</ref>
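+
+As a quick sanity check, the defining properties of ''c'' stated above can be verified directly with Python integers (a sketch only; ''p'' is the secp256k1 field prime):
+
+<pre>
+p = 2**256 - 2**32 - 977  # secp256k1 field prime
+c = 0xa2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f852
+
+assert (-c * c) % p == 3             # c is a square root of -3 mod p
+assert pow(c, (p - 1) // 2, p) == 1  # c is itself a square mod p (Euler's criterion)
+</pre>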
+
+To define the needed functions, we first introduce a helper function, matching the <code>XSwiftEC</code> function from the [https://eprint.iacr.org/2022/759.pdf SwiftEC] paper, instantiated for the secp256k1 curve, with minor modifications. It maps pairs of integers ''(u, t)'' (both in range ''0..p-1'') to valid X coordinates on the curve. Note that the specification here does not attempt to be constant time, as it does not operate on secret data. In what follows, we use the notation from [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki#specification BIP340].
+
+* ''XSwiftEC(u, t)'':
+** Alter the inputs to guarantee an X coordinate on the curve:<ref name="ellswift_deviation">'''Why do the inputs to the XSwiftEC algorithm need to be altered?''' This step deviates from the paper, which maps a negligibly small subset of inputs (around ''3/2<sup>256</sup>'') to the point at infinity. To avoid the need to deal with the case where a peer could craft encodings that intentionally trigger this edge case, we remap them to inputs that yield a valid X coordinate.</ref>
+*** If ''u mod p = 0'', let ''u = 1'' instead.
+*** If ''t mod p = 0'', let ''t = 1'' instead.
+*** If ''(u<sup>3</sup> + t<sup>2</sup> + 7) mod p = 0'', let ''t = 2t (mod p)'' instead.
+** Let ''X = (u<sup>3</sup> + 7 - t<sup>2</sup>)/(2t) (mod p).''<ref name="modinv">'''What does the division (/) sign in modular arithmetic refer to?''' Note that the division in these expressions corresponds to multiplication with the modular inverse modulo ''p'', i.e. ''a / b (mod p)'' with nonzero ''b'' is the unique solution ''x'' for which ''bx = a (mod p)''. It can be computed as ''ab<sup>p-2</sup> (mod p)'', but more efficient algorithms exist.</ref>
+** Let ''Y = (X + t)/(cu) (mod p)''.
+** For every ''x'' in ''{u + 4Y<sup>2</sup>, (-X/Y - u)/2, (X/Y - u)/2}'' (all ''mod p''; the order matters):
+*** If ''lift_x(x)'' succeeds, return ''x''. There is at least one such ''x''.
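+
+For reference, a direct and deliberately unoptimized Python transcription of the steps above might look as follows (a sketch only; it is not constant time, and it uses a simple check of whether ''x<sup>3</sup> + 7'' is a square mod ''p'' in place of BIP340's ''lift_x''):
+
+<pre>
+p = 2**256 - 2**32 - 977  # secp256k1 field prime
+c = 0xa2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f852  # sqrt(-3) mod p, as above
+
+def modinv(a):
+    # Modular inverse via Fermat's little theorem (p is prime).
+    return pow(a, p - 2, p)
+
+def lift_x_valid(x):
+    # True if x (in 0..p-1) is a valid X coordinate, i.e. x^3 + 7 is a square mod p.
+    y_sq = (pow(x, 3, p) + 7) % p
+    y = pow(y_sq, (p + 1) // 4, p)
+    return (y * y) % p == y_sq
+
+def xswiftec(u, t):
+    if u % p == 0: u = 1
+    if t % p == 0: t = 1
+    if (pow(u, 3, p) + t * t + 7) % p == 0: t = (2 * t) % p
+    X = (pow(u, 3, p) + 7 - t * t) * modinv(2 * t) % p
+    Y = (X + t) * modinv(c * u) % p
+    for x in [(u + 4 * Y * Y) % p,
+              ((-X * modinv(Y) - u) * modinv(2)) % p,
+              ((X * modinv(Y) - u) * modinv(2)) % p]:
+        if lift_x_valid(x):
+            return x
+    # Unreachable: at least one of the three candidates is always a valid X coordinate.
+</pre>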
+
+To find encodings of a given X coordinate ''x'', we first need the inverse of ''XSwiftEC''. The function ''XSwiftECInv(x, u, case)'' either returns ''t'' such that ''XSwiftEC(u, t) = x'', or ''None''. The ''case'' variable is an integer in range ''0..7'', which selects which of the up to 8 valid such ''t'' values to return:
+
+* ''XSwiftECInv(x, u, case)'':
+** If ''case & 2 = 0'':
+*** If ''lift_x(-x - u)'' succeeds, return ''None''.
+*** Let ''v = x''.
+*** Let ''s = -(u<sup>3</sup> + 7)/(u<sup>2</sup> + uv + v<sup>2</sup>) (mod p)''.
+** Else (''case & 2 = 2''):
+*** Let ''s = x - u (mod p)''.
+*** If ''s = 0'', return ''None''.
+*** Let ''r'' be the square root of ''-s(4(u<sup>3</sup> + 7) + 3u<sup>2</sup>s) (mod p).''<ref name="modsqrt">'''How to compute a square root mod ''p''?''' Due to the structure of ''p'', a candidate for the square root of ''a'' mod ''p'' can be computed as ''x = a<sup>(p+1)/4</sup> mod p''. If ''a'' is not a square mod ''p'', this formula returns the square root of ''-a mod p'' instead, so it is necessary to verify that ''x<sup>2</sup> mod p = a''. If that is the case ''-x mod p'' is a solution too, but we define "the" square root to be equal to that expression (the square root will therefore always be a square itself, as ''(p+1)/4'' is even). This algorithm is a specialization of the [https://en.wikipedia.org/wiki/Tonelli%E2%80%93Shanks_algorithm Tonelli-Shanks algorithm].</ref> Return ''None'' if it does not exist.
+*** If ''case & 1 = 1'' and ''r = 0'', return ''None''.
+*** Let ''v = (r/s - u)/2''.
+** Let ''w'' be the square root of ''s (mod p)''. Return ''None'' if it does not exist.
+** If ''case & 5 = 0'', return ''-w(u(1 - c)/2 + v)''.
+** If ''case & 5 = 1'', return ''w(u(1 + c)/2 + v)''.
+** If ''case & 5 = 4'', return ''w(u(1 - c)/2 + v)''.
+** If ''case & 5 = 5'', return ''-w(u(1 + c)/2 + v)''.
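+
+Likewise, a direct Python transcription of ''XSwiftECInv'' (again a sketch only; it reuses ''p'', ''c'', <code>modinv</code> and <code>lift_x_valid</code> from the sketch above, and implements "the" square root exactly as defined in the footnote):
+
+<pre>
+def sqrt_mod(a):
+    # "The" square root mod p as defined above: a^((p+1)/4) if it is indeed a root, else None.
+    a %= p
+    r = pow(a, (p + 1) // 4, p)
+    return r if (r * r) % p == a else None
+
+def xswiftec_inv(x, u, case):
+    if case & 2 == 0:
+        if lift_x_valid((-x - u) % p):
+            return None
+        v = x
+        s = -(pow(u, 3, p) + 7) * modinv(u * u + u * v + v * v) % p
+    else:
+        s = (x - u) % p
+        if s == 0:
+            return None
+        r = sqrt_mod(-s * (4 * (pow(u, 3, p) + 7) + 3 * u * u * s))
+        if r is None:
+            return None
+        if case & 1 and r == 0:
+            return None
+        v = (r * modinv(s) - u) * modinv(2) % p
+    w = sqrt_mod(s)
+    if w is None:
+        return None
+    if case & 5 == 0: return -w * (u * (1 - c) * modinv(2) + v) % p
+    if case & 5 == 1: return w * (u * (1 + c) * modinv(2) + v) % p
+    if case & 5 == 4: return w * (u * (1 - c) * modinv(2) + v) % p
+    if case & 5 == 5: return -w * (u * (1 + c) * modinv(2) + v) % p
+</pre>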
+
+The overall ''XElligatorSwift'' algorithm, matching the name used in the paper, then uses this inverse to randomly<ref name="ellswift_helps_parroting">'''Can the ElligatorSwift encoding be used to construct public key encodings that satisfy a certain structure (and not pseudorandom)?''' The algorithm chooses the first 32 bytes (i.e., the value ''u'') and then computes a corresponding ''t'' such that the mapping to the curve point holds. In general, picking ''u'' from a uniformly random distribution provides pseudorandomness. But we can also fix any of the 32 bytes in ''u'', and the algorithm will still find a corresponding ''t''. The fact that it is possible to fix the first 32 bytes, combined with the garbage bytes in the handshake, provides a limited but very simple method of parroting other protocols such as [https://tls13.xargs.org/ TLS 1.3], which can be deployed by one of the peers without explicit support from the other peer. More general methods of parroting, e.g., introduced by defining a new protocol or a protocol upgrade, are not precluded.</ref> sample encodings of ''x'':
+
+* ''XElligatorSwift(x)'':
+** Loop:
+*** Let ''u'' be a random non-zero integer in range ''1..p-1'' inclusive.
+*** Let ''case'' be a random integer in range ''0..7'' inclusive.
+*** Compute ''t = XSwiftECInv(x, u, case)''.
+*** If ''t'' is not ''None'', return ''(u, t)''. Otherwise, restart loop.
+
+This is used to define the <code>ellswift_create</code> algorithm used in the previous section; it generates a random private key, along with a uniformly sampled 64-byte ElligatorSwift-encoded public key corresponding to it:
+
+* ''ellswift_create()'':
+** Generate a random private key ''priv'' in range ''1..n-1'' (where ''n'' is the secp256k1 group order, following the BIP340 notation).
+** Let ''P = priv⋅G'', the public key point corresponding to ''priv''.
+** Let ''(u, t) = XElligatorSwift(x(P))'', an encoding of ''x(P)''.
+** ''ellswift_pub = bytes(u) || bytes(t)'', its encoding as 64 bytes.
+** Return ''(priv, ellswift_pub)''.
+
+Finally the <code>ellswift_ecdh_xonly</code> algorithm is:
+
+* ''ellswift_ecdh_xonly(ellswift_theirs, priv)'':
+** Let ''u = int(ellswift_theirs[:32]) mod p''.
+** Let ''t = int(ellswift_theirs[32:]) mod p''.
+** Return ''bytes(x(priv⋅lift_x(XSwiftEC(u, t))))''.<ref name="lift_x_choice">'''Does it matter which point ''lift_x'' maps to?''' Either point is valid, as they are negations of each other, and negations do not affect the output X coordinate.</ref>
+
+===== Keys and session ID derivation =====
+
+The authenticated encryption construction proposed here requires two 32-byte keys per communication direction. These (in addition to a session ID) are computed using HKDF<ref name="why_hkdf">'''Why use HKDF for deriving key material?''' The shared secret already involves a hash function to make sure the public key encodings contribute to it, which negates some of the need for HKDF already. We still use it as it is the standard mechanism for deriving many keys from a single secret, and its computational cost is low enough to be negligible compared to the rest of a connection setup.</ref> as specified in [https://tools.ietf.org/html/rfc5869 RFC 5869] with SHA256 as the hash function:
+
+<pre>
+def initialize_v2_transport(peer, ecdh_secret, initiating):
+ # Include NETWORK_MAGIC to ensure a connection between nodes on different networks will immediately fail
+ prk = HKDF_Extract(Hash=sha256, salt=b'bitcoin_v2_shared_secret' + NETWORK_MAGIC, ikm=ecdh_secret)
+
+ peer.session_id = HKDF_Expand(Hash=sha256, PRK=prk, info=b'session_id', L=32)
+
+ # Initialize the packet encryption ciphers.
+ initiator_L = HKDF_Expand(Hash=sha256, PRK=prk, info=b'initiator_L', L=32)
+ initiator_P = HKDF_Expand(Hash=sha256, PRK=prk, info=b'initiator_P', L=32)
+ responder_L = HKDF_Expand(Hash=sha256, PRK=prk, info=b'responder_L', L=32)
+ responder_P = HKDF_Expand(Hash=sha256, PRK=prk, info=b'responder_P', L=32)
+ garbage_terminators = HKDF_Expand(Hash=sha256, PRK=prk, info=b'garbage_terminators', L=32)
+ initiator_garbage_terminator = garbage_terminators[:16]
+ responder_garbage_terminator = garbage_terminators[16:]
+
+ if initiating:
+ peer.send_L = FSChaCha20(initiator_L)
+ peer.send_P = FSChaCha20Poly1305(initiator_P)
+ peer.send_garbage_terminator = initiator_garbage_terminator
+ peer.recv_L = FSChaCha20(responder_L)
+ peer.recv_P = FSChaCha20Poly1305(responder_P)
+ peer.recv_garbage_terminator = responder_garbage_terminator
+ else:
+ peer.send_L = FSChaCha20(responder_L)
+ peer.send_P = FSChaCha20Poly1305(responder_P)
+ peer.send_garbage_terminator = responder_garbage_terminator
+ peer.recv_L = FSChaCha20(initiator_L)
+ peer.recv_P = FSChaCha20Poly1305(initiator_P)
+ peer.recv_garbage_terminator = initiator_garbage_terminator
+
+ # To achieve forward secrecy we must wipe the key material used to initialize the ciphers:
+    memory_cleanse(ecdh_secret, prk, initiator_L, initiator_P, responder_L, responder_P)
+</pre>
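+
+The <code>HKDF_Extract</code> and <code>HKDF_Expand</code> functions used above are the standard RFC 5869 constructions. For completeness, a minimal sketch in terms of Python's <code>hmac</code> module (an illustration only, not part of the specification):
+
+<pre>
+import hashlib, hmac
+
+sha256 = hashlib.sha256  # the Hash argument used in the pseudocode above
+
+def HKDF_Extract(Hash, salt, ikm):
+    # RFC 5869 extract step: PRK = HMAC-Hash(salt, IKM).
+    return hmac.new(salt, ikm, Hash).digest()
+
+def HKDF_Expand(Hash, PRK, info, L):
+    # RFC 5869 expand step; every invocation above has L = 32, i.e. a single HMAC block.
+    t, okm, counter = b'', b'', 0
+    while len(okm) < L:
+        counter += 1
+        t = hmac.new(PRK, t + info + bytes([counter]), Hash).digest()
+        okm += t
+    return okm[:L]
+</pre>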
+
+The session ID uniquely identifies the encrypted channel. v2 clients supporting this proposal may present the entire session ID (encoded as a hex string) to the node operator to allow for manual, out of band comparison with the peer node operator. Future transport versions may introduce optional authentication methods that compare the session ID as seen by the two endpoints in order to bind the encrypted channel to the authentication.
+
+===== Overall handshake pseudocode =====
+
+To establish a v2 encrypted connection, the initiator generates an ephemeral secp256k1 keypair and sends an unencrypted ElligatorSwift encoding of the public key to the responding peer followed by unencrypted pseudorandom bytes <code>initiator_garbage</code> of length <code>garbage_len < 4096</code>.
+
+<pre>
+def initiate_v2_handshake(peer, garbage_len):
+ peer.privkey_ours, peer.ellswift_ours = ellswift_create(initiating=True)
+ peer.sent_garbage = rand_bytes(garbage_len)
+ send(peer, peer.ellswift_ours + peer.sent_garbage)
+</pre>
+
+The responder generates an ephemeral keypair for itself and derives the shared ECDH secret (using the first 64 received bytes) which enables it to instantiate the encrypted transport. It then sends 64 bytes of the unencrypted ElligatorSwift encoding of its own public key and its own <code>responder_garbage</code> also of length <code>garbage_len < 4096</code>. If the first 12 bytes received match the v1 prefix, the v1 protocol is used instead.
+
+<pre>
+TRANSPORT_VERSION = b''
+NETWORK_MAGIC = b'\xf9\xbe\xb4\xd9' # Mainnet network magic; differs on other networks.
+V1_PREFIX = NETWORK_MAGIC + b'version\x00'
+
+def respond_v2_handshake(peer, garbage_len):
+ peer.received_prefix = b""
+ while len(peer.received_prefix) < 12:
+ peer.received_prefix += receive(peer, 1)
+ if peer.received_prefix[-1] != V1_PREFIX[len(peer.received_prefix) - 1]:
+ peer.privkey_ours, peer.ellswift_ours = ellswift_create(initiating=False)
+ peer.sent_garbage = rand_bytes(garbage_len)
+            send(peer, peer.ellswift_ours + peer.sent_garbage)
+ return
+ use_v1_protocol()
+</pre>
+
+Upon receiving the encoded responder public key, the initiator derives the shared ECDH secret and instantiates the encrypted transport. It then sends the derived 16-byte <code>initiator_garbage_terminator</code> followed by an authenticated, encrypted packet with empty contents<ref name="send_empty_garbauth">'''Does the content of the garbage authentication packet need to be empty?''' The receiver ignores the content of the garbage authentication packet, so its content can be anything, and it can in principle be used as a shaping mechanism too. There is however no need for that, as immediately afterward the initiator can start using decoy packets as (a much more flexible) shaping mechanism instead.</ref> to authenticate the garbage, and its own version packet. It then receives the responder's garbage and garbage authentication packet (delimited by the garbage terminator), and checks if the garbage is authenticated correctly. The responder performs very similar steps but includes the earlier received prefix bytes in the public key. As mentioned before, the encrypted packets for the '''version negotiation phase''' can be piggybacked with the garbage authentication packet to minimize roundtrips.
+
+<pre>
+def complete_handshake(peer, initiating):
+ received_prefix = b'' if initiating else peer.received_prefix
+    ellswift_theirs = received_prefix + receive(peer, 64 - len(received_prefix))
+ ecdh_secret = v2_ecdh(peer.privkey_ours, ellswift_theirs, peer.ellswift_ours,
+ initiating=initiating)
+    initialize_v2_transport(peer, ecdh_secret, initiating=initiating)
+ # Send garbage terminator + garbage authentication packet + version packet.
+ send(peer, peer.send_garbage_terminator +
+ v2_enc_packet(peer, b'', aad=peer.sent_garbage) +
+ v2_enc_packet(peer, TRANSPORT_VERSION))
+ # Skip garbage, until encountering garbage terminator.
+    received_garbage = receive(peer, 16)
+ for i in range(4096):
+ if received_garbage[-16:] == peer.recv_garbage_terminator:
+            # Receive, decode, and ignore garbage authentication packet (decoy or not);
+            # its associated data is the received garbage excluding the garbage terminator.
+            v2_receive_packet(peer, aad=received_garbage[:-16], skip_decoy=False)
+ # Receive, decode, and ignore version packet, skipping decoys
+ v2_receive_packet(peer)
+ return
+ else:
+            received_garbage += receive(peer, 1)
+ # Garbage terminator was not seen after 4 KiB of garbage.
+ disconnect(peer)
+</pre>
+
+==== Packet encryption ====
+
+Lastly, we specify the packet encryption cipher in detail.
+
+===== Existing cryptographic primitives =====
+
+Packet encryption is built on two existing primitives:
+
+* '''ChaCha20Poly1305''' is specified as <code>AEAD_CHACHA20_POLY1305</code> in [https://datatracker.ietf.org/doc/html/rfc8439#section-2.8 RFC 8439 section 2.8]. It is an authenticated encryption with associated data (AEAD) cipher, taking a 256-bit key, a 96-bit nonce, and an arbitrary-length byte array of associated authenticated data (AAD). Due to the built-in authentication tag, ciphertexts are 16 bytes longer than the corresponding plaintext. In what follows:
+** <code>aead_chacha20_poly1305_encrypt(key, nonce, aad, plaintext)</code> refers to a function that takes as input a 32-byte array ''key'', a 12-byte array ''nonce'', an arbitrary-length byte array ''aad'', and an arbitrary-length byte array ''plaintext'', and returns a byte array ''ciphertext'', 16 bytes longer than the plaintext.
+** <code>aead_chacha20_poly1305_decrypt(key, nonce, aad, ciphertext)</code> refers to a function that takes as input a 32-byte array ''key'', a 12-byte array ''nonce'', an arbitrary-length byte array ''aad'', and an arbitrary-length byte array ''ciphertext'', and returns either a byte array ''plaintext'' (16 bytes shorter than the ciphertext), or ''None'' in case the ciphertext was not a valid ChaCha20Poly1305 encryption of any plaintext with the specified ''key'', ''nonce'', and ''aad''.
+* The '''ChaCha20 Block Function''' is specified in [https://datatracker.ietf.org/doc/html/rfc8439#section-2.3 RFC 8439 section 2.3]. It is a pseudorandom function (PRF) taking a 256-bit key, a 96-bit nonce, and a 32-bit counter, and outputting 64 pseudorandom bytes. It is the underlying building block on which ChaCha20 (and ultimately, ChaCha20Poly1305) is built. In what follows:
+** <code>chacha20_block(key, nonce, count)</code> refers to a function that takes as input a 32-byte array ''key'', a 12-byte array ''nonce'', and an integer ''count'' in range ''0..2<sup>32</sup>-1'', and returns a byte array of length 64.
+
+These will be used for plaintext encryption and length encryption, respectively.
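+
+For concreteness, both primitives are available in common libraries. For example, a sketch using the third-party Python <code>cryptography</code> package (just one possible choice; any RFC 8439 compliant implementation works) could provide them as:
+
+<pre>
+from cryptography.exceptions import InvalidTag
+from cryptography.hazmat.primitives.ciphers import Cipher, algorithms
+from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305
+
+def aead_chacha20_poly1305_encrypt(key, nonce, aad, plaintext):
+    return ChaCha20Poly1305(key).encrypt(nonce, plaintext, aad)
+
+def aead_chacha20_poly1305_decrypt(key, nonce, aad, ciphertext):
+    try:
+        return ChaCha20Poly1305(key).decrypt(nonce, ciphertext, aad)
+    except InvalidTag:
+        return None
+
+def chacha20_block(key, nonce, count):
+    # This library's ChaCha20 takes a 16-byte value consisting of the 32-bit little-endian
+    # block counter followed by the 12-byte RFC 8439 nonce; one block of keystream is
+    # obtained by encrypting 64 zero bytes.
+    full_nonce = count.to_bytes(4, 'little') + nonce
+    encryptor = Cipher(algorithms.ChaCha20(key, full_nonce), mode=None).encryptor()
+    return encryptor.update(b'\x00' * 64)
+</pre>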
+
+===== Rekeying wrappers: FSChaCha20Poly1305 and FSChaCha20 =====
+
+To provide re-keying every 224 packets, we specify two wrappers.
+
+The first is '''FSChaCha20Poly1305''', a ChaCha20Poly1305 AEAD that automatically changes the nonce after every message, and that rekeys every 224 messages by encrypting 32 zero bytes<ref name="rekey_why_aead">'''Why is rekeying implemented in terms of an invocation of the AEAD?''' This means the FSChaCha20Poly1305 wrapper can be thought of as a pure layer around the ChaCha20Poly1305 AEAD. Actual implementations can take advantage of the fact that this formulation is equivalent to using bytes 64 through 95 of the keystream output of the underlying ChaCha20 cipher as the new key, avoiding the need for Poly1305 in the process.</ref> and using the first 32 bytes of the result as the new key. Each AEAD message corresponds to one packet. Note that in our protocol, any FSChaCha20Poly1305 instance is always either exclusively encryption or exclusively decryption, as separate instances are used for each direction of the protocol. The nonce used for a message is composed of the 32-bit little-endian encoding of the number of messages with the current key, followed by the 64-bit little-endian encoding of the number of rekeyings performed. For rekeying, the first 32-bit integer is set to ''0xffffffff''.
+
+<pre>
+REKEY_INTERVAL = 224
+
+class FSChaCha20Poly1305:
+ """Rekeying wrapper AEAD around ChaCha20Poly1305."""
+
+ def __init__(self, initial_key):
+ self.key = initial_key
+ self.packet_counter = 0
+
+ def crypt(self, aad, text, is_decrypt):
+ nonce = ((self.packet_counter % REKEY_INTERVAL).to_bytes(4, 'little') +
+ (self.packet_counter // REKEY_INTERVAL).to_bytes(8, 'little'))
+ if is_decrypt:
+ ret = aead_chacha20_poly1305_decrypt(self.key, nonce, aad, text)
+ else:
+ ret = aead_chacha20_poly1305_encrypt(self.key, nonce, aad, text)
+ if (self.packet_counter + 1) % REKEY_INTERVAL == 0:
+ rekey_nonce = b"\xFF\xFF\xFF\xFF" + nonce[4:]
+ self.key = aead_chacha20_poly1305_encrypt(self.key, rekey_nonce, b"", b"\x00" * 32)[:32]
+ self.packet_counter += 1
+ return ret
+
+ def decrypt(self, aad, ciphertext):
+ return self.crypt(aad, ciphertext, True)
+
+ def encrypt(self, aad, plaintext):
+ return self.crypt(aad, plaintext, False)
+</pre>
+
+The second is '''FSChaCha20''', a (single) stream cipher which is used for the lengths of all packets. Encryption and decryption are identical here, so a single function <code>crypt</code> is exposed. It XORs the input with bytes generated using the ChaCha20 block function, rekeying every 224 chunks using the next 32 bytes of the block function output as the new key. A ''chunk'' here refers to a single invocation of <code>crypt</code>. As explained before, the same cipher is used for 224 consecutive chunks, to avoid wasting cipher output. The nonce used for these batches of 224 chunks is composed of 4 zero bytes followed by the 64-bit little-endian encoding of the number of rekeyings performed. The block counter is reset to 0 after every rekeying.
+
+<pre>
+class FSChaCha20:
+ """Rekeying wrapper stream cipher around ChaCha20."""
+
+ def __init__(self, initial_key):
+ self.key = initial_key
+ self.block_counter = 0
+ self.chunk_counter = 0
+ self.keystream = b''
+
+ def get_keystream_bytes(self, nbytes):
+ while len(self.keystream) < nbytes:
+ nonce = ((0).to_bytes(4, 'little') +
+ (self.chunk_counter // REKEY_INTERVAL).to_bytes(8, 'little'))
+ self.keystream += chacha20_block(self.key, nonce, self.block_counter)
+ self.block_counter += 1
+ ret = self.keystream[:nbytes]
+ self.keystream = self.keystream[nbytes:]
+ return ret
+
+ def crypt(self, chunk):
+ ks = self.get_keystream_bytes(len(chunk))
+ ret = bytes([ks[i] ^ chunk[i] for i in range(len(chunk))])
+ if ((self.chunk_counter + 1) % REKEY_INTERVAL) == 0:
+ self.key = self.get_keystream_bytes(32)
+ self.block_counter = 0
+ self.chunk_counter += 1
+ return ret
+</pre>
+
+===== Overall packet encryption and decryption pseudocode =====
+
+Encryption and decryption of packets then follow by composing the ciphers from the previous section as building blocks.
+
+<pre>
+LENGTH_FIELD_LEN = 3
+HEADER_LEN = 1
+IGNORE_BIT_POS = 7
+
+def v2_enc_packet(peer, contents, aad=b'', ignore=False):
+ assert len(contents) <= 2**24 - 1
+ header = (ignore << IGNORE_BIT_POS).to_bytes(HEADER_LEN, 'little')
+ plaintext = header + contents
+ aead_ciphertext = peer.send_P.encrypt(aad, plaintext)
+    enc_contents_len = peer.send_L.crypt(len(contents).to_bytes(LENGTH_FIELD_LEN, 'little'))
+ return enc_contents_len + aead_ciphertext
+</pre>
+
+<pre>
+CHACHA20POLY1305_EXPANSION = 16
+
+def v2_receive_packet(peer, aad=b'', skip_decoy=True):
+ while True:
+ enc_contents_len = receive(peer, LENGTH_FIELD_LEN)
+ contents_len = int.from_bytes(peer.recv_L.crypt(enc_contents_len), 'little')
+ aead_ciphertext = receive(peer, HEADER_LEN + contents_len + CHACHA20POLY1305_EXPANSION)
+        plaintext = peer.recv_P.decrypt(aad, aead_ciphertext)
+ if plaintext is None:
+ disconnect(peer)
+ break
+ header = plaintext[:HEADER_LEN]
+ if not (skip_decoy and header[0] & (1 << IGNORE_BIT_POS)):
+ return plaintext[HEADER_LEN:]
+</pre>
+
+==== Performance ====
+
+Each v1 P2P message uses a double-SHA256 checksum truncated to 4 bytes. Encrypting and authenticating a v2 P2P message as proposed requires roughly the same amount of computation.
+
+=== Application layer specification ===
+==== v2 Bitcoin P2P message structure ====
+v2 Bitcoin P2P transport layer packets use the encrypted packet structure shown above. The unencrypted application-layer '''contents''' of each packet is composed of:
+
+{|class="wikitable"
+! Field !! Size in bytes !! Comments
+|-
+| <code>message_type</code> || 1 or 13 || either a one-byte ID in range ''1..255'' or <code>b'\x00'</code> followed by a 12-byte ASCII message type (as in the v1 P2P protocol)
+|-
+| <code>message_payload</code> || <code>message_length</code> || message payload
+|}
+
+If the first byte of <code>message_type</code> is <code>b'\x00'</code>, the following 12 bytes are interpreted as an ASCII message type (as in the v1 P2P protocol), padded with trailing <code>b'\x00'</code> bytes as necessary. If the first byte of <code>message_type</code> is in the range ''1..255'', it is interpreted as a message type ID. This structure results in smaller messages than the v1 protocol, as most messages sent/received will have a message type ID. We recommend reserving 1-byte type IDs for message types that are sent more than once per direction per connection. A sketch of both encodings is given after the table below.<ref name="smaller_messages">'''How do the lengths between v1 and v2 compare?''' For messages that use the 1-byte short message type ID, v2 packets use 3 bytes less per message than v1.</ref><ref name="fixed_length_long_ids">'''Why not allow variable-length long message type IDs?''' Allowing for variable-length long IDs reduces the available 1-byte ID space by 12 (to encode the length itself) and incentivizes less descriptive message types. In addition, limiting message types to fixed lengths of 1 or 13 hampers traffic analysis.</ref>
+
+The following table lists currently defined message type IDs:
+
+{| class="wikitable"
+|-
+!
+!0
+!1
+!2
+!3
+|-
+!+0
+|(12 bytes follow)||<code>ADDR</code>||<code>BLOCK</code>||<code>BLOCKTXN</code>
+|-
+!+4
+|<code>CMPCTBLOCK</code>||<code>FEEFILTER</code>||<code>FILTERADD</code>||<code>FILTERCLEAR</code>
+|-
+!+8
+|<code>FILTERLOAD</code>||<code>GETBLOCKS</code>||<code>GETBLOCKTXN</code>||<code>GETDATA</code>
+|-
+!+12
+|<code>GETHEADERS</code>||<code>HEADERS</code>||<code>INV</code>||<code>MEMPOOL</code>
+|-
+!+16
+|<code>MERKLEBLOCK</code>||<code>NOTFOUND</code>||<code>PING</code>||<code>PONG</code>
+|-
+!+20
+|<code>SENDCMPCT</code>||<code>TX</code>||<code>GETCFILTERS</code>||<code>CFILTER</code>
+|-
+!+24
+|<code>GETCFHEADERS</code>||<code>CFHEADERS</code>||<code>GETCFCHECKPT</code>||<code>CFCHECKPT</code>
+|-
+!+28
+|<code>ADDRV2</code>||<code>REQRECON</code>||<code>SKETCH</code>||<code>REQSKETCHEXT</code>
+|-
+!+32
+|<code>RECONCILDIFF</code>
+|-
+!&geq;33
+|| colspan="4" | (undefined)
+|}
+
+
+Additional message types may be added separately after BIP finalization.
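+
+As an illustration of the <code>message_type</code> encodings described above, a hypothetical helper (the function name is illustrative, not part of the specification) could construct the field as follows:
+
+<pre>
+def encode_message_type(short_id=None, ascii_type=None):
+    # Either a one-byte short ID in range 1..255 ...
+    if short_id is not None:
+        assert 1 <= short_id <= 255
+        return bytes([short_id])
+    # ... or b'\x00' followed by the 12-byte ASCII message type, padded with trailing zero bytes.
+    name = ascii_type.encode('ascii')
+    assert len(name) <= 12
+    return b'\x00' + name + b'\x00' * (12 - len(name))
+
+# PING has short ID 18 in the table above; SENDHEADERS (a v1 message type without a
+# short ID in the table) would use the 13-byte encoding.
+assert encode_message_type(short_id=18) == b'\x12'
+assert encode_message_type(ascii_type='sendheaders') == b'\x00sendheaders\x00'
+</pre>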
+
+=== Signaling specification ===
+==== Signaling v2 support ====
+Peers supporting the v2 transport protocol signal support by advertising the <code>NODE_P2P_V2 = (1 << 11)</code> service flag in addr relay. If met with immediate disconnection when establishing a v2 connection, clients implementing this proposal are encouraged to retry connecting using the v1 protocol.<ref>'''Why are v2 clients met with immediate disconnection encouraged to retry with a v1 connection?''' Service flags are propagated through untrusted intermediaries using ADDR and ADDRV2 P2P messages and are OR'ed when received from multiple sources. An untrusted intermediary could falsely advertise a potential peer as supportive of v2 connections. Connection downgrades to v1 mitigate the risk of a network participant being blackholed via false advertising.</ref>
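+
+A minimal sketch of how a client implementing this proposal might use the advertised service bits (illustrative only, not part of the specification):
+
+<pre>
+NODE_P2P_V2 = 1 << 11
+
+def should_attempt_v2(advertised_services):
+    # Service bits are OR'ed from multiple untrusted sources, so this is only a hint;
+    # if the v2 attempt is met with immediate disconnection, retry with v1.
+    return bool(advertised_services & NODE_P2P_V2)
+</pre>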
+
+
+== Test Vectors ==
+
+For development and testing purposes, we provide a collection of test vectors in CSV format, and a naive, highly inefficient, [[bip-0324/reference.py|reference implementation]] of the relevant algorithms. This code is for demonstration purposes only:
+* [[bip-0324/ellswift_decode_test_vectors.csv|XElligatorSwift decoding vectors]] provide examples of ElligatorSwift-encoded public keys, and the X coordinate they map to.
+* [[bip-0324/xswiftec_inv_test_vectors.csv|XSwiftECInv vectors]] provide examples of ''(u, x)'' pairs, and the various ''t'' values that ''xswiftec_inv'' maps them to.
+* [[bip-0324/packet_encoding_test_vectors.csv|Packet encoding vectors]] illustrate the lifecycle of the authenticated encryption scheme proposed in this document.
+
+== Rationale and References ==
+<references/>
+
+== Acknowledgements ==
+Thanks to everyone (last name order) that helped invent and develop the ideas in this proposal:
+
+* Matt Corallo
+* Lloyd Fournier
+* Gregory Maxwell
+* Anthony Towns
diff --git a/bip-0324/ellswift_decode_test_vectors.csv b/bip-0324/ellswift_decode_test_vectors.csv
new file mode 100644
index 0000000..1bab96b
--- /dev/null
+++ b/bip-0324/ellswift_decode_test_vectors.csv
@@ -0,0 +1,77 @@
+ellswift,x,comment
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000,edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c,u%p=0;t%p=0;valid_x(x2)
+000000000000000000000000000000000000000000000000000000000000000001d3475bf7655b0fb2d852921035b2ef607f49069b97454e6795251062741771,b5da00b73cd6560520e7c364086e7cd23a34bf60d0e707be9fc34d4cd5fdfa2c,u%p=0;valid_x(x1)
+000000000000000000000000000000000000000000000000000000000000000082277c4a71f9d22e66ece523f8fa08741a7c0912c66a69ce68514bfd3515b49f,f482f2e241753ad0fb89150d8491dc1e34ff0b8acfbb442cfe999e2e5e6fd1d2,u%p=0;valid_x(x3);valid_x(x2);valid_x(x1)
+00000000000000000000000000000000000000000000000000000000000000008421cc930e77c9f514b6915c3dbe2a94c6d8f690b5b739864ba6789fb8a55dd0,9f59c40275f5085a006f05dae77eb98c6fd0db1ab4a72ac47eae90a4fc9e57e0,u%p=0;valid_x(x2)
+0000000000000000000000000000000000000000000000000000000000000000bde70df51939b94c9c24979fa7dd04ebd9b3572da7802290438af2a681895441,aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa9fffffd6b,u%p=0;(u'^3-t'^2+7)%p=0;valid_x(x3)
+0000000000000000000000000000000000000000000000000000000000000000d19c182d2759cd99824228d94799f8c6557c38a1c0d6779b9d4b729c6f1ccc42,70720db7e238d04121f5b1afd8cc5ad9d18944c6bdc94881f502b7a3af3aecff,u%p=0;valid_x(x3)
+0000000000000000000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c,u%p=0;t%p=0;valid_x(x2);t>=p
+0000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff2664bbd5,50873db31badcc71890e4f67753a65757f97aaa7dd5f1e82b753ace32219064b,u%p=0;valid_x(x3);valid_x(x2);valid_x(x1);t>=p
+0000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff7028de7d,1eea9cc59cfcf2fa151ac6c274eea4110feb4f7b68c5965732e9992e976ef68e,u%p=0;valid_x(x2);t>=p
+0000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffcbcfb7e7,12303941aedc208880735b1f1795c8e55be520ea93e103357b5d2adb7ed59b8e,u%p=0;valid_x(x1);t>=p
+0000000000000000000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffff3113ad9,7eed6b70e7b0767c7d7feac04e57aa2a12fef5e0f48f878fcbb88b3b6b5e0783,u%p=0;valid_x(x3);t>=p
+0a2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f8530000000000000000000000000000000000000000000000000000000000000000,532167c11200b08c0e84a354e74dcc40f8b25f4fe686e30869526366278a0688,t%p=0;(u'^3+t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1)
+0a2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f853fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,532167c11200b08c0e84a354e74dcc40f8b25f4fe686e30869526366278a0688,t%p=0;(u'^3+t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1);t>=p
+0ffde9ca81d751e9cdaffc1a50779245320b28996dbaf32f822f20117c22fbd6c74d99efceaa550f1ad1c0f43f46e7ff1ee3bd0162b7bf55f2965da9c3450646,74e880b3ffd18fe3cddf7902522551ddf97fa4a35a3cfda8197f947081a57b8f,valid_x(x3)
+0ffde9ca81d751e9cdaffc1a50779245320b28996dbaf32f822f20117c22fbd6ffffffffffffffffffffffffffffffffffffffffffffffffffffffff156ca896,377b643fce2271f64e5c8101566107c1be4980745091783804f654781ac9217c,valid_x(x2);t>=p
+123658444f32be8f02ea2034afa7ef4bbe8adc918ceb49b12773b625f490b368ffffffffffffffffffffffffffffffffffffffffffffffffffffffff8dc5fe11,ed16d65cf3a9538fcb2c139f1ecbc143ee14827120cbc2659e667256800b8142,(u'^3-t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1);t>=p
+146f92464d15d36e35382bd3ca5b0f976c95cb08acdcf2d5b3570617990839d7ffffffffffffffffffffffffffffffffffffffffffffffffffffffff3145e93b,0d5cd840427f941f65193079ab8e2e83024ef2ee7ca558d88879ffd879fb6657,(u'^3+t'^2+7)%p=0;valid_x(x3);t>=p
+15fdf5cf09c90759add2272d574d2bb5fe1429f9f3c14c65e3194bf61b82aa73ffffffffffffffffffffffffffffffffffffffffffffffffffffffff04cfd906,16d0e43946aec93f62d57eb8cde68951af136cf4b307938dd1447411e07bffe1,(u'^3+t'^2+7)%p=0;valid_x(x2);t>=p
+1f67edf779a8a649d6def60035f2fa22d022dd359079a1a144073d84f19b92d50000000000000000000000000000000000000000000000000000000000000000,025661f9aba9d15c3118456bbe980e3e1b8ba2e047c737a4eb48a040bb566f6c,t%p=0;valid_x(x2)
+1f67edf779a8a649d6def60035f2fa22d022dd359079a1a144073d84f19b92d5fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,025661f9aba9d15c3118456bbe980e3e1b8ba2e047c737a4eb48a040bb566f6c,t%p=0;valid_x(x2);t>=p
+1fe1e5ef3fceb5c135ab7741333ce5a6e80d68167653f6b2b24bcbcfaaaff507fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,98bec3b2a351fa96cfd191c1778351931b9e9ba9ad1149f6d9eadca80981b801,t%p=0;(u'^3-t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1);t>=p
+4056a34a210eec7892e8820675c860099f857b26aad85470ee6d3cf1304a9dcf375e70374271f20b13c9986ed7d3c17799698cfc435dbed3a9f34b38c823c2b4,868aac2003b29dbcad1a3e803855e078a89d16543ac64392d122417298cec76e,(u'^3-t'^2+7)%p=0;valid_x(x3)
+4197ec3723c654cfdd32ab075506648b2ff5070362d01a4fff14b336b78f963fffffffffffffffffffffffffffffffffffffffffffffffffffffffffb3ab1e95,ba5a6314502a8952b8f456e085928105f665377a8ce27726a5b0eb7ec1ac0286,(u'^3+t'^2+7)%p=0;valid_x(x1);t>=p
+47eb3e208fedcdf8234c9421e9cd9a7ae873bfbdbc393723d1ba1e1e6a8e6b24ffffffffffffffffffffffffffffffffffffffffffffffffffffffff7cd12cb1,d192d52007e541c9807006ed0468df77fd214af0a795fe119359666fdcf08f7c,(u'^3+t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1);t>=p
+5eb9696a2336fe2c3c666b02c755db4c0cfd62825c7b589a7b7bb442e141c1d693413f0052d49e64abec6d5831d66c43612830a17df1fe4383db896468100221,ef6e1da6d6c7627e80f7a7234cb08a022c1ee1cf29e4d0f9642ae924cef9eb38,(u'^3+t'^2+7)%p=0;valid_x(x1)
+7bf96b7b6da15d3476a2b195934b690a3a3de3e8ab8474856863b0de3af90b0e0000000000000000000000000000000000000000000000000000000000000000,50851dfc9f418c314a437295b24feeea27af3d0cd2308348fda6e21c463e46ff,t%p=0;valid_x(x1)
+7bf96b7b6da15d3476a2b195934b690a3a3de3e8ab8474856863b0de3af90b0efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,50851dfc9f418c314a437295b24feeea27af3d0cd2308348fda6e21c463e46ff,t%p=0;valid_x(x1);t>=p
+851b1ca94549371c4f1f7187321d39bf51c6b7fb61f7cbf027c9da62021b7a65fc54c96837fb22b362eda63ec52ec83d81bedd160c11b22d965d9f4a6d64d251,3e731051e12d33237eb324f2aa5b16bb868eb49a1aa1fadc19b6e8761b5a5f7b,(u'^3+t'^2+7)%p=0;valid_x(x2)
+943c2f775108b737fe65a9531e19f2fc2a197f5603e3a2881d1d83e4008f91250000000000000000000000000000000000000000000000000000000000000000,311c61f0ab2f32b7b1f0223fa72f0a78752b8146e46107f8876dd9c4f92b2942,t%p=0;valid_x(x3);valid_x(x2);valid_x(x1)
+943c2f775108b737fe65a9531e19f2fc2a197f5603e3a2881d1d83e4008f9125fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,311c61f0ab2f32b7b1f0223fa72f0a78752b8146e46107f8876dd9c4f92b2942,t%p=0;valid_x(x3);valid_x(x2);valid_x(x1);t>=p
+a0f18492183e61e8063e573606591421b06bc3513631578a73a39c1c3306239f2f32904f0d2a33ecca8a5451705bb537d3bf44e071226025cdbfd249fe0f7ad6,97a09cf1a2eae7c494df3c6f8a9445bfb8c09d60832f9b0b9d5eabe25fbd14b9,valid_x(x1)
+a1ed0a0bd79d8a23cfe4ec5fef5ba5cccfd844e4ff5cb4b0f2e71627341f1c5b17c499249e0ac08d5d11ea1c2c8ca7001616559a7994eadec9ca10fb4b8516dc,65a89640744192cdac64b2d21ddf989cdac7500725b645bef8e2200ae39691f2,valid_x(x2)
+ba94594a432721aa3580b84c161d0d134bc354b690404d7cd4ec57c16d3fbe98ffffffffffffffffffffffffffffffffffffffffffffffffffffffffea507dd7,5e0d76564aae92cb347e01a62afd389a9aa401c76c8dd227543dc9cd0efe685a,valid_x(x1);t>=p
+bcaf7219f2f6fbf55fe5e062dce0e48c18f68103f10b8198e974c184750e1be3932016cbf69c4471bd1f656c6a107f1973de4af7086db897277060e25677f19a,2d97f96cac882dfe73dc44db6ce0f1d31d6241358dd5d74eb3d3b50003d24c2b,valid_x(x3);valid_x(x2);valid_x(x1)
+bcaf7219f2f6fbf55fe5e062dce0e48c18f68103f10b8198e974c184750e1be3ffffffffffffffffffffffffffffffffffffffffffffffffffffffff6507d09a,e7008afe6e8cbd5055df120bd748757c686dadb41cce75e4addcc5e02ec02b44,valid_x(x3);valid_x(x2);valid_x(x1);t>=p
+c5981bae27fd84401c72a155e5707fbb811b2b620645d1028ea270cbe0ee225d4b62aa4dca6506c1acdbecc0552569b4b21436a5692e25d90d3bc2eb7ce24078,948b40e7181713bc018ec1702d3d054d15746c59a7020730dd13ecf985a010d7,(u'^3+t'^2+7)%p=0;valid_x(x3)
+c894ce48bfec433014b931a6ad4226d7dbd8eaa7b6e3faa8d0ef94052bcf8cff336eeb3919e2b4efb746c7f71bbca7e9383230fbbc48ffafe77e8bcc69542471,f1c91acdc2525330f9b53158434a4d43a1c547cff29f15506f5da4eb4fe8fa5a,(u'^3-t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1)
+cbb0deab125754f1fdb2038b0434ed9cb3fb53ab735391129994a535d925f6730000000000000000000000000000000000000000000000000000000000000000,872d81ed8831d9998b67cb7105243edbf86c10edfebb786c110b02d07b2e67cd,t%p=0;(u'^3-t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1)
+d917b786dac35670c330c9c5ae5971dfb495c8ae523ed97ee2420117b171f41effffffffffffffffffffffffffffffffffffffffffffffffffffffff2001f6f6,e45b71e110b831f2bdad8651994526e58393fde4328b1ec04d59897142584691,valid_x(x3);t>=p
+e28bd8f5929b467eb70e04332374ffb7e7180218ad16eaa46b7161aa679eb4260000000000000000000000000000000000000000000000000000000000000000,66b8c980a75c72e598d383a35a62879f844242ad1e73ff12edaa59f4e58632b5,t%p=0;valid_x(x3)
+e28bd8f5929b467eb70e04332374ffb7e7180218ad16eaa46b7161aa679eb426fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,66b8c980a75c72e598d383a35a62879f844242ad1e73ff12edaa59f4e58632b5,t%p=0;valid_x(x3);t>=p
+e7ee5814c1706bf8a89396a9b032bc014c2cac9c121127dbf6c99278f8bb53d1dfd04dbcda8e352466b6fcd5f2dea3e17d5e133115886eda20db8a12b54de71b,e842c6e3529b234270a5e97744edc34a04d7ba94e44b6d2523c9cf0195730a50,(u'^3+t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1)
+f292e46825f9225ad23dc057c1d91c4f57fcb1386f29ef10481cb1d22518593fffffffffffffffffffffffffffffffffffffffffffffffffffffffff7011c989,3cea2c53b8b0170166ac7da67194694adacc84d56389225e330134dab85a4d55,(u'^3-t'^2+7)%p=0;valid_x(x3);t>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0000000000000000000000000000000000000000000000000000000000000000,edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c,u%p=0;t%p=0;valid_x(x2);u>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f01d3475bf7655b0fb2d852921035b2ef607f49069b97454e6795251062741771,b5da00b73cd6560520e7c364086e7cd23a34bf60d0e707be9fc34d4cd5fdfa2c,u%p=0;valid_x(x1);u>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f4218f20ae6c646b363db68605822fb14264ca8d2587fdd6fbc750d587e76a7ee,aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa9fffffd6b,u%p=0;(u'^3-t'^2+7)%p=0;valid_x(x3);u>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f82277c4a71f9d22e66ece523f8fa08741a7c0912c66a69ce68514bfd3515b49f,f482f2e241753ad0fb89150d8491dc1e34ff0b8acfbb442cfe999e2e5e6fd1d2,u%p=0;valid_x(x3);valid_x(x2);valid_x(x1);u>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8421cc930e77c9f514b6915c3dbe2a94c6d8f690b5b739864ba6789fb8a55dd0,9f59c40275f5085a006f05dae77eb98c6fd0db1ab4a72ac47eae90a4fc9e57e0,u%p=0;valid_x(x2);u>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2fd19c182d2759cd99824228d94799f8c6557c38a1c0d6779b9d4b729c6f1ccc42,70720db7e238d04121f5b1afd8cc5ad9d18944c6bdc94881f502b7a3af3aecff,u%p=0;valid_x(x3);u>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c,u%p=0;t%p=0;valid_x(x2);u>=p;t>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2fffffffffffffffffffffffffffffffffffffffffffffffffffffffff2664bbd5,50873db31badcc71890e4f67753a65757f97aaa7dd5f1e82b753ace32219064b,u%p=0;valid_x(x3);valid_x(x2);valid_x(x1);u>=p;t>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2fffffffffffffffffffffffffffffffffffffffffffffffffffffffff7028de7d,1eea9cc59cfcf2fa151ac6c274eea4110feb4f7b68c5965732e9992e976ef68e,u%p=0;valid_x(x2);u>=p;t>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2fffffffffffffffffffffffffffffffffffffffffffffffffffffffffcbcfb7e7,12303941aedc208880735b1f1795c8e55be520ea93e103357b5d2adb7ed59b8e,u%p=0;valid_x(x1);u>=p;t>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3113ad9,7eed6b70e7b0767c7d7feac04e57aa2a12fef5e0f48f878fcbb88b3b6b5e0783,u%p=0;valid_x(x3);u>=p;t>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff13cea4a70000000000000000000000000000000000000000000000000000000000000000,649984435b62b4a25d40c6133e8d9ab8c53d4b059ee8a154a3be0fcf4e892edb,t%p=0;valid_x(x1);u>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff13cea4a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,649984435b62b4a25d40c6133e8d9ab8c53d4b059ee8a154a3be0fcf4e892edb,t%p=0;valid_x(x1);u>=p;t>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff15028c590063f64d5a7f1c14915cd61eac886ab295bebd91992504cf77edb028bdd6267f,3fde5713f8282eead7d39d4201f44a7c85a5ac8a0681f35e54085c6b69543374,(u'^3+t'^2+7)%p=0;valid_x(x2);u>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff2715de860000000000000000000000000000000000000000000000000000000000000000,3524f77fa3a6eb4389c3cb5d27f1f91462086429cd6c0cb0df43ea8f1e7b3fb4,t%p=0;valid_x(x3);valid_x(x2);valid_x(x1);u>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff2715de86fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,3524f77fa3a6eb4389c3cb5d27f1f91462086429cd6c0cb0df43ea8f1e7b3fb4,t%p=0;valid_x(x3);valid_x(x2);valid_x(x1);u>=p;t>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff2c2c5709e7156c417717f2feab147141ec3da19fb759575cc6e37b2ea5ac9309f26f0f66,d2469ab3e04acbb21c65a1809f39caafe7a77c13d10f9dd38f391c01dc499c52,(u'^3-t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1);u>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff3a08cc1efffffffffffffffffffffffffffffffffffffffffffffffffffffffff760e9f0,38e2a5ce6a93e795e16d2c398bc99f0369202ce21e8f09d56777b40fc512bccc,valid_x(x3);u>=p;t>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff3e91257d932016cbf69c4471bd1f656c6a107f1973de4af7086db897277060e25677f19a,864b3dc902c376709c10a93ad4bbe29fce0012f3dc8672c6286bba28d7d6d6fc,valid_x(x3);u>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff795d6c1c322cadf599dbb86481522b3cc55f15a67932db2afa0111d9ed6981bcd124bf44,766dfe4a700d9bee288b903ad58870e3d4fe2f0ef780bcac5c823f320d9a9bef,(u'^3+t'^2+7)%p=0;valid_x(x1);u>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff8e426f0392389078c12b1a89e9542f0593bc96b6bfde8224f8654ef5d5cda935a3582194,faec7bc1987b63233fbc5f956edbf37d54404e7461c58ab8631bc68e451a0478,valid_x(x1);u>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff91192139ffffffffffffffffffffffffffffffffffffffffffffffffffffffff45f0f1eb,ec29a50bae138dbf7d8e24825006bb5fc1a2cc1243ba335bc6116fb9e498ec1f,valid_x(x2);u>=p;t>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff98eb9ab76e84499c483b3bf06214abfe065dddf43b8601de596d63b9e45a166a580541fe,1e0ff2dee9b09b136292a9e910f0d6ac3e552a644bba39e64e9dd3e3bbd3d4d4,(u'^3-t'^2+7)%p=0;valid_x(x3);u>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff9b77b7f2c74d99efceaa550f1ad1c0f43f46e7ff1ee3bd0162b7bf55f2965da9c3450646,8b7dd5c3edba9ee97b70eff438f22dca9849c8254a2f3345a0a572ffeaae0928,valid_x(x2);u>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffff9b77b7f2ffffffffffffffffffffffffffffffffffffffffffffffffffffffff156ca896,0881950c8f51d6b9a6387465d5f12609ef1bb25412a08a74cb2dfb200c74bfbf,valid_x(x3);valid_x(x2);valid_x(x1);u>=p;t>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffa2f5cd838816c16c4fe8a1661d606fdb13cf9af04b979a2e159a09409ebc8645d58fde02,2f083207b9fd9b550063c31cd62b8746bd543bdc5bbf10e3a35563e927f440c8,(u'^3+t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1);u>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffb13f75c00000000000000000000000000000000000000000000000000000000000000000,4f51e0be078e0cddab2742156adba7e7a148e73157072fd618cd60942b146bd0,t%p=0;valid_x(x3);u>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffb13f75c0fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,4f51e0be078e0cddab2742156adba7e7a148e73157072fd618cd60942b146bd0,t%p=0;valid_x(x3);u>=p;t>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7bc1f8d0000000000000000000000000000000000000000000000000000000000000000,16c2ccb54352ff4bd794f6efd613c72197ab7082da5b563bdf9cb3edaafe74c2,t%p=0;valid_x(x2);u>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7bc1f8dfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,16c2ccb54352ff4bd794f6efd613c72197ab7082da5b563bdf9cb3edaafe74c2,t%p=0;valid_x(x2);u>=p;t>=p
+ffffffffffffffffffffffffffffffffffffffffffffffffffffffffef64d162750546ce42b0431361e52d4f5242d8f24f33e6b1f99b591647cbc808f462af51,d41244d11ca4f65240687759f95ca9efbab767ededb38fd18c36e18cd3b6f6a9,(u'^3+t'^2+7)%p=0;valid_x(x3);u>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffff0e5be52372dd6e894b2a326fc3605a6e8f3c69c710bf27d630dfe2004988b78eb6eab36,64bf84dd5e03670fdb24c0f5d3c2c365736f51db6c92d95010716ad2d36134c8,valid_x(x3);valid_x(x2);valid_x(x1);u>=p
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffffefbb982fffffffffffffffffffffffffffffffffffffffffffffffffffffffff6d6db1f,1c92ccdfcf4ac550c28db57cff0c8515cb26936c786584a70114008d6c33a34b,valid_x(x1);u>=p;t>=p
diff --git a/bip-0324/garbage_terminator.png b/bip-0324/garbage_terminator.png
new file mode 100644
index 0000000..536763e
--- /dev/null
+++ b/bip-0324/garbage_terminator.png
Binary files differ
diff --git a/bip-0324/gen_test_vectors.py b/bip-0324/gen_test_vectors.py
new file mode 100644
index 0000000..05b30a8
--- /dev/null
+++ b/bip-0324/gen_test_vectors.py
@@ -0,0 +1,418 @@
+"""Generate the BIP-0324 test vectors."""
+
+import csv
+import hashlib
+import os
+import sys
+from reference import (
+ FE,
+ GE,
+ MINUS_3_SQRT,
+ hkdf_sha256,
+ SECP256K1_G,
+ ellswift_decode,
+ ellswift_ecdh_xonly,
+ xswiftec_inv,
+ xswiftec,
+ v2_ecdh,
+ initialize_v2_transport,
+ v2_enc_packet
+)
+
+FILENAME_PACKET_TEST = os.path.join(sys.path[0], 'packet_encoding_test_vectors.csv')
+FILENAME_XSWIFTEC_INV_TEST = os.path.join(sys.path[0], 'xswiftec_inv_test_vectors.csv')
+FILENAME_ELLSWIFT_DECODE_TEST = os.path.join(sys.path[0], 'ellswift_decode_test_vectors.csv')
+
+def xswiftec_flagged(u, t, simplified=False):
+ """A variant of xswiftec which also returns 'flags', describing conditions encountered."""
+ flags = []
+ if u == 0:
+ flags.append("u%p=0")
+ u = FE(1)
+ if t == 0:
+ flags.append("t%p=0")
+ t = FE(1)
+ if u**3 + t**2 + 7 == 0:
+ flags.append("(u'^3+t'^2+7)%p=0")
+ t = 2 * t
+ X = (u**3 + 7 - t**2) / (2 * t)
+ Y = (X + t) / (MINUS_3_SQRT * u)
+ if X == 0:
+ if not simplified:
+ flags.append("(u'^3-t'^2+7)%p=0")
+ x3 = u + 4 * Y**2
+ if GE.is_valid_x(x3):
+ flags.append("valid_x(x3)")
+ x2 = (-X / Y - u) / 2
+ if GE.is_valid_x(x2):
+ flags.append("valid_x(x2)")
+ x1 = (X / Y - u) / 2
+ if GE.is_valid_x(x1):
+ flags.append("valid_x(x1)")
+ for x in (x3, x2, x1):
+ if GE.is_valid_x(x):
+ break
+ return x, flags
+
+
+def ellswift_create_deterministic(seed, features):
+ """This is a variant of ellswift_create which doesn't use randomness.
+
+ features is an integer selecting some properties of the result:
+ - (f & 3) == 0: only x1 is valid on decoding (see xswiftec{_flagged})
+ - (f & 3) == 1: only x2 is valid on decoding
+ - (f & 3) == 2: only x3 is valid on decoding
+ - (f & 3) == 3: x1,x2,x3 are all valid on decoding
+ - (f & 4) == 4: u >= p
+    - (f & 8) == 8: u mod p == 0
+
+ Returns privkey, ellswift
+ """
+
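+    # The loop below repeatedly derives candidate (privkey, u, case) values from the seed
+    # via HKDF, until xswiftec_inv succeeds and the decoded validity pattern matches the
+    # requested feature bits.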
+ cnt = 0
+ while True:
+ sec = hkdf_sha256(32, seed, (cnt).to_bytes(4, 'little'), b"sec")
+ xval = (int.from_bytes(sec, 'big') * SECP256K1_G).x
+ cnt += 1
+ if features & 8:
+ u = 0
+ if features & 4:
+ u += FE.SIZE
+ else:
+ udat = hkdf_sha256(64, seed, (cnt).to_bytes(4, 'little'), b"u")
+ if features & 4:
+ u = FE.SIZE + 1 + int.from_bytes(udat, 'big') % (2**256 - FE.SIZE - 1)
+ else:
+ u = 1 + int.from_bytes(udat, 'big') % (FE.SIZE - 1)
+ case = hkdf_sha256(1, seed, (cnt).to_bytes(4, 'little'), b"case")[0] & 7
+ coru = FE(u) + ((features & 8) == 8)
+ t = xswiftec_inv(xval, coru, case)
+ if t is None:
+ continue
+ assert xswiftec(FE(u), t) == xval
+ x2, flags = xswiftec_flagged(FE(u), t)
+ assert x2 == xval
+ have_x1 = "valid_x(x1)" in flags
+ have_x2 = "valid_x(x2)" in flags
+ have_x3 = "valid_x(x3)" in flags
+        if (features & 3) == 0 and not (have_x1 and not have_x2 and not have_x3):
+            continue
+        if (features & 3) == 1 and not (not have_x1 and have_x2 and not have_x3):
+            continue
+        if (features & 3) == 2 and not (not have_x1 and not have_x2 and have_x3):
+            continue
+        if (features & 3) == 3 and not (have_x1 and have_x2 and have_x3):
+            continue
+ return sec, u.to_bytes(32, 'big') + t.to_bytes()
+
+def ellswift_decode_flagged(ellswift, simplified=False):
+ """Decode a 64-byte ElligatorSwift encoded coordinate, returning byte array + flag string."""
+ uv = int.from_bytes(ellswift[:32], 'big')
+ tv = int.from_bytes(ellswift[32:], 'big')
+ x, flags = xswiftec_flagged(FE(uv), FE(tv))
+ if not simplified:
+ if uv >= FE.SIZE:
+ flags.append("u>=p")
+ if tv >= FE.SIZE:
+ flags.append("t>=p")
+ return int(x).to_bytes(32, 'big'), ";".join(flags)
+
+def random_fe_int(_, seed, i, p):
+ """Function to use in tuple_expand, generating a random integer in 0..p-1."""
+ rng_out = hkdf_sha256(64, seed, i.to_bytes(4, 'little'), b"v%i_fe" % p)
+ return int.from_bytes(rng_out, 'big') % FE.SIZE
+
+def random_fe_int_high(_, seed, i, p):
+ """Function to use in tuple_expand, generating a random integer in p..2^256-1."""
+ rng_out = hkdf_sha256(64, seed, i.to_bytes(4, 'little'), b"v%i_fe_high" % p)
+ return FE.SIZE + int.from_bytes(rng_out, 'big') % (2**256 - FE.SIZE)
+
+def fn_of(p_in, fn):
+ """Function to use in tuple_expand, to pick one variable in function of another."""
+ def inner(vs, _seed, _i, p):
+ assert p != p_in
+ if isinstance(vs[p_in], int):
+ return fn(vs[p_in])
+ return None
+ return inner
+
+def tuple_expand(out, tuplespec, prio, seed=None, cnt=1):
+ """Given a tuple specification, expand it cnt times, and add results to out.
+
+ Expansion is defined recursively:
+ - If any of the spec elements is a list, each element of the list results
+ in an expansion (by replacing the list with its element).
+ - If any of the spec elements is a function, that function is invoked with
+ (spec, seed, expansion count, index in spec) as arguments. If the function
+ needs to wait for other indices to be expanded, it can return None.
+
+ The output consists of (prio, expansion count, SHA256(result), result, seed)
+ tuples."""
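+    # Informal example: with tuplespec [ZERO_VAL, RANDOM_VAL] (as used below), each list
+    # element is branched on, each callable is invoked to produce a concrete value, and one
+    # output tuple is appended per fully-expanded combination.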
+
+ def recurse(vs, seed, i, change_pos=None, change=None):
+ if change_pos is not None:
+ vs = list(vs)
+ vs[change_pos] = change
+ for p, v in enumerate(vs):
+ if v is None:
+ return
+ if isinstance(v, list):
+ for ve in v:
+ recurse(vs, seed, i, p, ve)
+ return
+ if callable(v):
+ res = v(vs, seed, i, p)
+ if res is not None:
+ recurse(vs, seed, i, p, res)
+ return
+ h = hashlib.sha256()
+ for v in vs:
+ h.update(int(v).to_bytes(32, 'big'))
+ out.append((prio, i, h.digest(), vs, seed))
+ for i in range(cnt):
+ recurse(tuplespec, seed, i)
+
+def gen_ellswift_decode_cases(seed, simplified=False):
+ """Generate a set of interesting (ellswift, x, flags) ellswift decoding cases."""
+ inputs = []
+
+ # Aggregate for use in tuple_expand, expanding to int in 0..p-1, and one in p..2^256-1.
+ RANDOM_VAL = [random_fe_int, random_fe_int_high]
+ # Aggregate for use in tuple_expand, expanding to integers which %p equal 0.
+ ZERO_VAL = [0, FE.SIZE]
+ # Helpers for constructing u and t values such that u^3+t^2+7=0 or u^3-t^2+7=0.
+ T_FOR_SUM_ZERO = fn_of(0, lambda u: (-FE(u)**3 - 7).sqrts())
+ T_FOR_DIFF_ZERO = fn_of(0, lambda u: (FE(u)**3 + 7).sqrts())
+ U_FOR_SUM_ZERO = fn_of(1, lambda t: (-FE(t)**2 - 7).cbrts())
+ U_FOR_DIFF_ZERO = fn_of(1, lambda t: (FE(t)**2 - 7).cbrts())
+
+ tuple_expand(inputs, [RANDOM_VAL, RANDOM_VAL], 0, seed + b"random", 64)
+ tuple_expand(inputs, [RANDOM_VAL, T_FOR_SUM_ZERO], 1, seed + b"t=sqrt(-u^3-7)", 64)
+ tuple_expand(inputs, [U_FOR_SUM_ZERO, RANDOM_VAL], 1, seed + b"u=cbrt(-t^2-7)", 64)
+ tuple_expand(inputs, [RANDOM_VAL, T_FOR_DIFF_ZERO], 1, seed + b"t=sqrt(u^3+7)", 64)
+ tuple_expand(inputs, [U_FOR_DIFF_ZERO, RANDOM_VAL], 1, seed + b"u=cbrt(t^2-7)", 64)
+ tuple_expand(inputs, [ZERO_VAL, RANDOM_VAL], 2, seed + b"u=0", 64)
+ tuple_expand(inputs, [RANDOM_VAL, ZERO_VAL], 2, seed + b"t=0", 64)
+ tuple_expand(inputs, [ZERO_VAL, FE(8).sqrts()], 3, seed + b"u=0;t=sqrt(8)")
+ tuple_expand(inputs, [FE(-8).cbrts(), ZERO_VAL], 3, seed + b"t=0;u=cbrt(-8)")
+ tuple_expand(inputs, [FE(-6).cbrts(), ZERO_VAL], 3, seed + b"t=0;u=cbrt(-6)")
+ tuple_expand(inputs, [ZERO_VAL, ZERO_VAL], 3, seed + b"u=0;t=0")
+ # Unused.
+ tuple_expand(inputs, [ZERO_VAL, FE(-8).sqrts()], 4, seed + b"u=0;t=sqrt(-8)")
+
+ seen = set()
+ cases = []
+ for _prio, _cnt, _hash, vs, _seed in sorted(inputs):
+ inp = int(vs[0]).to_bytes(32, 'big') + int(vs[1]).to_bytes(32, 'big')
+ outp, flags = ellswift_decode_flagged(inp, simplified)
+ if flags not in seen:
+ cases.append((inp, outp, flags))
+ seen.add(flags)
+
+ return cases
+
+def gen_all_ellswift_decode_vectors(fil):
+ """Generate all xelligatorswift decoding test vectors."""
+
+ cases = gen_ellswift_decode_cases(b"")
+ writer = csv.DictWriter(fil, ["ellswift", "x", "comment"])
+ writer.writeheader()
+ for val, x, flags in sorted(cases):
+ writer.writerow({"ellswift": val.hex(), "x": x.hex(), "comment": flags})
+
+def xswiftec_inv_flagged(x, u, case):
+ """A variant of xswiftec_inv which also returns flags, describing conditions encountered."""
+
+ flags = []
+
+ if case & 2 == 0:
+ if GE.is_valid_x(-x - u):
+ flags.append("bad[valid_x(-x-u)]")
+ return None, flags
+ v = x if case & 1 == 0 else -x - u
+ if v == 0:
+ flags.append("info[v=0]")
+ s = -(u**3 + 7) / (u**2 + u*v + v**2)
+ assert s != 0 # would imply X=0 on curve
+ else:
+ s = x - u
+ if s == 0:
+ flags.append("bad[s=0]")
+ return None, flags
+ q = (-s * (4 * (u**3 + 7) + 3 * s * u**2))
+ if q == 0:
+ flags.append("info[q=0]")
+ r = q.sqrt()
+ if r is None:
+ flags.append("bad[non_square(q)]")
+ return None, flags
+ if case & 1:
+ if r == 0:
+ flags.append("bad[r=0]")
+ return None, flags
+ r = -r
+ v = (-u + r / s) / 2
+ if v == 0:
+ flags.append("info[v=0]")
+ w = s.sqrt()
+ assert w != 0
+ if w is None:
+ flags.append("bad[non_square(s)]")
+ return None, flags
+ if case & 4:
+ w = -w
+ Y = w / 2
+ assert Y != 0
+ X = 2 * Y * (v + u / 2)
+ if X == 0:
+ flags.append("info[X=0]")
+ flags.append("ok")
+ return w * (u * (MINUS_3_SQRT - 1) / 2 - v), flags
+
+def xswiftec_inv_combo_flagged(x, u):
+ """Compute the aggregate results and flags from xswiftec_inv_flagged for case=0..7."""
+ ts = []
+ allflags = []
+ for case in range(8):
+ t, flags = xswiftec_inv_flagged(x, u, case)
+ if t is not None:
+ assert x == xswiftec(u, t)
+ ts.append(t)
+ allflags.append(f"case{case}:{'&'.join(flags)}")
+ return ts, ";".join(allflags)
+
+def gen_all_xswiftec_inv_vectors(fil):
+ """Generate all xswiftec_inv test vectors."""
+
+ # Two constants used below. Compute them only once.
+ C1 = (FE(MINUS_3_SQRT) - 1) / 2
+ C2 = (-FE(MINUS_3_SQRT) - 1) / 2
+ # Helper functions that pick x and u with special properties.
+ TRIGGER_Q_ZERO = fn_of(1, lambda u: (FE(u)**3 + 28) / (FE(-3) * FE(u)**2))
+ TRIGGER_DIVZERO_A = fn_of(1, lambda u: FE(u) * C1)
+ TRIGGER_DIVZERO_B = fn_of(1, lambda u: FE(u) * C2)
+ TRIGGER_V_ZERO = fn_of(1, lambda u: FE(-7) / FE(u)**2)
+ TRIGGER_X_ZERO = fn_of(0, lambda x: FE(-2) * FE(x))
+
+ inputs = []
+ tuple_expand(inputs, [random_fe_int, random_fe_int], 0, b"uniform", 256)
+ tuple_expand(inputs, [TRIGGER_Q_ZERO, random_fe_int], 1, b"x=-(u^3+28)/(3*u^2)", 64)
+ tuple_expand(inputs, [TRIGGER_V_ZERO, random_fe_int], 1, b"x=-7/u^2", 512)
+ tuple_expand(inputs, [random_fe_int, fn_of(0, lambda x: x)], 2, b"u=x", 64)
+ tuple_expand(inputs, [random_fe_int, fn_of(0, lambda x: -FE(x))], 2, b"u=-x", 64)
+ # Unused.
+ tuple_expand(inputs, [TRIGGER_DIVZERO_A, random_fe_int], 3, b"x=u*(sqrt(-3)-1)/2", 64)
+ tuple_expand(inputs, [TRIGGER_DIVZERO_B, random_fe_int], 3, b"x=u*(-sqrt(-3)-1)/2", 64)
+ tuple_expand(inputs, [random_fe_int, TRIGGER_X_ZERO], 3, b"u=-2x", 64)
+
+ seen = set()
+ cases = []
+ for _prio, _cnt, _hash, vs, _seed in sorted(inputs):
+ x, u = FE(vs[0]), FE(vs[1])
+ if u == 0:
+ continue
+ if not GE.is_valid_x(x):
+ continue
+ ts, flags = xswiftec_inv_combo_flagged(x, u)
+ if flags not in seen:
+ cases.append((int(u), int(x), ts, flags))
+ seen.add(flags)
+
+ writer = csv.DictWriter(fil, ["u", "x"] + [f"case{c}_t" for c in range(8)] + ["comment"])
+ writer.writeheader()
+ for u, x, ts, flags in sorted(cases):
+ row = {"u": FE(u), "x": FE(x), "comment": flags}
+ for c in range(8):
+ if ts[c] is not None:
+ row[f"case{c}_t"] = FE(ts[c])
+ writer.writerow(row)
+
+def gen_packet_encoding_vector(case):
+ """Given a dict case with specs, construct a packet_encoding test vector as a CSV line."""
+ ikm = str(case).encode('utf-8')
+ in_initiating = case["init"]
+ in_ignore = int(case["ignore"])
+ in_priv_ours, in_ellswift_ours = ellswift_create_deterministic(ikm, case["features"])
+ mid_x_ours = (int.from_bytes(in_priv_ours, 'big') * SECP256K1_G).x.to_bytes()
+ assert mid_x_ours == ellswift_decode(in_ellswift_ours)
+ in_ellswift_theirs = case["theirs"]
+ in_contents = hkdf_sha256(case["contentlen"], ikm, b"contents", b"")
+ contents = in_contents * case["multiply"]
+ in_aad = hkdf_sha256(case["aadlen"], ikm, b"aad", b"")
+ mid_shared_secret = v2_ecdh(in_priv_ours, in_ellswift_theirs, in_ellswift_ours, in_initiating)
+
+ peer = initialize_v2_transport(mid_shared_secret, in_initiating)
+ for _ in range(case["idx"]):
+ v2_enc_packet(peer, b"")
+ ciphertext = v2_enc_packet(peer, contents, in_aad, case["ignore"])
+ long_msg = len(ciphertext) > 128
+
+ return {
+ "in_idx": case['idx'],
+ "in_priv_ours": in_priv_ours.hex(),
+ "in_ellswift_ours": in_ellswift_ours.hex(),
+ "in_ellswift_theirs": in_ellswift_theirs.hex(),
+ "in_initiating": int(in_initiating),
+ "in_contents": in_contents.hex(),
+ "in_multiply": case['multiply'],
+ "in_aad": in_aad.hex(),
+ "in_ignore": in_ignore,
+ "mid_x_ours": mid_x_ours.hex(),
+ "mid_x_theirs": ellswift_decode(in_ellswift_theirs).hex(),
+ "mid_x_shared": ellswift_ecdh_xonly(in_ellswift_theirs, in_priv_ours).hex(),
+ "mid_shared_secret": mid_shared_secret.hex(),
+ "mid_initiator_l": peer['initiator_L'].hex(),
+ "mid_initiator_p": peer['initiator_P'].hex(),
+ "mid_responder_l": peer['responder_L'].hex(),
+ "mid_responder_p": peer['responder_P'].hex(),
+ "mid_send_garbage_terminator": peer["send_garbage_terminator"].hex(),
+ "mid_recv_garbage_terminator": peer["recv_garbage_terminator"].hex(),
+ "out_session_id": peer["session_id"].hex(),
+ "out_ciphertext": "" if long_msg else ciphertext.hex(),
+ "out_ciphertext_endswith": ciphertext[-128:].hex() if long_msg else ""
+ }
+
+def gen_all_packet_encoding_vectors(fil):
+ """Return a list of CSV lines, one for each packet encoding vector."""
+
+ ellswift = gen_ellswift_decode_cases(b"simplified_", simplified=True)
+ ellswift.sort(key=lambda x: hashlib.sha256(b"simplified:" + x[0]).digest())
+
+ fields = [
+ "in_idx", "in_priv_ours", "in_ellswift_ours", "in_ellswift_theirs", "in_initiating",
+ "in_contents", "in_multiply", "in_aad", "in_ignore", "mid_x_ours", "mid_x_theirs",
+ "mid_x_shared", "mid_shared_secret", "mid_initiator_l", "mid_initiator_p",
+ "mid_responder_l", "mid_responder_p", "mid_send_garbage_terminator",
+ "mid_recv_garbage_terminator", "out_session_id", "out_ciphertext", "out_ciphertext_endswith"
+ ]
+
+ writer = csv.DictWriter(fil, fields)
+ writer.writeheader()
+ for case in [
+ {"init": True, "contentlen": 1, "multiply": 1, "aadlen": 0, "ignore": False, "idx": 1,
+ "theirs": ellswift[0][0], "features": 0},
+ {"init": False, "contentlen": 17, "multiply": 1, "aadlen": 0, "ignore": False, "idx": 999,
+ "theirs": ellswift[1][0], "features": 1},
+ {"init": True, "contentlen": 63, "multiply": 1, "aadlen": 4095, "ignore": False, "idx": 0,
+ "theirs": ellswift[2][0], "features": 2},
+ {"init": False, "contentlen": 128, "multiply": 1, "aadlen": 0, "ignore": True, "idx": 223,
+ "theirs": ellswift[3][0], "features": 3},
+ {"init": True, "contentlen": 193, "multiply": 1, "aadlen": 0, "ignore": False, "idx": 448,
+ "theirs": ellswift[4][0], "features": 4},
+ {"init": False, "contentlen": 41, "multiply": 97561, "aadlen": 0, "ignore": False,
+ "idx": 673, "theirs": ellswift[5][0], "features": 5},
+ {"init": True, "contentlen": 241, "multiply": 69615, "aadlen": 0, "ignore": True,
+ "idx": 1024, "theirs": ellswift[6][0], "features": 6},
+ ]:
+ writer.writerow(gen_packet_encoding_vector(case))
+
+if __name__ == "__main__":
+ print(f"Generating {FILENAME_PACKET_TEST}...")
+ with open(FILENAME_PACKET_TEST, "w", encoding="utf-8") as fil_packet:
+ gen_all_packet_encoding_vectors(fil_packet)
+ print(f"Generating {FILENAME_XSWIFTEC_INV_TEST}...")
+ with open(FILENAME_XSWIFTEC_INV_TEST, "w", encoding="utf-8") as fil_xswiftec_inv:
+ gen_all_xswiftec_inv_vectors(fil_xswiftec_inv)
+ print(f"Generating {FILENAME_ELLSWIFT_DECODE_TEST}...")
+ with open(FILENAME_ELLSWIFT_DECODE_TEST, "w", encoding="utf-8") as fil_ellswift_decode:
+ gen_all_ellswift_decode_vectors(fil_ellswift_decode)
diff --git a/bip-0324/packet_encoding_test_vectors.csv b/bip-0324/packet_encoding_test_vectors.csv
new file mode 100644
index 0000000..4f70b92
--- /dev/null
+++ b/bip-0324/packet_encoding_test_vectors.csv
@@ -0,0 +1,8 @@
+in_idx,in_priv_ours,in_ellswift_ours,in_ellswift_theirs,in_initiating,in_contents,in_multiply,in_aad,in_ignore,mid_x_ours,mid_x_theirs,mid_x_shared,mid_shared_secret,mid_initiator_l,mid_initiator_p,mid_responder_l,mid_responder_p,mid_send_garbage_terminator,mid_recv_garbage_terminator,out_session_id,out_ciphertext,out_ciphertext_endswith
+1,61062ea5071d800bbfd59e2e8b53d47d194b095ae5a4df04936b49772ef0d4d7,ec0adff257bbfe500c188c80b4fdd640f6b45a482bbc15fc7cef5931deff0aa186f6eb9bba7b85dc4dcc28b28722de1e3d9108b985e2967045668f66098e475b,a4a94dfce69b4a2a0a099313d10f9f7e7d649d60501c9e1d274c300e0d89aafaffffffffffffffffffffffffffffffffffffffffffffffffffffffff8faf88d5,1,8e,1,,0,19e965bc20fc40614e33f2f82d4eeff81b5e7516b12a5c6c0d6053527eba0923,0c71defa3fafd74cb835102acd81490963f6b72d889495e06561375bd65f6ffc,4eb2bf85bd00939468ea2abb25b63bc642e3d1eb8b967fb90caa2d89e716050e,c6992a117f5edbea70c3f511d32d26b9798be4b81a62eaee1a5acaa8459a3592,9a6478b5fbab1f4dd2f78994b774c03211c78312786e602da75a0d1767fb55cf,7d0c7820ba6a4d29ce40baf2caa6035e04f1e1cefd59f3e7e59e9e5af84f1f51,17bc726421e4054ac6a1d54915085aaa766f4d3cf67bbd168e6080eac289d15e,9f0fc1c0e85fd9a8eee07e6fc41dba2ff54c7729068a239ac97c37c524cca1c0,faef555dfcdb936425d84aba524758f3,02cb8ff24307a6e27de3b4e7ea3fa65b,ce72dffb015da62b0d0f5474cab8bc72605225b0cee3f62312ec680ec5f41ba5,7530d2a18720162ac09c25329a60d75adf36eda3c3,
+999,1f9c581b35231838f0f17cf0c979835baccb7f3abbbb96ffcc318ab71e6e126f,a1855e10e94e00baa23041d916e259f7044e491da6171269694763f018c7e63693d29575dcb464ac816baa1be353ba12e3876cba7628bd0bd8e755e721eb0140,fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0000000000000000000000000000000000000000000000000000000000000000,0,3eb1d4e98035cfd8eeb29bac969ed3824a,1,,0,45b6f1f684fd9f2b16e2651ddc47156c0695c8c5cd2c0c9df6d79a1056c61120,edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c,c40eb6190caf399c9007254ad5e5fa20d64af2b41696599c59b2191d16992955,a0138f564f74d0ad70bc337dacc9d0bf1d2349364caf1188a1e6e8ddb3b7b184,b82a0a7ce7219777f914d2ab873c5c487c56bd7b68622594d67fe029a8fa7def,d760ba8f62dd3d29d7d5584e310caf2540285edc6b51c640f9497e99c3536fd2,9db0c6f9a903cbab5d7b3c58273a3421eec0001814ec53236bd405131a0d8e90,23d2b5e653e6a3a8db160a2ca03d11cb5a79983babba861fcb57c38413323c0c,efb64fd80acd3825ac9bc2a67216535a,b3cb553453bceb002897e751ff7588bf,9267c54560607de73f18c563b76a2442718879c52dd39852885d4a3c9912c9ea,1da1bcf589f9b61872f45b7fa5371dd3f8bdf5d515b0c5f9fe9f0044afb8dc0aa1cd39a8c4,
+0,0286c41cd30913db0fdff7a64ebda5c8e3e7cef10f2aebc00a7650443cf4c60d,d1ee8a93a01130cbf299249a258f94feb5f469e7d0f2f28f69ee5e9aa8f9b54a60f2c3ff2d023634ec7f4127a96cc11662e402894cf1f694fb9a7eaa5f1d9244,ffffffffffffffffffffffffffffffffffffffffffffffffffffffff22d5e441524d571a52b3def126189d3f416890a99d4da6ede2b0cde1760ce2c3f98457ae,1,054290a6c6ba8d80478172e89d32bf690913ae9835de6dcf206ff1f4d652286fe0ddf74deba41d55de3edc77c42a32af79bbea2c00bae7492264c60866ae5a,1,84932a55aac22b51e7b128d31d9f0550da28e6a3f394224707d878603386b2f9d0c6bcd8046679bfed7b68c517e7431e75d9dd34605727d2ef1c2babbf680ecc8d68d2c4886e9953a4034abde6da4189cd47c6bb3192242cf714d502ca6103ee84e08bc2ca4fd370d5ad4e7d06c7fbf496c6c7cc7eb19c40c61fb33df2a9ba48497a96c98d7b10c1f91098a6b7b16b4bab9687f27585ade1491ae0dba6a79e1e2d85dd9d9d45c5135ca5fca3f0f99a60ea39edbc9efc7923111c937913f225d67788d5f7e8852b697e26b92ec7bfcaa334a1665511c2b4c0a42d06f7ab98a9719516c8fd17f73804555ee84ab3b7d1762f6096b778d3cb9c799cbd49a9e4a325197b4e6cc4a5c4651f8b41ff88a92ec428354531f970263b467c77ed11312e2617d0d53fe9a8707f51f9f57a77bfb49afe3d89d85ec05ee17b9186f360c94ab8bb2926b65ca99dae1d6ee1af96cad09de70b6767e949023e4b380e66669914a741ed0fa420a48dbc7bfae5ef2019af36d1022283dd90655f25eec7151d471265d22a6d3f91dc700ba749bb67c0fe4bc0888593fbaf59d3c6fff1bf756a125910a63b9682b597c20f560ecb99c11a92c8c8c3f7fbfaa103146083a0ccaecf7a5f5e735a784a8820155914a289d57d8141870ffcaf588882332e0bcd8779efa931aa108dab6c3cce76691e345df4a91a03b71074d66333fd3591bff071ea099360f787bbe43b7b3dff2a59c41c7642eb79870222ad1c6f2e5a191ed5acea51134679587c9cf71c7d8ee290be6bf465c4ee47897a125708704ad610d8d00252d01959209d7cd04d5ecbbb1419a7e84037a55fefa13dee464b48a35c96bcb9a53e7ed461c3a1607ee00c3c302fd47cd73fda7493e947c9834a92d63dcfbd65aa7c38c3e3a2748bb5d9a58e7495d243d6b741078c8f7ee9c8813e473a323375702702b0afae1550c8341eedf5247627343a95240cb02e3e17d5dca16f8d8d3b2228e19c06399f8ec5c5e9dbe4caef6a0ea3ffb1d3c7eac03ae030e791fa12e537c80d56b55b764cadf27a8701052df1282ba8b5e3eb62b5dc7973ac40160e00722fa958d95102fc25c549d8c0e84bed95b7acb61ba65700c4de4feebf78d13b9682c52e937d23026fb4c6193e6644e2d3c99f91f4f39a8b9fc6d013f89c3793ef703987954dc0412b550652c01d922f525704d32d70d6d4079bc3551b563fb29577b3aecdc9505011701dddfd94830431e7a4918927ee44fb3831ce8c4513839e2deea1287f3fa1ab9b61a256c09637dbc7b4f0f8fbb783840f9c24526da883b0df0c473cf231656bd7bc1aaba7f321fec0971c8c2c3444bff2f55e1df7fea66ec3e440a612db9aa87bb505163a59e06b96d46f50d8120b92814ac5ab146bc78dbbf91065af26107815678ce6e33812e6bf3285d4ef3b7b04b076f21e7820dcbfdb4ad5218cf4ff6a65812d8fcb98ecc1e95e2fa58e3efe4ce26cd0bd400d6036ab2ad4f6c713082b5e3f1e04eb9e3b6c8f63f57953894b9e220e0130308e1fd91f72d398c1e7962ca2c31be83f31d6157633581a0a6910496de8d55d3d07090b6aa087159e388b7e7dec60f5d8a60d93ca2ae91296bd484d916bfaaa17c8f45ea4b1a91b37c82821199a2b7596672c37156d8701e7352aa48671d3b1bbbd2bd5f0a2268894a25b0cb2514af39c8743f8cce8ab4b523053739fd8a522222a09acf51ac704489cf17e4b7125455cb8f125b4d31af1eba1f8cf7f81a5a100a141a7ee72e8083e065616649c241f233645c5fc865d17f0285f5c52d9f45312c979bfb3ce5f2a1b951deddf280ffb3f370410cffd1583bfa90077835aa201a0712d1dcd1293ee177738b14e6b5e2a496d05220c3253bb6578d6aff774be91946a614dd7e879fb3dcf7451e0b9adb6a8c44f53c2c464bcc0019e9fad89cac7791a0a3f2974f759a9856351d4d2d7c5612c17cfc50f8479945df57716767b120a590f4bf656f4645029a525694d8a238446c5f5c2c1c995c09c1405b8b1eb9e0352ffdf766cc964f8dcf9f8f043dfab6d102cf4b298021abd78f1d9025fa1f8e1d710b38d9d1652f2d88d1305874ec41609b6617b65c5adb19b6295dc5c5da5fdf69f28144ea12f17c3c6fcce6b9b5157b3dfc969d6725fa5b098a4d9b1d31547ed4c9187452d281d0a5d456008caf1aa251f
ac8f950ca561982dc2dc908d3691ee3b6ad3ae3d22d002577264ca8e49c523bd51c4846be0d198ad9407bf6f7b82c79893eb2c05fe9981f687a97a4f01fe45ff8c8b7ecc551135cd960a0d6001ad35020be07ffb53cb9e731522ca8ae9364628914b9b8e8cc2f37f03393263603cc2b45295767eb0aac29b0930390eb89587ab2779d2e3decb8042acece725ba42eda650863f418f8d0d50d104e44fbbe5aa7389a4a144a8cecf00f45fb14c39112f9bfb56c0acbd44fa3ff261f5ce4acaa5134c2c1d0cca447040820c81ab1bcdc16aa075b7c68b10d06bbb7ce08b5b805e0238f24402cf24a4b4e00701935a0c68add3de090903f9b85b153cb179a582f57113bfc21c2093803f0cfa4d9d4672c2b05a24f7e4c34a8e9101b70303a7378b9c50b6cddd46814ef7fd73ef6923feceab8fc5aa8b0d185f2e83c7a99dcb1077c0ab5c1f5d5f01ba2f0420443f75c4417db9ebf1665efbb33dca224989920a64b44dc26f682cc77b4632c8454d49135e52503da855bc0f6ff8edc1145451a9772c06891f41064036b66c3119a0fc6e80dffeb65dc456108b7ca0296f4175fff3ed2b0f842cd46bd7e86f4c62dfaf1ddbf836263c00b34803de164983d0811cebfac86e7720c726d3048934c36c23189b02386a722ca9f0fe00233ab50db928d3bccea355cc681144b8b7edcaae4884d5a8f04425c0890ae2c74326e138066d8c05f4c82b29df99b034ea727afde590a1f2177ace3af99cfb1729d6539ce7f7f7314b046aab74497e63dd399e1f7d5f16517c23bd830d1fdee810f3c3b77573dd69c4b97d80d71fb5a632e00acdfa4f8e829faf3580d6a72c40b28a82172f8dcd4627663ebf6069736f21735fd84a226f427cd06bb055f94e7c92f31c48075a2955d82a5b9d2d0198ce0d4e131a112570a8ee40fb80462a81436a58e7db4e34b6e2c422e82f934ecda9949893da5730fc5c23c7c920f363f85ab28cc6a4206713c3152669b47efa8238fa826735f17b4e78750276162024ec85458cd5808e06f40dd9fd43775a456a3ff6cae90550d76d8b2899e0762ad9a371482b3e38083b1274708301d6346c22fea9bb4b73db490ff3ab05b2f7f9e187adef139a7794454b7300b8cc64d3ad76c0e4bc54e08833a4419251550655380d675bc91855aeb82585220bb97f03e976579c08f321b5f8f70988d3061f41465517d53ac571dbf1b24b94443d2e9a8e8a79b392b3d6a4ecdd7f626925c365ef6221305105ce9b5f5b6ecc5bed3d702bd4b7f5008aa8eb8c7aa3ade8ecf6251516fbefeea4e1082aa0e1848eddb31ffe44b04792d296054402826e4bd054e671f223e5557e4c94f89ca01c25c44f1a2ff2c05a70b43408250705e1b858bf0670679fdcd379203e36be3500dd981b1a6422c3cf15224f7fefdef0a5f225c5a09d15767598ecd9e262460bb33a4b5d09a64591efabc57c923d3be406979032ae0bc0997b65336a06dd75b253332ad6a8b63ef043f780a1b3fb6d0b6cad98b1ef4a02535eb39e14a866cfc5fc3a9c5deb2261300d71280ebe66a0776a151469551c3c5fa308757f956655278ec6330ae9e3625468c5f87e02cd9a6489910d4143c1f4ee13aa21a6859d907b788e28572fecee273d44e4a900fa0aa668dd861a60fb6b6b12c2c5ef3c8df1bd7ef5d4b0d1cdb8c15fffbb365b9784bd94abd001c6966216b9b67554ad7cb7f958b70092514f7800fc40244003e0fd1133a9b850fb17f4fcafde07fc87b07fb510670654a5d2d6fc9876ac74728ea41593beef003d6858786a52d3a40af7529596767c17000bfaf8dc52e871359f4ad8bf6e7b2853e5229bdf39657e213580294a5317c5df172865e1e17fe37093b585e04613f5f078f761b2b1752eb32983afda24b523af8851df9a02b37e77f543f18888a782a994a50563334282bf9cdfccc183fdf4fcd75ad86ee0d94f91ee2300a5befbccd14e03a77fc031a8cfe4f01e4c5290f5ac1da0d58ea054bd4837cfd93e5e34fc0eb16e48044ba76131f228d16cde9b0bb978ca7cdcd10653c358bdb26fdb723a530232c32ae0a4cecc06082f46e1c1d596bfe60621ad1e354e01e07b040cc7347c016653f44d926d13ca74e6cbc9d4ab4c99f4491c95c76fff5076b3936eb9d0a286b97c035ca88a3c6309f5febfd4cdaac869e4f58ed409b1e9eb4192fb2f9c2f12176d460fd98286c9d6df84598f260119fd29c63f800c07d8df83d5cc95f8c2fea2812e7890e8a0718bb1e031ecbebc0436dcf3e3b9a58bcc06b4c17f711f80fe1dffc3326a6eb6e00283055c6dabe20d311bfd5019591b7954f8163c9afad9ef8390a38f3582e0a79cdf0353de8eeb6b5f9f27b16ffdef7dd62869b4840ee226ccdce95e02c4545eb981b60571cd83f03dc5eaf8c97a0829a4318a9b3dc06c0e003db700b2260ff1fa8fee66890e637b109abb03ec901b05ca599775f48af50154c0e67d82bf0f558d7d3e0778dc38bea1eb5f74dc8d7f90abdf551
1a424be66bf8b6a3cacb477d2e7ef4db68d2eba4d5289122d851f9501ba7e9c4957d8eba3be3fc8e785c4265a1d65c46f2809b70846c693864b169c9dcb78be26ea14b8613f145b01887222979a9e67aee5f800caa6f5c4229bdeefc901232ace6143c9865e4d9c07f51aa200afaf7e48a7d1d8faf366023beab12906ffcb3eaf72c0eb68075e4daf3c080e0c31911befc16f0cc4a09908bb7c1e26abab38bd7b788e1a09c0edf1a35a38d2ff1d3ed47fcdaae2f0934224694f5b56705b9409b6d3d64f3833b686f7576ec64bbdd6ff174e56c2d1edac0011f904681a73face26573fbba4e34652f7ae84acfb2fa5a5b3046f98178cd0831df7477de70e06a4c00e305f31aafc026ef064dd68fd3e4252b1b91d617b26c6d09b6891a00df68f105b5962e7f9d82da101dd595d286da721443b72b2aba2377f6e7772e33b3a5e3753da9c2578c5d1daab80187f55518c72a64ee150a7cb5649823c08c9f62cd7d020b45ec2cba8310db1a7785a46ab24785b4d54ff1660b5ca78e05a9a55edba9c60bf044737bc468101c4e8bd1480d749be5024adefca1d998abe33eaeb6b11fbb39da5d905fdd3f611b2e51517ccee4b8af72c2d948573505590d61a6783ab7278fc43fe55b1fcc0e7216444d3c8039bb8145ef1ce01c50e95a3f3feab0aee883fdb94cc13ee4d21c542aa795e18932228981690f4d4c57ca4db6eb5c092e29d8a05139d509a8aeb48baa1eb97a76e597a32b280b5e9d6c36859064c98ff96ef5126130264fa8d2f49213870d9fb036cff95da51f270311d9976208554e48ffd486470d0ecdb4e619ccbd8226147204baf8e235f54d8b1cba8fa34a9a4d055de515cdf180d2bb6739a175183c472e30b5c914d09eeb1b7dafd6872b38b48c6afc146101200e6e6a44fe5684e220adc11f5c403ddb15df8051e6bdef09117a3a5349938513776286473a3cf1d2788bb875052a2e6459fa7926da33380149c7f98d7700528a60c954e6f5ecb65842fde69d614be69eaa2040a4819ae6e756accf936e14c1e894489744a79c1f2c1eb295d13e2d767c09964b61f9cfe497649f712,0,33a32d10066fa3963a9518a14d1bd1cb5ccaceaeaaeddb4d7aead90c08395bfd,568146140669e69646a6ffeb3793e8010e2732209b4c34ec13e209a070109183,a1017beaa8784f283dee185cd847ae3a327a981e62ae21e8c5face175fc97e9b,250b93570d411149105ab8cb0bc5079914906306368c23e9d77c2a33265b994c,4ec7daf7294a4a2c717442dd21cf2f052a3bfe9d535b55da0f66fecf87a27534,52ab4db9c4b06621f8ded3405691eb32465b1360d15a6b127ded4d15f9cde466,ba9906da802407ddedf6733e29f3996c62425e79d3cbfeebbd6ec4cdc7c976a8,ee661e18c97319ad071106bf35fe1085034832f70718d92f887932128b6100c7,d4e3f18ac2e2095edb5c3b94236118ad,4faa6c4233d9fd53d170ede4172142a8,23f154ac43cfc59c4243e9fc68aeec8f19ad3942d74108e833b36f0dd3dcd357,8da7de6ea7bf2a81a396a42880ba1f5756734c4821309ac9aeffa2a26ce86873b9dc4935a772de6ec5162c6d075b14536800fb174841153511bfb597e992e2fe8a450c4bce102cc550bb37fd564c4d60bf884e,
+223,6c77432d1fda31e9f942f8af44607e10f3ad38a65f8a4bddae823e5eff90dc38,d2685070c1e6376e633e825296634fd461fa9e5bdf2109bcebd735e5a91f3e587c5cb782abb797fbf6bb5074fd1542a474f2a45b673763ec2db7fb99b737bbb9,56bd0c06f10352c3a1a9f4b4c92f6fa2b26df124b57878353c1fc691c51abea77c8817daeeb9fa546b77c8daf79d89b22b0e1b87574ece42371f00237aa9d83a,0,7e0e78eb6990b059e6cf0ded66ea93ef82e72aa2f18ac24f2fc6ebab561ae557420729da103f64cecfa20527e15f9fb669a49bbbf274ef0389b3e43c8c44e5f60bf2ac38e2b55e7ec4273dba15ba41d21f8f5b3ee1688b3c29951218caf847a97fb50d75a86515d445699497d968164bf740012679b8962de573be941c62b7ef,1,,1,193d019db571162e52567e0cfdf9dd6964394f32769ae2edc4933b03b502d771,2dd7b9cc85524f8670f695c3143ac26b45cebcabb2782a85e0fe15aee3956535,5e35f94adfd57976833bffec48ef6dde983d18a55501154191ea352ef06732ee,1918b741ef5f9d1d7670b050c152b4a4ead2c31be9aecb0681c0cd4324150853,97124c56236425d792b1ec85e34b846e8d88c9b9f1d4f23ac6cdcc4c177055a0,8c71b468c61119415e3c1dfdd184134211951e2f623199629a46bff9673611f2,b43b8791b51ed682f56d64351601be28e478264411dcf963b14ee60b9ae427fa,794dde4b38ef04250c534a7fa638f2e8cc8b6d2c6110ec290ab0171fdf277d51,cf2e25f23501399f30738d7eee652b90,225a477a28a54ea7671d2b217a9c29db,7ec02fea8c1484e3d0875f978c5f36d63545e2e4acf56311394422f4b66af612,,729847a3e9eba7a5bff454b5de3b393431ee360736b6c030d7a5bd01d1203d2e98f528543fd2bf886ccaa1ada5e215a730a36b3f4abfc4e252c89eb01d9512f94916dae8a76bf16e4da28986ffe159090fe5267ee3394300b7ccf4dfad389a26321b3a3423e4594a82ccfbad16d6561ecb8772b0cb040280ff999a29e3d9d4fd
+448,a6ec25127ca1aa4cf16b20084ba1e6516baae4d32422288e9b36d8bddd2de35a,ffffffffffffffffffffffffffffffffffffffffffffffffffffffff053d7ecca53e33e185a8b9be4e7699a97c6ff4c795522e5918ab7cd6b6884f67e683f3dc,ffffffffffffffffffffffffffffffffffffffffffffffffffffffffa7730be30000000000000000000000000000000000000000000000000000000000000000,1,00cf68f8f7ac49ffaa02c4864fdf6dfe7bbf2c740b88d98c50ebafe32c92f3427f57601ffcb21a3435979287db8fee6c302926741f9d5e464c647eeb9b7acaeda46e00abd7506fc9a719847e9a7328215801e96198dac141a15c7c2f68e0690dd1176292a0dded04d1f548aad88f1aebdc0a8f87da4bb22df32dd7c160c225b843e83f6525d6d484f502f16d923124fc538794e21da2eb689d18d87406ecced5b9f92137239ed1d37bcfa7836641a83cf5e0a1cf63f51b06f158e499a459ede41c,1,,0,02b225089255f7b02b20276cfe9779144df8fb1957b477bff3239d802d1256e9,5232c4b6bde9d3d45d7b763ebd7495399bb825cc21de51011761cd81a51bdc84,379223d2f1ea7f8a22043c4ce4122623098309e15b1ce58286ebe3d3bf40f4e1,dd210aa6629f20bb328e5d89daa6eb2ac3d1c658a725536ff154f31b536c23b2,393472f85a5cc6b0f02c4bd466db7a2dc5b91fc9dcb15c0dd6dc21116ece8bca,c80b87b793db47320b2795db66d331bd3021cc24e360d59d0fa8974f54687e0c,ef16a43d77e2b270b0a145ee1618d35f3c943cc7877d6cfcff2287d41692be39,20d4b62e2d982c61bb0cc39a93283d98af36530ef12331d44b2477b0e521b490,fead69be77825a23daec377c362aa560,511d4980526c5e64aa7187462faeafdd,acb8f084ea763ddd1b92ac4ed23bf44de20b84ab677d4e4e6666a6090d40353d,,77b4656934a82de1a593d8481f020194ddafd8cac441f9d72aeb8721e6a14f49698ca6d9b2b6d59d07a01aa552fd4d5b68d0d1617574c77dea10bfadbaa31b83885b7ceac2fd45e3e4a331c51a74e7b1698d81b64c87c73c5b9258b4d83297f9debc2e9aa07f8572ff434dc792b83ecf07b3197de8dc9cf7be56acb59c66cff5
+673,0af952659ed76f80f585966b95ab6e6fd68654672827878684c8b547b1b94f5a,ffffffffffffffffffffffffffffffffffffffffffffffffffffffffc81017fd92fd31637c26c906b42092e11cc0d3afae8d9019d2578af22735ce7bc469c72d,9652d78baefc028cd37a6a92625b8b8f85fde1e4c944ad3f20e198bef8c02f19fffffffffffffffffffffffffffffffffffffffffffffffffffffffff2e91870,0,5c6272ee55da855bbbf7b1246d9885aa7aa601a715ab86fa46c50da533badf82b97597c968293ae04e,97561,,0,4b1767466fe2fb8deddf2dc52cc19c7e2032007e19bfb420b30a80152d0f22d6,64c383e0e78ac99476ddff2061683eeefa505e3666673a1371342c3e6c26981d,5bcfeac98d87e87e158bf839f1269705429f7af2a25b566a25811b5f9aef9560,3568f2aea2e14ef4ee4a3c2a8b8d31bc5e3187ba86db10739b4ff8ec92ff6655,c7df866a62b7d404eb530b2be245a7aece0fb4791402a1de8f33530cbf777cc1,8f732e4aae2ba9314e0982492fa47954de9c189d92fbc549763b27b1b47642ce,992085edfecb92c62a3a7f96ea416f853f34d0dfe065b966b6968b8b87a83081,c5ba5eaf9e1c807154ebab3ea472499e815a7be56dfaf0c201cf6e91ffeca8e6,5e2375ac629b8df1e4ff3617c6255a70,70bcbffcb62e4d29d2605d30bceef137,7332e92a3f9d2792c4d444fac5ed888c39a073043a65eefb626318fd649328f8,,657a4a19711ce593c3844cb391b224f60124aba7e04266233bc50cafb971e26c7716b76e98376448f7d214dd11e629ef9a974d60e3770a695810a61c4ba66d78b936ee7892b98f0b48ddae9fcd8b599dca1c9b43e9b95e0226cf8d4459b8a7c2c4e6db80f1d58c7b20dd7208fa5c1057fb78734223ee801dbd851db601fee61e
+1024,f90e080c64b05824c5a24b2501d5aeaf08af3872ee860aa80bdcd430f7b63494,ffffffffffffffffffffffffffffffffffffffffffffffffffffffff115173765dc202cf029ad3f15479735d57697af12b0131dd21430d5772e4ef11474d58b9,12a50f3fafea7c1eeada4cf8d33777704b77361453afc83bda91eef349ae044d20126c6200547ea5a6911776c05dee2a7f1a9ba7dfbabbbd273c3ef29ef46e46,1,5f67d15d22ca9b2804eeab0a66f7f8e3a10fa5de5809a046084348cbc5304e843ef96f59a59c7d7fdfe5946489f3ea297d941bac326225df316a25fc90f0e65b0d31a9c497e960fdbf8c482516bc8a9c1c77b7f6d0e1143810c737f76f9224e6f2c9af5186b4f7259c7e8d165b6e4fe3d38a60bdbdd4d06ecdcaaf62086070dbb68686b802d53dfd7db14b18743832605f5461ad81e2af4b7e8ff0eff0867a25b93cec7becf15c43131895fed09a83bf1ee4a87d44dd0f02a837bf5a1232e201cb882734eb9643dc2dc4d4e8b5690840766212c7ac8f38ad8a9ec47c7a9b3e022ae3eb6a32522128b518bd0d0085dd81c5,69615,,1,8b8de966150bf872b4b695c9983df519c909811954d5d76e99ed0d5f1860247b,eef379db9bd4b1aa90fc347fad33f7d53083389e22e971036f59f4e29d325ac2,0a402d812314646ccc2565c315d1429ec1ed130ff92ff3f48d948f29c3762cf1,e25461fb0e4c162e18123ecde88342d54d449631e9b75a266fd9260c2bb2f41d,97771ce2ce17a25c3d65bf9f8e4acb830dce8d41392be3e4b8ed902a3106681a,2e7022b4eae9152942f68160a93e25d3e197a557385594aa587cb5e431bb470d,613f85a82d783ce450cfd7e91a027fcc4ad5610872f83e4dbe9e2202184c6d6e,cb5de4ed1083222e381401cf88e3167796bc9ab5b8aa1f27b718f39d1e6c0e87,b709dea25e0be287c50e3603482c2e98,1f677e9d7392ebe3633fd82c9efb0f16,889f339285564fd868401fac8380bb9887925122ec8f31c8ae51ce067def103b,,7c4b9e1e6c1ce69da7b01513cdc4588fd93b04dafefaf87f31561763d906c672bac3dfceb751ebd126728ac017d4d580e931b8e5c7d5dfe0123be4dc9b2d2238b655c8a7fadaf8082c31e310909b5b731efc12f0a56e849eae6bfeedcc86dd27ef9b91d159256aa8e8d2b71a311f73350863d70f18d0d7302cf551e4303c7733
diff --git a/bip-0324/reference.py b/bip-0324/reference.py
new file mode 100644
index 0000000..f02c44a
--- /dev/null
+++ b/bip-0324/reference.py
@@ -0,0 +1,649 @@
+"""Reference implementation for the cryptographic aspects of BIP-324"""
+
+import sys
+import random
+import hashlib
+import hmac
+
+### BIP-340 tagged hash
+
+def TaggedHash(tag, data):
+ """Compute BIP-340 tagged hash with specified tag string of data."""
+ ss = hashlib.sha256(tag.encode('utf-8')).digest()
+ ss += ss
+ ss += data
+ return hashlib.sha256(ss).digest()
+
+### HKDF-SHA256
+
+def hmac_sha256(key, data):
+ """Compute HMAC-SHA256 from specified byte arrays key and data."""
+ return hmac.new(key, data, hashlib.sha256).digest()
+
+def hkdf_sha256(length, ikm, salt, info):
+ """Derive a key using HKDF-SHA256."""
+ if len(salt) == 0:
+ salt = bytes([0] * 32)
+ prk = hmac_sha256(salt, ikm)
+ t = b""
+ okm = b""
+ for i in range((length + 32 - 1) // 32):
+ t = hmac_sha256(prk, t + info + bytes([i + 1]))
+ okm += t
+ return okm[:length]
+
+### secp256k1 field/group elements
+
+def modinv(a, n):
+ """Compute the modular inverse of a modulo n using the extended Euclidean
+ Algorithm. See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers.
+ """
+ a = a % n
+ if a == 0:
+ return 0
+ if sys.hexversion >= 0x3080000:
+ # More efficient version available in Python 3.8.
+ return pow(a, -1, n)
+ t1, t2 = 0, 1
+ r1, r2 = n, a
+ while r2 != 0:
+ q = r1 // r2
+ t1, t2 = t2, t1 - q * t2
+ r1, r2 = r2, r1 - q * r2
+ if r1 > 1:
+ return None
+ if t1 < 0:
+ t1 += n
+ return t1
+
+class FE:
+ """Objects of this class represent elements of the field GF(2**256 - 2**32 - 977).
+
+ They are represented internally in numerator / denominator form, in order to delay inversions.
+ """
+
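+    # The field's prime modulus p.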
+ SIZE = 2**256 - 2**32 - 977
+
+ def __init__(self, a=0, b=1):
+ """Initialize an FE as a/b; both a and b can be ints or field elements."""
+ if isinstance(b, FE):
+ if isinstance(a, FE):
+ self.num = (a.num * b.den) % FE.SIZE
+ self.den = (a.den * b.num) % FE.SIZE
+ else:
+ self.num = (a * b.den) % FE.SIZE
+ self.den = b.num
+ else:
+ b = b % FE.SIZE
+ assert b != 0
+ if isinstance(a, FE):
+ self.num = a.num
+ self.den = (a.den * b) % FE.SIZE
+ else:
+ self.num = a % FE.SIZE
+ self.den = b
+
+ def __add__(self, a):
+ """Compute the sum of two field elements (second may be int)."""
+ if isinstance(a, FE):
+ return FE(self.num * a.den + self.den * a.num, self.den * a.den)
+ return FE(self.num + self.den * a, self.den)
+
+ def __radd__(self, a):
+ """Compute the sum of an integer and a field element."""
+ return FE(self.num + self.den * a, self.den)
+
+ def __sub__(self, a):
+ """Compute the difference of two field elements (second may be int)."""
+ if isinstance(a, FE):
+ return FE(self.num * a.den - self.den * a.num, self.den * a.den)
+ return FE(self.num - self.den * a, self.den)
+
+ def __rsub__(self, a):
+ """Compute the difference between an integer and a field element."""
+ return FE(self.den * a - self.num, self.den)
+
+ def __mul__(self, a):
+ """Compute the product of two field elements (second may be int)."""
+ if isinstance(a, FE):
+ return FE(self.num * a.num, self.den * a.den)
+ return FE(self.num * a, self.den)
+
+ def __rmul__(self, a):
+ """Compute the product of an integer with a field element."""
+ return FE(self.num * a, self.den)
+
+ def __truediv__(self, a):
+ """Compute the ratio of two field elements (second may be int)."""
+ return FE(self, a)
+
+ def __rtruediv__(self, a):
+ """Compute the ratio of an integer and a field element."""
+ return FE(a, self)
+
+ def __pow__(self, a):
+ """Raise a field element to a (positive) integer power."""
+ return FE(pow(self.num, a, FE.SIZE), pow(self.den, a, FE.SIZE))
+
+ def __neg__(self):
+ """Negate a field element."""
+ return FE(-self.num, self.den)
+
+ def __int__(self):
+ """Convert a field element to an integer. The result is cached."""
+ if self.den != 1:
+ self.num = (self.num * modinv(self.den, FE.SIZE)) % FE.SIZE
+ self.den = 1
+ return self.num
+
+ def sqrt(self):
+ """Compute the square root of a field element.
+
+ Due to the fact that our modulus p is of the form p = 3 (mod 4), the
+ Tonelli-Shanks algorithm (https://en.wikipedia.org/wiki/Tonelli-Shanks_algorithm)
+ is simply raising the argument to the power (p + 1) / 4.
+
+ To see why: p-1 = 0 (mod 2), so 2 divides the order of the multiplicative group,
+ and thus only half of the non-zero field elements are squares. An element a is
+ a (nonzero) square when Euler's criterion, a^((p-1)/2) = 1 (mod p), holds. We're
+ looking for x such that x^2 = a (mod p). Given a^((p-1)/2) = 1 (mod p), that is
+ equivalent to x^2 = a^(1 + (p-1)/2) (mod p). As (1 + (p-1)/2) is even, this is
+ equivalent to x = a^((1 + (p-1)/2)/2) (mod p), or x = a^((p+1)/4) (mod p)."""
+ v = int(self)
+ s = pow(v, (FE.SIZE + 1) // 4, FE.SIZE)
+ if s**2 % FE.SIZE == v:
+ return FE(s)
+ return None
+
+ def sqrts(self):
+ """Compute all square roots of a field element, if any."""
+ s = self.sqrt()
+ if s is None:
+ return []
+ return [FE(s), -FE(s)]
+
+ # The cube roots of 1 (mod p).
+ CBRT1 = [
+ 1,
+ 0x851695d49a83f8ef919bb86153cbcb16630fb68aed0a766a3ec693d68e6afa40,
+ 0x7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee
+ ]
+
+
+ def cbrts(self):
+ """Compute all cube roots of a field element, if any.
+
+ Due to the fact that our modulus p is of the form p = 7 (mod 9), one cube root
+ can always be computed by raising to the power (p + 2) / 9. The other roots
+ (if any) can be found by multiplying with the two non-trivial cube roots of 1.
+
+ To see why: p-1 = 0 (mod 3), so 3 divides the order of the multiplicative group,
+ and thus only 1/3 of the non-zero field elements are cubes. An element a is a
+ (nonzero) cube when a^((p-1)/3) = 1 (mod p). We're looking for x such that
+ x^3 = a (mod p). Given a^((p-1)/3) = 1 (mod p), that is equivalent to
+ x^3 = a^(1 + (p-1)/3) (mod p). As (1 + (p-1)/3) is a multiple of 3, this is
+ equivalent to x = a^((1 + (p-1)/3)/3) (mod p), or x = a^((p+2)/9) (mod p)."""
+ v = int(self)
+ c = pow(v, (FE.SIZE + 2) // 9, FE.SIZE)
+
+ if pow(c, 3, FE.SIZE) == v:
+ return [FE(c * f) for f in FE.CBRT1]
+ return []
+
+ def is_square(self):
+ """Determine if this field element has a square root."""
+ # Compute the Jacobi symbol of (self / p). Since our modulus is prime, this
+ # is the same as the Legendre symbol, which determines quadratic residuosity.
+ # See https://en.wikipedia.org/wiki/Jacobi_symbol for the algorithm.
+ n, k, t = (self.num * self.den) % FE.SIZE, FE.SIZE, 0
+ if n == 0:
+ return True
+ while n != 0:
+ while n & 1 == 0:
+ n >>= 1
+ r = k & 7
+ t ^= (r in (3, 5))
+ n, k = k, n
+ t ^= (n & k & 3 == 3)
+ n = n % k
+ assert k == 1
+ return not t
+
+ def __eq__(self, a):
+ """Check whether two field elements are equal (second may be an int)."""
+ if isinstance(a, FE):
+ return (self.num * a.den - self.den * a.num) % FE.SIZE == 0
+ return (self.num - self.den * a) % FE.SIZE == 0
+
+ def to_bytes(self):
+ """Convert a field element to 32-byte big endian encoding."""
+ return int(self).to_bytes(32, 'big')
+
+ @staticmethod
+ def from_bytes(b):
+ """Convert a 32-byte big endian encoding of a field element to an FE."""
+ v = int.from_bytes(b, 'big')
+ if v >= FE.SIZE:
+ return None
+ return FE(v)
+
+ def __str__(self):
+ """Convert this field element to a string."""
+ return f"{int(self):064x}"
+
+ def __repr__(self):
+ """Get a string representation of this field element."""
+ return f"FE(0x{int(self):x})"
+
+assert all(pow(c, 3, FE.SIZE) == 1 for c in FE.CBRT1)
+
+class GE:
+ """Objects of this class represent points (group elements) on the secp256k1 curve.
+
+ The point at infinity is represented as None."""
+
+ ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
+ ORDER_HALF = ORDER // 2
+
+ def __init__(self, x, y):
+ """Initialize a group element with specified x and y coordinates (must be on curve)."""
+ fx = FE(x)
+ fy = FE(y)
+ assert fy**2 == fx**3 + 7
+ self.x = fx
+ self.y = fy
+
+ def double(self):
+ """Compute the double of a point."""
+ l = 3 * self.x**2 / (2 * self.y)
+ x3 = l**2 - 2 * self.x
+ y3 = l * (self.x - x3) - self.y
+ return GE(x3, y3)
+
+ def __add__(self, a):
+ """Add two points, or a point and infinity, together."""
+ if a is None:
+ # Adding point at infinity
+ return self
+ if self.x != a.x:
+ # Adding distinct x coordinates
+ l = (a.y - self.y) / (a.x - self.x)
+ x3 = l**2 - self.x - a.x
+ y3 = l * (self.x - x3) - self.y
+ return GE(x3, y3)
+ if self.y == a.y:
+ # Adding point to itself
+ return self.double()
+ # Adding point to its negation
+ return None
+
+ def __radd__(self, a):
+ """Add infinity to a point."""
+ assert a is None
+ return self
+
+ def __mul__(self, a):
+ """Multiply a point with an integer (scalar multiplication)."""
+ r = None
+ for i in range(a.bit_length() - 1, -1, -1):
+ if r is not None:
+ r = r.double()
+ if (a >> i) & 1:
+ r += self
+ return r
+
+ def __rmul__(self, a):
+ """Multiply an integer with a point (scalar multiplication)."""
+ return self * a
+
+ @staticmethod
+ def lift_x(x):
+ """Take an FE, and return the point with that as X coordinate, and square Y."""
+ y = (FE(x)**3 + 7).sqrt()
+ if y is None:
+ return None
+ return GE(x, y)
+
+ @staticmethod
+ def is_valid_x(x):
+ """Determine whether the provided field element is a valid X coordinate."""
+ return (FE(x)**3 + 7).is_square()
+
+ def __str__(self):
+ """Convert this group element to a string."""
+ return f"({self.x},{self.y})"
+
+ def __repr__(self):
+ """Get a string representation for this group element."""
+ return f"GE(0x{int(self.x)},0x{int(self.y)})"
+
+SECP256K1_G = GE(
+ 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
+ 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)
+
+### ElligatorSwift
+
+# Precomputed constant square root of -3 (mod p).
+MINUS_3_SQRT = FE(-3).sqrt()
+
+def xswiftec(u, t):
+ """Decode field elements (u, t) to an X coordinate on the curve."""
+ if u == 0:
+ u = FE(1)
+ if t == 0:
+ t = FE(1)
+ if u**3 + t**2 + 7 == 0:
+ t = 2 * t
+ X = (u**3 + 7 - t**2) / (2 * t)
+ Y = (X + t) / (MINUS_3_SQRT * u)
+ for x in (u + 4 * Y**2, (-X / Y - u) / 2, (X / Y - u) / 2):
+ if GE.is_valid_x(x):
+ return x
+ assert False
+
+def xswiftec_inv(x, u, case):
+ """Given x and u, find t such that xswiftec(u, t) = x, or return None.
+
+ Case selects which of the up to 8 results to return."""
+
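+    # The three low bits of case select among the up to 8 preimages: one bit picks between
+    # the two algebraic branches below, and the other two pick the signs of the two square
+    # roots involved.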
+ if case & 2 == 0:
+ if GE.is_valid_x(-x - u):
+ return None
+ v = x
+ s = -(u**3 + 7) / (u**2 + u*v + v**2)
+ else:
+ s = x - u
+ if s == 0:
+ return None
+ r = (-s * (4 * (u**3 + 7) + 3 * s * u**2)).sqrt()
+ if r is None:
+ return None
+ if case & 1 and r == 0:
+ return None
+ v = (-u + r / s) / 2
+ w = s.sqrt()
+ if w is None:
+ return None
+ if case & 5 == 0: return -w * (u * (1 - MINUS_3_SQRT) / 2 + v)
+ if case & 5 == 1: return w * (u * (1 + MINUS_3_SQRT) / 2 + v)
+ if case & 5 == 4: return w * (u * (1 - MINUS_3_SQRT) / 2 + v)
+ if case & 5 == 5: return -w * (u * (1 + MINUS_3_SQRT) / 2 + v)
+
+def xelligatorswift(x):
+ """Given a field element X on the curve, find (u, t) that encode them."""
+ while True:
+ u = FE(random.randrange(1, GE.ORDER))
+ case = random.randrange(0, 8)
+ t = xswiftec_inv(x, u, case)
+ if t is not None:
+ return u, t
+
+def ellswift_create():
+ """Generate a (privkey, ellswift_pubkey) pair."""
+ priv = random.randrange(1, GE.ORDER)
+ u, t = xelligatorswift((priv * SECP256K1_G).x)
+ return priv.to_bytes(32, 'big'), u.to_bytes() + t.to_bytes()
+
+def ellswift_decode(ellswift):
+ """Convert ellswift encoded X coordinate to 32-byte xonly format."""
+ u = FE(int.from_bytes(ellswift[:32], 'big'))
+ t = FE(int.from_bytes(ellswift[32:], 'big'))
+ return xswiftec(u, t).to_bytes()
+
+def ellswift_ecdh_xonly(pubkey_theirs, privkey):
+ """Compute X coordinate of shared ECDH point between elswift pubkey and privkey."""
+ d = int.from_bytes(privkey, 'big')
+ pub = ellswift_decode(pubkey_theirs)
+ return (d * GE.lift_x(FE.from_bytes(pub))).x.to_bytes()
+
+### Poly1305
+
+class Poly1305:
+ """Class representing a running poly1305 computation."""
+ MODULUS = 2**130 - 5
+
+ def __init__(self, key):
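+        # Clamp r as specified in RFC 8439: clear the top 4 bits of every 32-bit limb and
+        # the bottom 2 bits of the upper three limbs.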
+ self.r = int.from_bytes(key[:16], 'little') & 0xffffffc0ffffffc0ffffffc0fffffff
+ self.s = int.from_bytes(key[16:], 'little')
+ self.acc = 0
+
+ def add(self, msg, length=None, pad=False):
+ """Add a message of any length. Input so far must be a multiple of 16 bytes."""
+ length = len(msg) if length is None else length
+ for i in range((length + 15) // 16):
+ chunk = msg[i * 16:i * 16 + min(16, length - i * 16)]
+ val = int.from_bytes(chunk, 'little') + 256**(16 if pad else len(chunk))
+ self.acc = (self.r * (self.acc + val)) % Poly1305.MODULUS
+ return self
+
+ def tag(self):
+ """Compute the poly1305 tag."""
+ return ((self.acc + self.s) & 0xffffffffffffffffffffffffffffffff).to_bytes(16, 'little')
+
+### ChaCha20
+
+CHACHA20_INDICES = (
+ (0, 4, 8, 12), (1, 5, 9, 13), (2, 6, 10, 14), (3, 7, 11, 15),
+ (0, 5, 10, 15), (1, 6, 11, 12), (2, 7, 8, 13), (3, 4, 9, 14)
+)
+
+CHACHA20_CONSTANTS = (0x61707865, 0x3320646e, 0x79622d32, 0x6b206574)
+
+def rotl32(v, bits):
+ """Rotate the 32-bit value v left by bits bits."""
+ return ((v << bits) & 0xffffffff) | (v >> (32 - bits))
+
+def chacha20_doubleround(s):
+ """Apply a ChaCha20 double round to 16-element state array s.
+
+ See https://cr.yp.to/chacha/chacha-20080128.pdf and https://tools.ietf.org/html/rfc8439
+ """
+ for a, b, c, d in CHACHA20_INDICES:
+ s[a] = (s[a] + s[b]) & 0xffffffff
+ s[d] = rotl32(s[d] ^ s[a], 16)
+ s[c] = (s[c] + s[d]) & 0xffffffff
+ s[b] = rotl32(s[b] ^ s[c], 12)
+ s[a] = (s[a] + s[b]) & 0xffffffff
+ s[d] = rotl32(s[d] ^ s[a], 8)
+ s[c] = (s[c] + s[d]) & 0xffffffff
+ s[b] = rotl32(s[b] ^ s[c], 7)
+
+def chacha20_block(key, nonce, cnt):
+ """Compute the 64-byte output of the ChaCha20 block function.
+
+ Takes as input a 32-byte key, 12-byte nonce, and 32-bit integer counter.
+ """
+ # Initial state.
+ init = [0 for _ in range(16)]
+ for i in range(4):
+ init[i] = CHACHA20_CONSTANTS[i]
+ for i in range(8):
+ init[4 + i] = int.from_bytes(key[4 * i:4 * (i+1)], 'little')
+ init[12] = cnt
+ for i in range(3):
+ init[13 + i] = int.from_bytes(nonce[4 * i:4 * (i+1)], 'little')
+ # Perform 20 rounds.
+ state = list(init)
+ for _ in range(10):
+ chacha20_doubleround(state)
+ # Add initial values back into state.
+ for i in range(16):
+ state[i] = (state[i] + init[i]) & 0xffffffff
+ # Produce byte output
+ return b''.join(state[i].to_bytes(4, 'little') for i in range(16))
+
+### ChaCha20Poly1305
+
+def aead_chacha20_poly1305_encrypt(key, nonce, aad, plaintext):
+ """Encrypt a plaintext using ChaCha20Poly1305."""
+ ret = bytearray()
+ msg_len = len(plaintext)
+ for i in range((msg_len + 63) // 64):
+ now = min(64, msg_len - 64 * i)
+ keystream = chacha20_block(key, nonce, i + 1)
+ for j in range(now):
+ ret.append(plaintext[j + 64 * i] ^ keystream[j])
+ poly1305 = Poly1305(chacha20_block(key, nonce, 0)[:32])
+ poly1305.add(aad, pad=True).add(ret, pad=True)
+ poly1305.add(len(aad).to_bytes(8, 'little') + msg_len.to_bytes(8, 'little'))
+ ret += poly1305.tag()
+ return bytes(ret)
+
+def aead_chacha20_poly1305_decrypt(key, nonce, aad, ciphertext):
+ """Decrypt a ChaCha20Poly1305 ciphertext."""
+ if len(ciphertext) < 16:
+ return None
+ msg_len = len(ciphertext) - 16
+ poly1305 = Poly1305(chacha20_block(key, nonce, 0)[:32])
+ poly1305.add(aad, pad=True)
+ poly1305.add(ciphertext, length=msg_len, pad=True)
+ poly1305.add(len(aad).to_bytes(8, 'little') + msg_len.to_bytes(8, 'little'))
+ if ciphertext[-16:] != poly1305.tag():
+ return None
+ ret = bytearray()
+ for i in range((msg_len + 63) // 64):
+ now = min(64, msg_len - 64 * i)
+ keystream = chacha20_block(key, nonce, i + 1)
+ for j in range(now):
+ ret.append(ciphertext[j + 64 * i] ^ keystream[j])
+ return bytes(ret)
+
+### FSChaCha20{,Poly1305}
+
+REKEY_INTERVAL = 224 # packets
+
+class FSChaCha20Poly1305:
+ """Rekeying wrapper AEAD around ChaCha20Poly1305."""
+
+ def __init__(self, initial_key):
+ self.key = initial_key
+ self.packet_counter = 0
+
+ def crypt(self, aad, text, is_decrypt):
+ """Encrypt or decrypt the specified (plain/cipher)text."""
+ nonce = ((self.packet_counter % REKEY_INTERVAL).to_bytes(4, 'little') +
+ (self.packet_counter // REKEY_INTERVAL).to_bytes(8, 'little'))
+ if is_decrypt:
+ ret = aead_chacha20_poly1305_decrypt(self.key, nonce, aad, text)
+ else:
+ ret = aead_chacha20_poly1305_encrypt(self.key, nonce, aad, text)
+ if (self.packet_counter + 1) % REKEY_INTERVAL == 0:
+ rekey_nonce = b"\xFF\xFF\xFF\xFF" + nonce[4:]
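+            # The two derivations below are equivalent: encrypting 32 zero bytes (and
+            # dropping the Poly1305 tag via [:32]) yields the first 32 keystream bytes of
+            # the block with counter 1; the assert documents this equivalence.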
+ newkey1 = aead_chacha20_poly1305_encrypt(self.key, rekey_nonce, b"", b"\x00" * 32)[:32]
+ newkey2 = chacha20_block(self.key, rekey_nonce, 1)[:32]
+ assert newkey1 == newkey2
+ self.key = newkey1
+ self.packet_counter += 1
+ return ret
+
+ def encrypt(self, aad, plaintext):
+ """Encrypt the specified plaintext with provided AAD."""
+ return self.crypt(aad, plaintext, False)
+
+ def decrypt(self, aad, ciphertext):
+ """Decrypt the specified ciphertext with provided AAD."""
+ return self.crypt(aad, ciphertext, True)
+
+
+class FSChaCha20:
+ """Rekeying wrapper stream cipher around ChaCha20."""
+
+ def __init__(self, initial_key):
+ self.key = initial_key
+ self.block_counter = 0
+ self.chunk_counter = 0
+ self.keystream = b''
+
+ def get_keystream_bytes(self, nbytes):
+ """Generate nbytes keystream bytes."""
+ while len(self.keystream) < nbytes:
+ nonce = ((0).to_bytes(4, 'little') +
+ (self.chunk_counter // REKEY_INTERVAL).to_bytes(8, 'little'))
+ self.keystream += chacha20_block(self.key, nonce, self.block_counter)
+ self.block_counter += 1
+ ret = self.keystream[:nbytes]
+ self.keystream = self.keystream[nbytes:]
+ return ret
+
+ def crypt(self, chunk):
+ """Encrypt or decypt chunk."""
+ ks = self.get_keystream_bytes(len(chunk))
+ ret = bytes([ks[i] ^ chunk[i] for i in range(len(chunk))])
+ if ((self.chunk_counter + 1) % REKEY_INTERVAL) == 0:
+ self.key = self.get_keystream_bytes(32)
+ self.block_counter = 0
+ self.chunk_counter += 1
+ return ret
+
+ def encrypt(self, chunk):
+ """Encrypt chunk."""
+ return self.crypt(chunk)
+
+ def decrypt(self, chunk):
+ """Decrypt chunk."""
+ return self.crypt(chunk)
+
+
+### Shared secret computation
+
+def v2_ecdh(priv, ellswift_theirs, ellswift_ours, initiating):
+ """Compute BIP324 shared secret."""
+
+ ecdh_point_x32 = ellswift_ecdh_xonly(ellswift_theirs, priv)
+ if initiating:
+ # Initiating, place our public key encoding first.
+ return TaggedHash("bip324_ellswift_xonly_ecdh",
+ ellswift_ours + ellswift_theirs + ecdh_point_x32)
+ # Responding, place their public key encoding first.
+ return TaggedHash("bip324_ellswift_xonly_ecdh",
+ ellswift_theirs + ellswift_ours + ecdh_point_x32)
+
+### Key derivation
+
+NETWORK_MAGIC = b'\xf9\xbe\xb4\xd9'
+
+def initialize_v2_transport(ecdh_secret, initiating):
+ """Return a peer object with various BIP324 derived keys and ciphers."""
+
+ peer = {}
+ salt = b'bitcoin_v2_shared_secret' + NETWORK_MAGIC
+ for name, length in (
+ ('initiator_L', 32), ('initiator_P', 32), ('responder_L', 32), ('responder_P', 32),
+ ('garbage_terminators', 32), ('session_id', 32)):
+ peer[name] = hkdf_sha256(
+ salt=salt, ikm=ecdh_secret, info=name.encode('utf-8'), length=length)
+ peer['initiator_garbage_terminator'] = peer['garbage_terminators'][:16]
+ peer['responder_garbage_terminator'] = peer['garbage_terminators'][16:]
+ del peer['garbage_terminators']
+ if initiating:
+ peer['send_L'] = FSChaCha20(peer['initiator_L'])
+ peer['send_P'] = FSChaCha20Poly1305(peer['initiator_P'])
+ peer['send_garbage_terminator'] = peer['initiator_garbage_terminator']
+ peer['recv_L'] = FSChaCha20(peer['responder_L'])
+ peer['recv_P'] = FSChaCha20Poly1305(peer['responder_P'])
+ peer['recv_garbage_terminator'] = peer['responder_garbage_terminator']
+ else:
+ peer['send_L'] = FSChaCha20(peer['responder_L'])
+ peer['send_P'] = FSChaCha20Poly1305(peer['responder_P'])
+ peer['send_garbage_terminator'] = peer['responder_garbage_terminator']
+ peer['recv_L'] = FSChaCha20(peer['initiator_L'])
+ peer['recv_P'] = FSChaCha20Poly1305(peer['initiator_P'])
+ peer['recv_garbage_terminator'] = peer['initiator_garbage_terminator']
+
+ return peer
+
+### Packet encryption
+
+LENGTH_FIELD_LEN = 3
+HEADER_LEN = 1
+IGNORE_BIT_POS = 7
+
+def v2_enc_packet(peer, contents, aad=b'', ignore=False):
+ """Encrypt a BIP324 packet."""
+
+ assert len(contents) <= 2**24 - 1
+ header = (ignore << IGNORE_BIT_POS).to_bytes(HEADER_LEN, 'little')
+ plaintext = header + contents
+ aead_ciphertext = peer['send_P'].encrypt(aad, plaintext)
+ enc_plaintext_len = peer['send_L'].encrypt(len(contents).to_bytes(LENGTH_FIELD_LEN, 'little'))
+ return enc_plaintext_len + aead_ciphertext
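+
+
+# The demonstration below is not part of the BIP324 algorithms above; it is a minimal,
+# illustrative sketch of how they fit together. It assumes both sides already share an
+# ECDH secret computed with v2_ecdh(); the all-zero value is a placeholder, not a test
+# vector, and _demo_roundtrip is a hypothetical helper that the BIP does not define.
+def _demo_roundtrip():
+    """Encrypt one packet as the initiator and decrypt it as the responder."""
+    ecdh_secret = bytes(32)  # placeholder shared secret (illustration only)
+    initiator = initialize_v2_transport(ecdh_secret, initiating=True)
+    responder = initialize_v2_transport(ecdh_secret, initiating=False)
+    ciphertext = v2_enc_packet(initiator, b"hello")
+    # The receiver first decrypts the 3-byte length field, then the AEAD portion.
+    contents_len = int.from_bytes(
+        responder['recv_L'].decrypt(ciphertext[:LENGTH_FIELD_LEN]), 'little')
+    plaintext = responder['recv_P'].decrypt(b'', ciphertext[LENGTH_FIELD_LEN:])
+    assert plaintext is not None
+    assert contents_len == len(b"hello") and plaintext[HEADER_LEN:] == b"hello"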
diff --git a/bip-0324/run_test_vectors.py b/bip-0324/run_test_vectors.py
new file mode 100644
index 0000000..8e4b8f2
--- /dev/null
+++ b/bip-0324/run_test_vectors.py
@@ -0,0 +1,69 @@
+"""Run the BIP-324 test vectors."""
+
+import csv
+import os
+import sys
+
+import reference
+
+FILENAME_PACKET_TEST = os.path.join(sys.path[0], 'packet_encoding_test_vectors.csv')
+FILENAME_XSWIFTEC_INV_TEST = os.path.join(sys.path[0], 'xswiftec_inv_test_vectors.csv')
+FILENAME_ELLSWIFT_DECODE_TEST = os.path.join(sys.path[0], 'ellswift_decode_test_vectors.csv')
+
+with open(FILENAME_PACKET_TEST, newline='', encoding='utf-8') as csvfile:
+ print(f"Running {FILENAME_PACKET_TEST} tests...")
+ reader = csv.DictReader(csvfile)
+ for row in reader:
+ in_initiating = int(row['in_initiating'])
+ bytes_priv_ours = bytes.fromhex(row['in_priv_ours'])
+ int_priv_ours = int.from_bytes(bytes_priv_ours, 'big')
+ assert row['mid_x_ours'] == (int_priv_ours * reference.SECP256K1_G).x.to_bytes().hex()
+ bytes_ellswift_ours = bytes.fromhex(row['in_ellswift_ours'])
+ assert row['mid_x_ours'] == reference.ellswift_decode(bytes_ellswift_ours).hex()
+ bytes_ellswift_theirs = bytes.fromhex(row['in_ellswift_theirs'])
+ assert row['mid_x_theirs'] == reference.ellswift_decode(bytes_ellswift_theirs).hex()
+ x_shared = reference.ellswift_ecdh_xonly(bytes_ellswift_theirs, bytes_priv_ours)
+ assert row['mid_x_shared'] == x_shared.hex()
+ shared_secret = reference.v2_ecdh(bytes_priv_ours, bytes_ellswift_theirs,
+ bytes_ellswift_ours, in_initiating)
+ assert row['mid_shared_secret'] == shared_secret.hex()
+
+ peer = reference.initialize_v2_transport(shared_secret, in_initiating)
+ assert row['mid_initiator_l'] == peer['initiator_L'].hex()
+ assert row['mid_initiator_p'] == peer['initiator_P'].hex()
+ assert row['mid_responder_l'] == peer['responder_L'].hex()
+ assert row['mid_responder_p'] == peer['responder_P'].hex()
+ assert row['mid_send_garbage_terminator'] == peer['send_garbage_terminator'].hex()
+ assert row['mid_recv_garbage_terminator'] == peer['recv_garbage_terminator'].hex()
+ assert row['out_session_id'] == peer['session_id'].hex()
+ for _ in range(int(row['in_idx'])):
+ reference.v2_enc_packet(peer, b"")
+ ciphertext = reference.v2_enc_packet(
+ peer,
+ bytes.fromhex(row['in_contents']) * int(row['in_multiply']),
+ bytes.fromhex(row['in_aad']), int(row['in_ignore']))
+ if len(row['out_ciphertext']):
+ assert row['out_ciphertext'] == ciphertext.hex()
+ if len(row['out_ciphertext_endswith']):
+ assert ciphertext.hex().endswith(row['out_ciphertext_endswith'])
+
+with open(FILENAME_XSWIFTEC_INV_TEST, newline='', encoding='utf-8') as csvfile:
+ print(f"Running {FILENAME_XSWIFTEC_INV_TEST} tests...")
+ reader = csv.DictReader(csvfile)
+ for row in reader:
+ u = reference.FE.from_bytes(bytes.fromhex(row['u']))
+ x = reference.FE.from_bytes(bytes.fromhex(row['x']))
+ for case in range(8):
+ ret = reference.xswiftec_inv(x, u, case)
+ if ret is None:
+ assert row[f"case{case}_t"] == ""
+ else:
+ assert row[f"case{case}_t"] == ret.to_bytes().hex()
+ assert reference.xswiftec(u, ret) == x
+
+with open(FILENAME_ELLSWIFT_DECODE_TEST, newline='', encoding='utf-8') as csvfile:
+ print(f"Running {FILENAME_ELLSWIFT_DECODE_TEST} tests...")
+ reader = csv.DictReader(csvfile)
+ for row in reader:
+ ellswift = bytes.fromhex(row['ellswift'])
+ assert reference.ellswift_decode(ellswift).hex() == row['x']
diff --git a/bip-0324/secp256k1_test_vectors.py b/bip-0324/secp256k1_test_vectors.py
new file mode 100644
index 0000000..57ae801
--- /dev/null
+++ b/bip-0324/secp256k1_test_vectors.py
@@ -0,0 +1,52 @@
+"""Convert the BIP-324 test vectors to secp256k1 code."""
+
+import csv
+import os
+import sys
+
+import reference
+
+FILENAME_XSWIFTEC_INV_TEST = os.path.join(sys.path[0], 'xswiftec_inv_test_vectors.csv')
+FILENAME_ELLSWIFT_DECODE_TEST = os.path.join(sys.path[0], 'ellswift_decode_test_vectors.csv')
+
+def format_int(v):
+ """Format 0 as "0", but other integers as 0x%08x."""
+ if v == 0:
+ return "0"
+ return f"0x{v:08x}"
+
+def format_fe(fe):
+ """Format a field element constant as SECP256K1_FE_CONST code."""
+ vals = [(int(fe) >> (32 * (7 - i))) & 0xffffffff for i in range(8)]
+ strs = ", ".join(format_int(v) for v in vals)
+ return f"SECP256K1_FE_CONST({strs})"
+
+def output_xswiftec_inv_cases():
+ """Generate lines corresponding to the xswiftec_inv test cases."""
+ with open(FILENAME_XSWIFTEC_INV_TEST, newline='', encoding='utf-8') as csvfile:
+ reader = csv.DictReader(csvfile)
+ print("xswiftec_inv cases:")
+ for row in reader:
+ u = int.from_bytes(bytes.fromhex(row['u']), 'big')
+ x = int.from_bytes(bytes.fromhex(row['x']), 'big')
+ pat = sum(1<<c for c in range(8) if row[f"case{c}_t"])
+ tstrs = []
+ for c in range(8):
+ tstrs.append(format_fe(int.from_bytes(bytes.fromhex(row[f"case{c}_t"]), 'big')))
+ print(f" {{0x{pat:02x}, {format_fe(u)}, {format_fe(x)}, {{{', '.join(tstrs)}}}}},")
+ print()
+
+def output_ellswift_decode_cases():
+ """Generate lines corresponding to the ellswift_decode test cases."""
+ with open(FILENAME_ELLSWIFT_DECODE_TEST, newline='', encoding='utf-8') as csvfile:
+ reader = csv.DictReader(csvfile)
+ print("ellswift_decode cases:")
+ for row in reader:
+ enc = bytes.fromhex(row['ellswift'])
+ tval = int.from_bytes(enc[32:], 'big') % reference.FE.SIZE
+ x = int.from_bytes(bytes.fromhex(row['x']), 'big')
+ encstr = ", ".join(f"0x{b:02x}" for b in enc)
+ print(f" {{{{{encstr}}}, {format_fe(x)}, {tval & 1}}},")
+ print()
+
+output_xswiftec_inv_cases()
+output_ellswift_decode_cases()
diff --git a/bip-0324/test_sage_decoding.py b/bip-0324/test_sage_decoding.py
new file mode 100644
index 0000000..c26c334
--- /dev/null
+++ b/bip-0324/test_sage_decoding.py
@@ -0,0 +1,78 @@
+"""Compare ellswift decoding in the BIP-324 test vectors against the SwiftEC reference code.
+
+Instructions:
+
+* Clone the SwiftEC repository, and enter the directory:
+
+ git clone https://github.com/Jchavezsaab/SwiftEC
+ git checkout 5320a25035d91addde29d14164cce684b56a12ed
+ cd SwiftEC
+
+* Generate parameters for the secp256k1 curve:
+
+ sage --python generate_parameters.py -p secp256k1
+
+* Copy over this file and the CSV test vectors:
+
+ cp PATH_TO_BIPS_REPO/bips/bip-0324/{*.csv,test_sage_decoding.py} .
+
+* Run the tests:
+
+ sage --python test_sage_decoding.py -p secp256k1
+
+No output = good.
+"""
+
+import csv
+
+from config import F
+from Xencoding_0 import Xdecode
+
+
+FILENAME_PACKET_TEST = 'packet_encoding_test_vectors.csv'
+FILENAME_XSWIFTEC_INV_TEST = 'xswiftec_inv_test_vectors.csv'
+FILENAME_ELLSWIFT_DECODE_TEST = 'ellswift_decode_test_vectors.csv'
+
+def ellswift_decode_sage(ellswift):
+ """Given a 64-byte ellswift encoded public key, get the 32-byte X coordinate."""
+
+ u = F(int.from_bytes(ellswift[:32], 'big'))
+ t = F(int.from_bytes(ellswift[32:], 'big'))
+
+ # Reimplement the input correction step.
+ if u == F(0):
+ u = F(1)
+ if t == F(0):
+ t = F(1)
+ if u**3 + t**2 + 7 == F(0):
+ t = F(2) * t
+
+ # Invoke reference code
+ x, z = Xdecode(u, t)
+
+ # Convert to bytes.
+ return int(x / z).to_bytes(32, 'big')
+
+with open(FILENAME_PACKET_TEST, newline='', encoding='utf-8') as csvfile:
+ reader = csv.DictReader(csvfile)
+ for row in reader:
+ bytes_ellswift_ours = bytes.fromhex(row['in_ellswift_ours'])
+ bytes_ellswift_theirs = bytes.fromhex(row['in_ellswift_theirs'])
+ assert row['mid_x_ours'] == ellswift_decode_sage(bytes_ellswift_ours).hex()
+ assert row['mid_x_theirs'] == ellswift_decode_sage(bytes_ellswift_theirs).hex()
+
+with open(FILENAME_XSWIFTEC_INV_TEST, newline='', encoding='utf-8') as csvfile:
+ reader = csv.DictReader(csvfile)
+ for row in reader:
+ udat = bytes.fromhex(row['u'])
+ xdat = bytes.fromhex(row['x'])
+ for case in range(8):
+ tdat = bytes.fromhex(row[f"case{case}_t"])
+ if tdat:
+ assert ellswift_decode_sage(udat + tdat) == xdat
+
+with open(FILENAME_ELLSWIFT_DECODE_TEST, newline='', encoding='utf-8') as csvfile:
+ reader = csv.DictReader(csvfile)
+ for row in reader:
+ ellswift = bytes.fromhex(row['ellswift'])
+ assert ellswift_decode_sage(ellswift).hex() == row['x']
diff --git a/bip-0324/xswiftec_inv_test_vectors.csv b/bip-0324/xswiftec_inv_test_vectors.csv
new file mode 100644
index 0000000..138c4cf
--- /dev/null
+++ b/bip-0324/xswiftec_inv_test_vectors.csv
@@ -0,0 +1,33 @@
+u,x,case0_t,case1_t,case2_t,case3_t,case4_t,case5_t,case6_t,case7_t,comment
+05ff6bdad900fc3261bc7fe34e2fb0f569f06e091ae437d3a52e9da0cbfb9590,80cdf63774ec7022c89a5a8558e373a279170285e0ab27412dbce510bdfe23fc,,,45654798ece071ba79286d04f7f3eb1c3f1d17dd883610f2ad2efd82a287466b,0aeaa886f6b76c7158452418cbf5033adc5747e9e9b5d3b2303db96936528557,,,ba9ab867131f8e4586d792fb080c14e3c0e2e82277c9ef0d52d1027c5d78b5c4,f51557790948938ea7badbe7340afcc523a8b816164a2c4dcfc24695c9ad76d8,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:info[v=0]&ok;case3:ok;case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:info[v=0]&ok;case7:ok
+1737a85f4c8d146cec96e3ffdca76d9903dcf3bd53061868d478c78c63c2aa9e,39e48dd150d2f429be088dfd5b61882e7e8407483702ae9a5ab35927b15f85ea,1be8cc0b04be0c681d0c6a68f733f82c6c896e0c8a262fcd392918e303a7abf4,605b5814bf9b8cb066667c9e5480d22dc5b6c92f14b4af3ee0a9eb83b03685e3,,,e41733f4fb41f397e2f3959708cc07d3937691f375d9d032c6d6e71bfc58503b,9fa4a7eb4064734f99998361ab7f2dd23a4936d0eb4b50c11f56147b4fc9764c,,,case0:ok;case1:ok;case2:info[v=0]&bad[non_square(s)];case3:bad[non_square(s)];case4:ok;case5:ok;case6:info[v=0]&bad[non_square(s)];case7:bad[non_square(s)]
+1aaa1ccebf9c724191033df366b36f691c4d902c228033ff4516d122b2564f68,c75541259d3ba98f207eaa30c69634d187d0b6da594e719e420f4898638fc5b0,,,,,,,,,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:bad[non_square(q)];case3:bad[non_square(q)];case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:bad[non_square(q)];case7:bad[non_square(q)]
+2323a1d079b0fd72fc8bb62ec34230a815cb0596c2bfac998bd6b84260f5dc26,239342dfb675500a34a196310b8d87d54f49dcac9da50c1743ceab41a7b249ff,f63580b8aa49c4846de56e39e1b3e73f171e881eba8c66f614e67e5c975dfc07,b6307b332e699f1cf77841d90af25365404deb7fed5edb3090db49e642a156b6,,,09ca7f4755b63b7b921a91c61e4c18c0e8e177e145739909eb1981a268a20028,49cf84ccd19660e30887be26f50dac9abfb2148012a124cf6f24b618bd5ea579,,,case0:ok;case1:ok;case2:bad[non_square(q)];case3:bad[non_square(q)];case4:ok;case5:ok;case6:bad[non_square(q)];case7:bad[non_square(q)]
+2dc90e640cb646ae9164c0b5a9ef0169febe34dc4437d6e46acb0e27e219d1e8,d236f19bf349b9516e9b3f4a5610fe960141cb23bbc8291b9534f1d71de62a47,e69df7d9c026c36600ebdf588072675847c0c431c8eb730682533e964b6252c9,4f18bbdf7c2d6c5f818c18802fa35cd069eaa79fff74e4fc837c80d93fece2f8,,,196208263fd93c99ff1420a77f8d98a7b83f3bce37148cf97dacc168b49da966,b0e7442083d293a07e73e77fd05ca32f96155860008b1b037c837f25c0131937,,,case0:ok;case1:info[v=0]&ok;case2:bad[non_square(q)];case3:bad[non_square(q)];case4:ok;case5:info[v=0]&ok;case6:bad[non_square(q)];case7:bad[non_square(q)]
+3edd7b3980e2f2f34d1409a207069f881fda5f96f08027ac4465b63dc278d672,053a98de4a27b1961155822b3a3121f03b2a14458bd80eb4a560c4c7a85c149c,,,b3dae4b7dcf858e4c6968057cef2b156465431526538199cf52dc1b2d62fda30,4aa77dd55d6b6d3cfa10cc9d0fe42f79232e4575661049ae36779c1d0c666d88,,,4c251b482307a71b39697fa8310d4ea9b9abcead9ac7e6630ad23e4c29d021ff,b558822aa29492c305ef3362f01bd086dcd1ba8a99efb651c98863e1f3998ea7,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:ok;case3:ok;case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:ok;case7:ok
+4295737efcb1da6fb1d96b9ca7dcd1e320024b37a736c4948b62598173069f70,fa7ffe4f25f88362831c087afe2e8a9b0713e2cac1ddca6a383205a266f14307,,,,,,,,,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:bad[non_square(s)];case3:bad[non_square(s)];case4:bad[non_square(s)];case5:bad[non_square(s)];case6:bad[non_square(s)];case7:bad[non_square(s)]
+587c1a0cee91939e7f784d23b963004a3bf44f5d4e32a0081995ba20b0fca59e,2ea988530715e8d10363907ff25124524d471ba2454d5ce3be3f04194dfd3a3c,cfd5a094aa0b9b8891b76c6ab9438f66aa1c095a65f9f70135e8171292245e74,a89057d7c6563f0d6efa19ae84412b8a7b47e791a191ecdfdf2af84fd97bc339,475d0ae9ef46920df07b34117be5a0817de1023e3cc32689e9be145b406b0aef,a0759178ad80232454f827ef05ea3e72ad8d75418e6d4cc1cd4f5306c5e7c453,302a5f6b55f464776e48939546bc709955e3f6a59a0608feca17e8ec6ddb9dbb,576fa82839a9c0f29105e6517bbed47584b8186e5e6e132020d507af268438f6,b8a2f51610b96df20f84cbee841a5f7e821efdc1c33cd9761641eba3bf94f140,5f8a6e87527fdcdbab07d810fa15c18d52728abe7192b33e32b0acf83a1837dc,case0:ok;case1:ok;case2:ok;case3:ok;case4:ok;case5:ok;case6:ok;case7:ok
+5fa88b3365a635cbbcee003cce9ef51dd1a310de277e441abccdb7be1e4ba249,79461ff62bfcbcac4249ba84dd040f2cec3c63f725204dc7f464c16bf0ff3170,,,6bb700e1f4d7e236e8d193ff4a76c1b3bcd4e2b25acac3d51c8dac653fe909a0,f4c73410633da7f63a4f1d55aec6dd32c4c6d89ee74075edb5515ed90da9e683,,,9448ff1e0b281dc9172e6c00b5893e4c432b1d4da5353c2ae3725399c016f28f,0b38cbef9cc25809c5b0e2aa513922cd3b39276118bf8a124aaea125f25615ac,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:ok;case3:info[v=0]&ok;case4:bad[non_square(s)];case5:bad[non_square(s)];case6:ok;case7:info[v=0]&ok
+6fb31c7531f03130b42b155b952779efbb46087dd9807d241a48eac63c3d96d6,56f81be753e8d4ae4940ea6f46f6ec9fda66a6f96cc95f506cb2b57490e94260,,,59059774795bdb7a837fbe1140a5fa59984f48af8df95d57dd6d1c05437dcec1,22a644db79376ad4e7b3a009e58b3f13137c54fdf911122cc93667c47077d784,,,a6fa688b86a424857c8041eebf5a05a667b0b7507206a2a82292e3f9bc822d6e,dd59bb2486c8952b184c5ff61a74c0ecec83ab0206eeedd336c9983a8f8824ab,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:ok;case3:info[v=0]&ok;case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:ok;case7:info[v=0]&ok
+704cd226e71cb6826a590e80dac90f2d2f5830f0fdf135a3eae3965bff25ff12,138e0afa68936ee670bd2b8db53aedbb7bea2a8597388b24d0518edd22ad66ec,,,,,,,,,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:bad[non_square(q)];case3:bad[non_square(q)];case4:bad[non_square(s)];case5:bad[non_square(s)];case6:bad[non_square(q)];case7:bad[non_square(q)]
+725e914792cb8c8949e7e1168b7cdd8a8094c91c6ec2202ccd53a6a18771edeb,8da16eb86d347376b6181ee9748322757f6b36e3913ddfd332ac595d788e0e44,dd357786b9f6873330391aa5625809654e43116e82a5a5d82ffd1d6624101fc4,a0b7efca01814594c59c9aae8e49700186ca5d95e88bcc80399044d9c2d8613d,,,22ca8879460978cccfc6e55a9da7f69ab1bcee917d5a5a27d002e298dbefdc6b,5f481035fe7eba6b3a63655171b68ffe7935a26a1774337fc66fbb253d279af2,,,case0:ok;case1:info[v=0]&ok;case2:bad[non_square(s)];case3:bad[non_square(s)];case4:ok;case5:info[v=0]&ok;case6:bad[non_square(s)];case7:bad[non_square(s)]
+78fe6b717f2ea4a32708d79c151bf503a5312a18c0963437e865cc6ed3f6ae97,8701948e80d15b5cd8f72863eae40afc5aced5e73f69cbc8179a33902c094d98,,,,,,,,,case0:bad[non_square(s)];case1:info[v=0]&bad[non_square(s)];case2:bad[non_square(q)];case3:bad[non_square(q)];case4:bad[non_square(s)];case5:info[v=0]&bad[non_square(s)];case6:bad[non_square(q)];case7:bad[non_square(q)]
+7c37bb9c5061dc07413f11acd5a34006e64c5c457fdb9a438f217255a961f50d,5c1a76b44568eb59d6789a7442d9ed7cdc6226b7752b4ff8eaf8e1a95736e507,,,b94d30cd7dbff60b64620c17ca0fafaa40b3d1f52d077a60a2e0cafd145086c2,,,,46b2cf32824009f49b9df3e835f05055bf4c2e0ad2f8859f5d1f3501ebaf756d,,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:info[q=0]&info[X=0]&ok;case3:info[q=0]&bad[r=0];case4:bad[non_square(s)];case5:bad[non_square(s)];case6:info[q=0]&info[X=0]&ok;case7:info[q=0]&bad[r=0]
+82388888967f82a6b444438a7d44838e13c0d478b9ca060da95a41fb94303de6,29e9654170628fec8b4972898b113cf98807f4609274f4f3140d0674157c90a0,,,,,,,,,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:bad[non_square(s)];case3:info[v=0]&bad[non_square(s)];case4:bad[non_square(s)];case5:bad[non_square(s)];case6:bad[non_square(s)];case7:info[v=0]&bad[non_square(s)]
+91298f5770af7a27f0a47188d24c3b7bf98ab2990d84b0b898507e3c561d6472,144f4ccbd9a74698a88cbf6fd00ad886d339d29ea19448f2c572cac0a07d5562,e6a0ffa3807f09dadbe71e0f4be4725f2832e76cad8dc1d943ce839375eff248,837b8e68d4917544764ad0903cb11f8615d2823cefbb06d89049dbabc69befda,,,195f005c7f80f6252418e1f0b41b8da0d7cd189352723e26bc317c6b8a1009e7,7c8471972b6e8abb89b52f6fc34ee079ea2d7dc31044f9276fb6245339640c55,,,case0:ok;case1:ok;case2:bad[non_square(s)];case3:info[v=0]&bad[non_square(s)];case4:ok;case5:ok;case6:bad[non_square(s)];case7:info[v=0]&bad[non_square(s)]
+b682f3d03bbb5dee4f54b5ebfba931b4f52f6a191e5c2f483c73c66e9ace97e1,904717bf0bc0cb7873fcdc38aa97f19e3a62630972acff92b24cc6dda197cb96,,,,,,,,,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:bad[non_square(s)];case3:bad[non_square(s)];case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:bad[non_square(s)];case7:bad[non_square(s)]
+c17ec69e665f0fb0dbab48d9c2f94d12ec8a9d7eacb58084833091801eb0b80b,147756e66d96e31c426d3cc85ed0c4cfbef6341dd8b285585aa574ea0204b55e,6f4aea431a0043bdd03134d6d9159119ce034b88c32e50e8e36c4ee45eac7ae9,fd5be16d4ffa2690126c67c3ef7cb9d29b74d397c78b06b3605fda34dc9696a6,5e9c60792a2f000e45c6250f296f875e174efc0e9703e628706103a9dd2d82c7,,90b515bce5ffbc422fcecb2926ea6ee631fcb4773cd1af171c93b11aa1538146,02a41e92b005d96fed93983c1083462d648b2c683874f94c9fa025ca23696589,a1639f86d5d0fff1ba39daf0d69078a1e8b103f168fc19d78f9efc5522d27968,,case0:ok;case1:ok;case2:info[q=0]&info[X=0]&ok;case3:info[q=0]&bad[r=0];case4:ok;case5:ok;case6:info[q=0]&info[X=0]&ok;case7:info[q=0]&bad[r=0]
+c25172fc3f29b6fc4a1155b8575233155486b27464b74b8b260b499a3f53cb14,1ea9cbdb35cf6e0329aa31b0bb0a702a65123ed008655a93b7dcd5280e52e1ab,,,7422edc7843136af0053bb8854448a8299994f9ddcefd3a9a92d45462c59298a,78c7774a266f8b97ea23d05d064f033c77319f923f6b78bce4e20bf05fa5398d,,,8bdd12387bcec950ffac4477abbb757d6666b06223102c5656d2bab8d3a6d2a5,873888b5d990746815dc2fa2f9b0fcc388ce606dc09487431b1df40ea05ac2a2,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:ok;case3:ok;case4:bad[non_square(s)];case5:bad[non_square(s)];case6:ok;case7:ok
+cab6626f832a4b1280ba7add2fc5322ff011caededf7ff4db6735d5026dc0367,2b2bef0852c6f7c95d72ac99a23802b875029cd573b248d1f1b3fc8033788eb6,,,,,,,,,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:info[v=0]&bad[non_square(s)];case3:bad[non_square(s)];case4:bad[non_square(s)];case5:bad[non_square(s)];case6:info[v=0]&bad[non_square(s)];case7:bad[non_square(s)]
+d8621b4ffc85b9ed56e99d8dd1dd24aedcecb14763b861a17112dc771a104fd2,812cabe972a22aa67c7da0c94d8a936296eb9949d70c37cb2b2487574cb3ce58,fbc5febc6fdbc9ae3eb88a93b982196e8b6275a6d5a73c17387e000c711bd0e3,8724c96bd4e5527f2dd195a51c468d2d211ba2fac7cbe0b4b3434253409fb42d,,,043a014390243651c147756c467de691749d8a592a58c3e8c781fff28ee42b4c,78db36942b1aad80d22e6a5ae3b972d2dee45d0538341f4b4cbcbdabbf604802,,,case0:ok;case1:ok;case2:bad[non_square(s)];case3:bad[non_square(s)];case4:ok;case5:ok;case6:bad[non_square(s)];case7:bad[non_square(s)]
+da463164c6f4bf7129ee5f0ec00f65a675a8adf1bd931b39b64806afdcda9a22,25b9ce9b390b408ed611a0f13ff09a598a57520e426ce4c649b7f94f2325620d,,,,,,,,,case0:bad[non_square(s)];case1:info[v=0]&bad[non_square(s)];case2:bad[non_square(s)];case3:bad[non_square(s)];case4:bad[non_square(s)];case5:info[v=0]&bad[non_square(s)];case6:bad[non_square(s)];case7:bad[non_square(s)]
+dafc971e4a3a7b6dcfb42a08d9692d82ad9e7838523fcbda1d4827e14481ae2d,250368e1b5c58492304bd5f72696d27d526187c7adc03425e2b7d81dbb7e4e02,,,370c28f1be665efacde6aa436bf86fe21e6e314c1e53dd040e6c73a46b4c8c49,cd8acee98ffe56531a84d7eb3e48fa4034206ce825ace907d0edf0eaeb5e9ca2,,,c8f3d70e4199a105321955bc9407901de191ceb3e1ac22fbf1938c5a94b36fe6,327531167001a9ace57b2814c1b705bfcbdf9317da5316f82f120f1414a15f8d,case0:bad[non_square(s)];case1:info[v=0]&bad[non_square(s)];case2:ok;case3:ok;case4:bad[non_square(s)];case5:info[v=0]&bad[non_square(s)];case6:ok;case7:ok
+e0294c8bc1a36b4166ee92bfa70a5c34976fa9829405efea8f9cd54dcb29b99e,ae9690d13b8d20a0fbbf37bed8474f67a04e142f56efd78770a76b359165d8a1,,,dcd45d935613916af167b029058ba3a700d37150b9df34728cb05412c16d4182,,,,232ba26ca9ec6e950e984fd6fa745c58ff2c8eaf4620cb8d734fabec3e92baad,,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:info[q=0]&info[X=0]&ok;case3:info[q=0]&bad[r=0];case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:info[q=0]&info[X=0]&ok;case7:info[q=0]&bad[r=0]
+e148441cd7b92b8b0e4fa3bd68712cfd0d709ad198cace611493c10e97f5394e,164a639794d74c53afc4d3294e79cdb3cd25f99f6df45c000f758aba54d699c0,,,,,,,,,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:bad[non_square(s)];case3:info[v=0]&bad[non_square(s)];case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:bad[non_square(s)];case7:info[v=0]&bad[non_square(s)]
+e4b00ec97aadcca97644d3b0c8a931b14ce7bcf7bc8779546d6e35aa5937381c,94e9588d41647b3fcc772dc8d83c67ce3be003538517c834103d2cd49d62ef4d,c88d25f41407376bb2c03a7fffeb3ec7811cc43491a0c3aac0378cdc78357bee,51c02636ce00c2345ecd89adb6089fe4d5e18ac924e3145e6669501cd37a00d4,205b3512db40521cb200952e67b46f67e09e7839e0de44004138329ebd9138c5,58aab390ab6fb55c1d1b80897a207ce94a78fa5b4aa61a33398bcae9adb20d3e,3772da0bebf8c8944d3fc5800014c1387ee33bcb6e5f3c553fc8732287ca8041,ae3fd9c931ff3dcba132765249f7601b2a1e7536db1ceba19996afe22c85fb5b,dfa4caed24bfade34dff6ad1984b90981f6187c61f21bbffbec7cd60426ec36a,a7554c6f54904aa3e2e47f7685df8316b58705a4b559e5ccc6743515524deef1,case0:ok;case1:ok;case2:ok;case3:info[v=0]&ok;case4:ok;case5:ok;case6:ok;case7:info[v=0]&ok
+e5bbb9ef360d0a501618f0067d36dceb75f5be9a620232aa9fd5139d0863fde5,e5bbb9ef360d0a501618f0067d36dceb75f5be9a620232aa9fd5139d0863fde5,,,,,,,,,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:bad[s=0];case3:bad[s=0];case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:bad[s=0];case7:bad[s=0]
+e6bcb5c3d63467d490bfa54fbbc6092a7248c25e11b248dc2964a6e15edb1457,19434a3c29cb982b6f405ab04439f6d58db73da1ee4db723d69b591da124e7d8,67119877832ab8f459a821656d8261f544a553b89ae4f25c52a97134b70f3426,ffee02f5e649c07f0560eff1867ec7b32d0e595e9b1c0ea6e2a4fc70c97cd71f,b5e0c189eb5b4bacd025b7444d74178be8d5246cfa4a9a207964a057ee969992,5746e4591bf7f4c3044609ea372e908603975d279fdef8349f0b08d32f07619d,98ee67887cd5470ba657de9a927d9e0abb5aac47651b0da3ad568eca48f0c809,0011fd0a19b63f80fa9f100e7981384cd2f1a6a164e3f1591d5b038e36832510,4a1f3e7614a4b4532fda48bbb28be874172adb9305b565df869b5fa71169629d,a8b91ba6e4080b3cfbb9f615c8d16f79fc68a2d8602107cb60f4f72bd0f89a92,case0:ok;case1:info[v=0]&ok;case2:ok;case3:ok;case4:ok;case5:info[v=0]&ok;case6:ok;case7:ok
+f28fba64af766845eb2f4302456e2b9f8d80affe57e7aae42738d7cddb1c2ce6,f28fba64af766845eb2f4302456e2b9f8d80affe57e7aae42738d7cddb1c2ce6,4f867ad8bb3d840409d26b67307e62100153273f72fa4b7484becfa14ebe7408,5bbc4f59e452cc5f22a99144b10ce8989a89a995ec3cea1c91ae10e8f721bb5d,,,b079852744c27bfbf62d9498cf819deffeacd8c08d05b48b7b41305db1418827,a443b0a61bad33a0dd566ebb4ef317676576566a13c315e36e51ef1608de40d2,,,case0:ok;case1:ok;case2:bad[s=0];case3:bad[s=0];case4:ok;case5:ok;case6:bad[s=0];case7:bad[s=0]
+f455605bc85bf48e3a908c31023faf98381504c6c6d3aeb9ede55f8dd528924d,d31fbcd5cdb798f6c00db6692f8fe8967fa9c79dd10958f4a194f01374905e99,,,0c00c5715b56fe632d814ad8a77f8e66628ea47a6116834f8c1218f3a03cbd50,df88e44fac84fa52df4d59f48819f18f6a8cd4151d162afaf773166f57c7ff46,,,f3ff3a8ea4a9019cd27eb527588071999d715b859ee97cb073ede70b5fc33edf,20771bb0537b05ad20b2a60b77e60e7095732beae2e9d505088ce98fa837fce9,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:info[v=0]&ok;case3:ok;case4:bad[non_square(s)];case5:bad[non_square(s)];case6:info[v=0]&ok;case7:ok
+f58cd4d9830bad322699035e8246007d4be27e19b6f53621317b4f309b3daa9d,78ec2b3dc0948de560148bbc7c6dc9633ad5df70a5a5750cbed721804f082a3b,6c4c580b76c7594043569f9dae16dc2801c16a1fbe12860881b75f8ef929bce5,94231355e7385c5f25ca436aa64191471aea4393d6e86ab7a35fe2afacaefd0d,dff2a1951ada6db574df834048149da3397a75b829abf58c7e69db1b41ac0989,a52b66d3c907035548028bf804711bf422aba95f1a666fc86f4648e05f29caae,93b3a7f48938a6bfbca9606251e923d7fe3e95e041ed79f77e48a07006d63f4a,6bdcecaa18c7a3a0da35bc9559be6eb8e515bc6c291795485ca01d4f5350ff22,200d5e6ae525924a8b207cbfb7eb625cc6858a47d6540a73819624e3be53f2a6,5ad4992c36f8fcaab7fd7407fb8ee40bdd5456a0e599903790b9b71ea0d63181,case0:ok;case1:ok;case2:info[v=0]&ok;case3:ok;case4:ok;case5:ok;case6:info[v=0]&ok;case7:ok
+fd7d912a40f182a3588800d69ebfb5048766da206fd7ebc8d2436c81cbef6421,8d37c862054debe731694536ff46b273ec122b35a9bf1445ac3c4ff9f262c952,,,,,,,,,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:info[v=0]&bad[non_square(s)];case3:bad[non_square(s)];case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:info[v=0]&bad[non_square(s)];case7:bad[non_square(s)]
diff --git a/bip-0324/xswiftec_test_vectors.csv b/bip-0324/xswiftec_test_vectors.csv
new file mode 100644
index 0000000..985235f
--- /dev/null
+++ b/bip-0324/xswiftec_test_vectors.csv
@@ -0,0 +1,33 @@
+u,x,case0_t,case1_t,case2_t,case3_t,case4_t,case5_t,case6_t,case7_t
+08da7c45cb204377e7e42249cda5713fa865116ddbb4cb5a1949b2e5b438a6ab,e087b707dabf2796b03b2fb4f976c3f2f5abb36110d00ef656432117f2c93f0a,,,,,,,,
+0a6361b3a802f55cd5ae06101c88a1e216320fe11cc0cfe1d791eed08a1200fd,a0223bc98997647daf4d520129bdb66e4937a00d1533af1fa29645fb96fb5bb5,60a3ed14bd9df0bfb89ada9372a7b5790b123a66bf130f5788237e8cd5225de4,9c4ee4629f10220fda49532d0c859a539dec5148eefc78bf48d93d2828027a9c,fc5e72f042fd1792cbf88728a374a2cc1e03e1f9ec8813fa3692e497cfa7d5e6,cb39fac005f26dc0a383ea64cb9b3b0b26767f20232cae4486f32904df4f04e3,9f5c12eb42620f404765256c8d584a86f4edc59940ecf0a877dc81722add9e4b,63b11b9d60efddf025b6acd2f37a65ac6213aeb711038740b726c2d6d7fd8193,03a18d0fbd02e86d340778d75c8b5d33e1fc1e061377ec05c96d1b6730582649,34c6053ffa0d923f5c7c159b3464c4f4d98980dfdcd351bb790cd6fa20b0f74c
+102b51b9765a56a3e899f7cf0ee38e5251f9c503b357b330a49183eb7b155604,102b51b9765a56a3e899f7cf0ee38e5251f9c503b357b330a49183eb7b155604,bdb5bd58ca96eae36147a6c55bc2bef2cee55a757ee193cb619edc8d3590f90a,bda953c1da02059350e740b83f59149628e0be50c24ac8dc6908a2225931b4a0,,,424a42a73569151c9eb8593aa43d410d311aa58a811e6c349e612371ca6f0325,4256ac3e25fdfa6caf18bf47c0a6eb69d71f41af3db5372396f75ddca6ce478f,,
+2921a11f25dadaa24aa79a548e4e81508c2e5e56af2d833d65e2bcce448ce2f5,3a70c472406b83d9f1c4398b8ecef786499bc44a3b30c34ac30f2d8a418bffa3,b9c76c21d3fabb948fa0326bf9e999068e9eed56ee4e76cb81558aa26969c56c,ef7dd84338732a0cac3a8995f3bacf9b2896582b8d3317ed508e5d9a5a3447af,,,463893de2c05446b705fcd94061666f9716112a911b189347eaa755c969636c3,108227bcc78cd5f353c5766a0c453064d769a7d472cce812af71a264a5cbb480,,
+33b67cb5385ceddad93d0ee960679041613bed34b8b4a5e6362fe7539ba2d3ce,0105c74958a165e016502eeb87835195505d89714c95272b6fa88fe6c60b33ac,,,069e1b3b155c6da989b9b6a8735bba3c5c1049dcf01fe4474772244db89cf9ca,c77b10bca540e95ee66c1f57ab6297787849a89b2b883116e700593e3c0fe66d,,,f961e4c4eaa39256764649578ca445c3a3efb6230fe01bb8b88ddbb147630265,3884ef435abf16a11993e0a8549d688787b65764d477cee918ffa6c0c3f015c2
+3a898eecdae167231275338e9a79153cbe53f7bf99943eeb72ee64e57bb58699,41ffd7362aaa7b90fe03936deeebe9afafd9c18967122d8f972db2c050d4f07b,60abf7ed2a7ffd3d2ac242a782331ea663d55ca157af994e5e964e9c79a0db40,3c3c39dc37753ab9160dfbc2e0596c3a5114784690caa1836e12036814453da3,adcd3f100de60723f127278998c591fbf081af8e0a77f2a9090bed67d8aa2aa3,,9f540812d58002c2d53dbd587dcce1599c2aa35ea85066b1a169b162865f20ef,c3c3c623c88ac546e9f2043d1fa693c5aeeb87b96f355e7c91edfc96ebbabe8c,5232c0eff219f8dc0ed8d876673a6e040f7e5071f5880d56f6f412972755d18c,
+46e04d129d7b45d054469ce34e24069a1426b3e34f1b68a3d1bff1e070aee192,c6ce9611bd908c16eba5c599e5219de2d18d82c96aafb0180b23ee315513618f,,,,,,,,
+47dc540c94ceb704a23875c11273e16bb0b8a87aed84de911f2133568115f254,13964717dbc998964d7c19ec3d9981fe1d4a9a80845552a98fb9352898532844,,,,,,,,
+4cab73ce2a7e6220975001c8a354143267a3c1ce8bf7692313e654481e616a93,9114cf2edd3b53dbb6581290a5cca532db38b4e9ceeacc9b0437a0e49bf97211,903b600ed648d4ddc48f0f628829c8992c88fab44b692413fb8b3d783854f9a2,2952afe39557606d08c311345788a5071413580917207c86ea7cb829cf2f2c6d,05f414320d0c4004cff10f798c3fda6c4fc335b5a2db940993b3d78147a25c18,48e2531c7e3ec99f807210d6c5330114b4f04d7345535ca5a6e6abf478bdb723,6fc49ff129b72b223b70f09d77d63766d377054bb496dbec0474c286c7ab028d,d6ad501c6aa89f92f73ceecba8775af8ebeca7f6e8df8379158347d530d0cfc2,fa0bebcdf2f3bffb300ef08673c02593b03cca4a5d246bf66c4c287db85da017,b71dace381c136607f8def293accfeeb4b0fb28cbaaca35a5919540a8742450c
+5aeca385d8b781825b07bbec7c858b7170426c88088935850bc13dd6402368a5,a5135c7a27487e7da4f84413837a748e8fbd9377f776ca7af43ec228bfdc938a,8da4f71fb2700758f623d73c24ac91747da43f2302fce16c8d438a769c63495f,6b8f345fc0a25a76455541ddbf2791ff4b943c98b16db2b6eb6cea94a6b19afb,,,725b08e04d8ff8a709dc28c3db536e8b825bc0dcfd031e9372bc7588639cb2d0,9470cba03f5da589baaabe2240d86e00b46bc3674e924d491493156a594e6134,,
+707bf0b938f307b5c222e670598b865d5e1f8a8003df82c7abbf7c9f8fa4d720,8f840f46c70cf84a3ddd198fa67479a2a1e0757ffc207d385440835f705b250f,,,eab90fb459bace62d3ce8fbd69c9f1039f0627d0e93e2f42bffd87889cb236a4,157c26578b226c66daf8edfa56f7560f1131f41d1685175e6d76cc95b4f89f10,,,1546f04ba645319d2c31704296360efc60f9d82f16c1d0bd40027876634dc58b,ea83d9a874dd939925071205a908a9f0eece0be2e97ae8a1928933694b075d1f
+766caa663e1025b9accd7ededd24fbc8193180e028eedae2f41d6bb0b1d36468,22825ee826f8b76c27220e43c79c884a8518bc20f4978cc15f83f9c48346a314,,,8fe95c178da66d1dd249ea6a4dc614a6d46d79c83cbc4beafee518090263e48a,7b044cb756eb207226db302ba05e164781c2f5161dccd72607282cb9ad86a282,,,7016a3e8725992e22db61595b239eb592b928637c343b415011ae7f5fd9c17a5,84fbb348a914df8dd924cfd45fa1e9b87e3d0ae9e23328d9f8d7d345527959ad
+78a23af8da46b1b37e8767921a2d3f528fdc8eca37cea8aea775fd2b283d3776,73d5f35d96f3ce1ef5802ead8edc10787700c593b5e0ddcc3bfb2720b9d36de3,8465ad20bd0f2b4a2d37106769af46288a109bc10b527c3b033c930c0e4b1025,1b7f03bd2c915bb736622aec85601bcabec89268c98945e19a0de4126ed62524,,,7b9a52df42f0d4b5d2c8ef989650b9d775ef643ef4ad83c4fcc36cf2f1b4ec0a,e480fc42d36ea448c99dd5137a9fe43541376d973676ba1e65f21bec9129d70b,,
+78b4be1f9eeef9da65c393e4385f67edd142709b400ca7d900bd952e0c3cf727,089329e17a58a91e71ffe6ddd851e8a352e85a29fcc289b34a3bfdeaf958fe91,,,6008d703955b38da0166bd975ad3535af3b701b2efdf653fc5e7e6eb6afff0a3,,,,9ff728fc6aa4c725fe994268a52caca50c48fe4d10209ac03a18191395000b8c,
+7a2a7c0a81d1bd595dff09b918f8ecb5b5e8493654a4f83496956ed8eb017674,85d583f57e2e42a6a200f646e707134a4a17b6c9ab5b07cb696a912614fe85bb,,,,,,,,
+913da1f8df6f8fd47593840d533ba0458cc9873996bf310460abb495b34c232a,a7803f8e02b70718443a06db502c67925640e936b3fa46dd2ed6b8f7c80fa329,67d916ba2cc154464d87ff4e0cfe3bb816b22a961831c2daf62597a8b0681e87,a4b84520f8853e5482ee7689732ed7dd7da59945d26edeee0bf5f55d3507192f,,,9826e945d33eabb9b27800b1f301c447e94dd569e7ce3d2509da68564f97dda8,5b47badf077ac1ab7d1189768cd12822825a66ba2d912111f40a0aa1caf8e300,,
+96a296d224f285c67bee93c30f8a309157f0daa35dc5b87e410b78630a09cfc7,7684ab3b1a43e20a97a7b5520e5b5347841a7d95984fd76b2478a2b710f1a2ce,,,,,,,,
+99be5efb88ca2013bd8e4eb035fd42d5245468fe9afa70d8ba9c1c419a48c4e8,08ee83ae5c7af0c9b2341e595fe347537272d94f2fe9f10b9a8f913279fc6230,,,,,,,,
+9b4fb24edd6d1d8830e272398263cdbf026b97392cc35387b991dc0248a628f9,80e81d40a50b53712a8dac5f468b0903c05219544a56af70aa152ebf17887701,,,6e94af5a32ac100c5230f1e119c538742b7051934b02f3850522cff26bd32d97,e9bd309fbf041342311be3d5bab0b9d16c9f80c6640eb47e311d3178c2adc75d,,,916b50a5cd53eff3adcf0e1ee63ac78bd48fae6cb4fd0c7afadd300c942cce98,1642cf6040fbecbdcee41c2a454f462e93607f399bf14b81cee2ce863d5234d2
+9def996cb1ea87e596b6cadccca3839a352e99d9ce07e635cdb239f38ca294f8,294850a665ab014a0e75eb4b52ee66dd8a8d2b5e453074e58afacb5e019ee90a,b1a29367b95e1996f7e393fb389e7ace812d4135f6ddcdcd77467fc000dfca8c,a340aabc95b4000e3043ba6139178c450046c985fbf09676c440bc6430ddaa5b,4c4cd400d0be335dd651370c5565c2b742a298016212a8605187b3c0751a811e,d90fa208bbb5f3f6e16c5a42b419188ec1951c1eb358f04741b7b48df9e55f79,4e5d6c9846a1e669081c6c04c76185317ed2beca0922323288b9803eff2031a3,5cbf55436a4bfff1cfbc459ec6e873baffb9367a040f69893bbf439acf2251d4,b3b32bff2f41cca229aec8f3aa9a3d48bd5d67fe9ded579fae784c3e8ae57b11,26f05df7444a0c091e93a5bd4be6e7713e6ae3e14ca70fb8be484b71061a9cb6
+a2c4aed1cf757cd9a509734a267ffc7b1166b55f4c8f9c3e3550c56e743328fc,a2c4aed1cf757cd9a509734a267ffc7b1166b55f4c8f9c3e3550c56e743328fc,,,,,,,,
+a8e437abf9c0e74dc6d51eabf2d261a00e785c7e21efeac1f322b610273ba066,5a64cce4be767964e7dba23e78e30149326c539353b647e0d5d7cc361943b13b,,,6f73bdd6b748790b5f788935ca02aee3b9e560c4ba6caf47d716fbde1dd6e92c,b1ff705694188e672f58c6a05eeecc379dd1b60fd3cb9f19fcb02b1d9cab4bc5,,,908c422948b786f4a08776ca35fd511c461a9f3b459350b828e90420e2291303,4e008fa96be77198d0a7395fa11133c8622e49f02c3460e6034fd4e16354b06a
+bf60e4349cace6bce0d552e8d783428db66d0d649bd9e430a3627e2ee14ac839,409f1bcb635319431f2aad17287cbd724992f29b64261bcf5c9d81d01eb533f6,,,,,,,,
+c0ba8a33ac67f44abff5984dfbb6f56c46b880ac2b86e1f23e7fa9c402c53ae7,4767c4cab0d08133980a8e66c3f93a055c8ae62f89a92f8dcfa47607cee0bc57,4c21052f5ffccadb4f707aa1cba828ef384d7861af1690c59d638dfee9f368e7,dbcc8fe22896478161452d44688a6b138050a4d0964470c175a521dcecc5519a,,,b3defad0a0033524b08f855e3457d710c7b2879e50e96f3a629c7200160c9348,2433701dd769b87e9ebad2bb977594ec7faf5b2f69bb8f3e8a5ade22133aaa95,,
+cbe2268747c9c8072c7f9926f2288f270637dc55bb9d14d3368361d5e47d25be,0e4e25736b614910c4984843e606b1e229def08bfd672ab61e2707cde8248c6d,,,c30567184201fac8e1cb9e776d921e17d28cdb7333223abd1c8f860a16393df1,,,,3cfa98e7bdfe05371e346188926de1e82d73248cccddc542e37079f4e9c6be3e,
+ceb827ad3d3884fd4d50ae6099d6d50c09a21e72ebd309708e8b69d93df19e55,a6a0c8c94462f16f1b92502c3d5f9d1618f12ffa756227d5b19b01b9373cd940,,,,,,,,
+d57e9d4f5842134f140032eaf38b5333638e8c4b145fcf86a23d48d3e9acc0f8,2a8162b0a7bdecb0ebffcd150c74accc9c7173b4eba030795dc2b72b16533b37,349a9a592d2c56e5378ae869d646043fc09ffb8fe5fd9debd83a11274da08892,9875f58028cc991cafab9fb1183b350bc1d8d5ce5723813cc2b8434ed1a2100f,,,cb6565a6d2d3a91ac875179629b9fbc03f6004701a02621427c5eed7b25f739d,678a0a7fd73366e35054604ee7c4caf43e272a31a8dc7ec33d47bcb02e5dec20,,
+d94e7f1e9bb1f8a9b90996ba12c461b84956f0e7f230145cc594c2f80b067aa0,b4f4632803cff65c013a566748cd3386d58cd3a28f5b4721056cbe9d278a67a4,,,fad51eda7d418ee2785df9f3788ac9152576312177fc0fd83c65036750581620,749259382784be63f86cc927a5defa6aa8cecb98e38d68f6b7a7e958303c94ad,,,052ae12582be711d87a2060c877536eada89cede8803f027c39afc97afa7e60f,8b6da6c7d87b419c079336d85a210595573134671c729709485816a6cfc36782
+e545d395bb3fd971f91bf9a2b6722831df704efae6c1aa9da0989ed0970b77bb,760486143a1d512da5219d3e5febc7c5c9990d21ca7a501ed23f86c91ddee4cf,,,090892960a84c69967fe5a5d014d3ca19173e4cb72a908586fbce9d1e531a265,42a47f65d00ff2004faa98865ee8ed4f8a9a5ddc9f75042d728de335664bb546,,,f6f76d69f57b39669801a5a2feb2c35e6e8c1b348d56f7a79043162d1ace59ca,bd5b809a2ff00dffb0556779a11712b07565a223608afbd28d721cc999b446e9
+e9f86cefcfd61558fe75da7d4ea48a6c82d93191c6d49579aab49f99e543dcad,5db7371325a7bb83b030691b2d87cd9f199f43d91e302568391ac48181b7cea6,,,,,,,,
+eec4121f2a07b61aba16414812aa9afc39ab0a136360a5ace2240dc19b0464eb,0b623c5296c13218a1eb24e79d00b04bf15788f6c2f7ec100a4a16f1473124a2,,,,,,,,
+f566cc6fccc657365c0197accf3a7d6f80f85209ff666ff774f4dcbc524aa842,0a9933903339a8c9a3fe685330c582907f07adf6009990088b0b2342adb553ed,3ab8dc4ecbc0441c685436ac0d76f16393769c353be6092bd6ec4ce094106bd8,3bd189b4ef3d1baa5610f2b14cb4a2b377eb171511e6f36ef6a05a2c7c52e368,1594764c6296402aadd123675d81f3505d35f2a52c52881568eadb7b675b53f0,c64fbf71138e66de8ce0abdf3b6f51d151ca8e1037ab5b979e62b2faa15be81c,c54723b1343fbbe397abc953f2890e9c6c8963cac419f6d42913b31e6bef9057,c42e764b10c2e455a9ef0d4eb34b5d4c8814e8eaee190c91095fa5d283ad18c7,ea6b89b39d69bfd5522edc98a27e0cafa2ca0d5ad3ad77ea9715248398a4a83f,39b0408eec719921731f5420c490ae2eae3571efc854a468619d4d045ea41413
diff --git a/bip-0327.mediawiki b/bip-0327.mediawiki
new file mode 100644
index 0000000..07b40f5
--- /dev/null
+++ b/bip-0327.mediawiki
@@ -0,0 +1,829 @@
+<pre>
+ BIP: 327
+ Title: MuSig2 for BIP340-compatible Multi-Signatures
+ Author: Jonas Nick <jonasd.nick@gmail.com>
+ Tim Ruffing <crypto@timruffing.de>
+ Elliott Jin <elliott.jin@gmail.com>
+ Status: Draft
+ License: BSD-3-Clause
+ Type: Informational
+ Created: 2022-03-22
+ Post-History: 2022-04-05: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-April/020198.html [bitcoin-dev] MuSig2 BIP
+ 2022-10-11: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-October/021000.html [bitcoin-dev] MuSig2 BIP
+ Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0327
+</pre>
+
+== Introduction ==
+
+=== Abstract ===
+
+This document proposes a standard for the [https://eprint.iacr.org/2020/1261.pdf MuSig2] multi-signature scheme.
+The standard is compatible with [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP340] public keys and signatures.
+It supports ''tweaking'', which allows deriving [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP32] child keys from aggregate public keys and creating [https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341] Taproot outputs with key and script paths.
+
+=== Copyright ===
+
+This document is licensed under the 3-clause BSD license.
+
+=== Motivation ===
+
+MuSig2 is a multi-signature scheme that allows multiple signers to create a single aggregate public key and cooperatively create ordinary Schnorr signatures valid under the aggregate public key.
+Signing requires interaction between ''all'' signers involved in key aggregation.
+(MuSig2 is a ''n-of-n'' multi-signature scheme and not a ''t-of-n'' threshold-signature scheme.)
+
+The primary motivation is to create a standard that allows users of different software projects to jointly control Taproot outputs ([https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341]).
+Such an output contains a public key which, in this case, would be the aggregate of all users' individual public keys.
+It can be spent using MuSig2 to produce a signature for the key-based spending path.
+
+The on-chain footprint of a MuSig2 Taproot output is essentially a single BIP340 public key, and a transaction spending the output only requires a single signature cooperatively produced by all signers. This is '''more compact''' and has '''lower verification cost''' than each signer providing an individual public key and signature, as would be required by an ''n-of-n'' policy implemented using <code>OP_CHECKSIGADD</code> as introduced in ([https://github.com/bitcoin/bips/blob/master/bip-0342.mediawiki BIP342]).
+As a side effect, the number ''n'' of signers is not limited by any consensus rules when using MuSig2.
+
+Moreover, MuSig2 offers a '''higher level of privacy''' than <code>OP_CHECKSIGADD</code>: MuSig2 Taproot outputs are indistinguishable for a blockchain observer from regular, single-signer Taproot outputs even though they are actually controlled by multiple signers. By tweaking an aggregate public key, the shared Taproot output can have script spending paths that are hidden unless used.
+
+There are multi-signature schemes other than MuSig2 that are fully compatible with Schnorr signatures.
+The MuSig2 variant proposed below stands out by combining all the following features:
+* '''Simple Key Setup''': Key aggregation is non-interactive and fully compatible with BIP340 public keys.
+* '''Two Communication Rounds''': MuSig2 is faster in practice than previous three-round multi-signature schemes such as [https://eprint.iacr.org/2018/068.pdf MuSig1], particularly when signers are connected through high-latency anonymous links. Moreover, the need for fewer communication rounds simplifies the algorithms and reduces the probability that implementations and users make security-relevant mistakes.
+* '''Provable security''': MuSig2 has been [https://eprint.iacr.org/2020/1261.pdf proven existentially unforgeable] under the algebraic one-more discrete logarithm (AOMDL) assumption (instead of the discrete logarithm assumption required for single-signer Schnorr signatures). AOMDL is a falsifiable and weaker variant of the well-studied OMDL problem.
+* '''Low complexity''': MuSig2 has a substantially lower computational and implementation complexity than alternative schemes like [https://eprint.iacr.org/2020/1057 MuSig-DN]. However, this comes at the cost of having no ability to generate nonces deterministically and the requirement to securely handle signing state.
+
+=== Design ===
+
+* '''Compatibility with BIP340''': In this proposal, the aggregate public key is a BIP340 X-only public key, and the signature output at the end of the signing protocol is a BIP340 signature that passes BIP340 verification for the aggregate public key and a message. The individual public keys that are input to the key aggregation algorithm are ''plain'' public keys in compressed format.
+* '''Tweaking for BIP32 derivations and Taproot''': This proposal supports tweaking aggregate public keys and signing for tweaked aggregate public keys. We distinguish two modes of tweaking: ''Plain'' tweaking can be used to derive child aggregate public keys per [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP32]. ''X-only'' tweaking, on the other hand, allows creating a [https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341] tweak to add script paths to a Taproot output. See [[#tweaking-the-aggregate-public-key|below]] for details.
+* '''Non-interactive signing with preprocessing''': The first communication round, exchanging the nonces, can happen before the message or the exact set of signers is determined. Once the parameters of the signing session are finalized, the signers can send partial signatures without additional interaction.
+* '''Key aggregation optionally independent of order''': The output of the key aggregation algorithm depends on the order in which the individual public keys are provided as input. Key aggregation does not sort the individual public keys by default because applications often already have a canonical order of signers. Nonetheless, applications can mandate sorting before aggregation,<ref>Applications that sort individual public keys before aggregation should ensure that the implementation of sorting is reasonably efficient, and in particular does not degenerate to quadratic runtime on pathological inputs.</ref> and this proposal specifies a canonical order to sort the individual public keys before key aggregation. Sorting will ensure the same output, independent of the initial order.
+* '''Third-party nonce and partial signature aggregation''': Instead of every signer sending their nonce and partial signature to every other signer, it is possible to use an untrusted third-party ''aggregator'' in order to reduce the communication complexity from quadratic to linear in the number of signers. In each of the two rounds, the aggregator collects all signers' contributions (nonces or partial signatures), aggregates them, and broadcasts the aggregate back to the signers. A malicious aggregator can force the signing session to fail to produce a valid Schnorr signature but cannot negatively affect the unforgeability of the scheme.
+* '''Partial signature verification''': If any signer sends a partial signature contribution that was not created by honestly following the signing protocol, the signing session will fail to produce a valid Schnorr signature. This proposal specifies a partial signature verification algorithm to identify disruptive signers. It is incompatible with third-party nonce aggregation because the individual nonce is required for partial verification.
+* '''MuSig2* optimization''': This proposal uses an optimized scheme MuSig2*, which allows saving a point multiplication in key aggregation as compared to MuSig2. MuSig2* is proven secure in the appendix of the [https://eprint.iacr.org/2020/1261 MuSig2 paper]. The optimization consists of assigning the constant key aggregation coefficient ''1'' to the second distinct key in the list of individual public keys to be aggregated (as well as to any key identical to this key).
+* '''Size of the nonce and security''': In this proposal, each signer's nonce consists of two elliptic curve points. The [https://eprint.iacr.org/2020/1261 MuSig2 paper] gives distinct security proofs depending on the number of points that constitute a nonce. See section [[#choosing-the-size-of-the-nonce|Choosing the Size of the Nonce]] for a discussion.
+
+== Overview ==
+
+Implementers must make sure to understand this section thoroughly to avoid subtle mistakes that may lead to catastrophic failure.
+
+=== Optionality of Features ===
+
+The goal of this proposal is to support a wide range of possible application scenarios.
+Given a specific application scenario, some features may be unnecessary or not desirable, and implementers can choose not to support them.
+Such optional features include:
+* Applying plain tweaks after x-only tweaks.
+* Applying tweaks at all.
+* Dealing with messages that are not exactly 32 bytes.
+* Identifying a disruptive signer after aborting (aborting itself remains mandatory).
+* Dealing with duplicate individual public keys in key aggregation.
+If applicable, the corresponding algorithms should simply fail when encountering inputs unsupported by a particular implementation. (For example, the signing algorithm may fail when given a message which is not 32 bytes.)
+Similarly, the test vectors that exercise the unimplemented features should be re-interpreted to expect an error, or be skipped if appropriate.
+
+=== General Signing Flow ===
+
+The signers start by exchanging their individual public keys and computing an aggregate public key using the ''KeyAgg'' algorithm.
+Whenever they want to sign a message, the basic order of operations to create a multi-signature is as follows:
+
+'''First broadcast round:'''
+The signers start the signing session by running ''NonceGen'' to compute ''secnonce'' and ''pubnonce''.<ref>We treat the ''secnonce'' and ''pubnonce'' as grammatically singular even though they include serializations of two scalars and two elliptic curve points, respectively. This treatment may be confusing for readers familiar with the MuSig2 paper. However, serialization is a technical detail that is irrelevant for users of MuSig2 interfaces.</ref>
+Then, the signers broadcast their ''pubnonce'' to each other and run ''NonceAgg'' to compute an aggregate nonce.
+
+'''Second broadcast round:'''
+At this point, every signer has the required data to sign, which, in the algorithms specified below, is stored in a data structure called [[#session-context|Session Context]].
+Every signer computes a partial signature by running ''Sign'' with the secret signing key, the ''secnonce'' and the session context.
+Then, the signers broadcast their partial signatures to each other and run ''PartialSigAgg'' to obtain the final signature.
+If all signers behaved honestly, the result passes [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP340] verification.
+
+Both broadcast rounds can be optimized by using an aggregator who collects all signers' nonces or partial signatures, aggregates them using ''NonceAgg'' or ''PartialSigAgg'', respectively, and broadcasts the aggregate result back to the signers. A malicious aggregator can force the signing session to fail to produce a valid Schnorr signature but cannot negatively affect the unforgeability of the scheme, i.e., even a malicious aggregator colluding with all but one signer cannot forge a signature.
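+
+The following sketch is purely illustrative: it shows the order of operations for ''u'' signers in Python-like pseudocode with simplified parameter lists. The algorithm names match this document, but the exact interfaces shown here are an assumption of the sketch; the specifications below are authoritative.
+
+<pre>
+# Non-normative sketch of the two-round signing flow (parameters simplified).
+keyagg_ctx = KeyAgg([pk_1, ..., pk_u])        # setup: aggregate the exchanged public keys
+
+# First broadcast round (per signer i): generate and broadcast a nonce.
+secnonce_i, pubnonce_i = NonceGen(...)        # fresh randomness; never reuse secnonce_i
+aggnonce = NonceAgg([pubnonce_1, ..., pubnonce_u])
+
+# Second broadcast round (per signer i): create and broadcast a partial signature.
+psig_i = Sign(secnonce_i, sk_i, session_ctx)  # session_ctx binds aggnonce, keys, tweaks, msg
+sig = PartialSigAgg([psig_1, ..., psig_u], session_ctx)
+# sig passes BIP340 verification for the aggregate public key and the message.
+</pre>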
+
+'''IMPORTANT''': The ''Sign'' algorithm must '''not''' be executed twice with the same ''secnonce''.
+Otherwise, it is possible to extract the secret signing key from the two partial signatures output by the two executions of ''Sign''.
+To avoid accidental reuse of ''secnonce'', an implementation may securely erase the ''secnonce'' argument by overwriting it with 64 zero bytes after it has been read by ''Sign''.
+A ''secnonce'' consisting of only zero bytes is invalid for ''Sign'' and will cause it to fail.
+
+To simplify the specification of the algorithms, some intermediary values are unnecessarily recomputed from scratch, e.g., when executing ''GetSessionValues'' multiple times.
+Actual implementations can cache these values.
+As a result, the [[#session-context|Session Context]] may look very different in implementations or may not exist at all.
+However, computation of ''GetSessionValues'' and storage of the result must be protected against modification from an untrusted third party.
+Otherwise, such a party would gain complete control over the aggregate public key and the message to be signed.
+
+=== Public Key Aggregation ===
+
+We distinguish between two public key types, namely ''plain public keys'', the key type traditionally used in Bitcoin, and ''X-only public keys''.
+Plain public keys are byte strings of length 33 (often called ''compressed'' format).
+In contrast, X-only public keys are 32-byte strings defined in [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP340].
+
+The individual public keys of signers as input to the key aggregation algorithm ''KeyAgg'' (and to ''GetSessionValues'' and ''PartialSigVerify'') are plain public keys.
+The output of ''KeyAgg'' is a [[#keyagg-context|KeyAgg Context]] which stores information required for tweaking the aggregate public key (see [[#tweaking-the-aggregate-public-key|below]]),
+and it can be used to produce an X-only aggregate public key, or a plain aggregate public key.
+In order to obtain an X-only public key compatible with BIP340 verification, implementations call the ''GetXonlyPubkey'' function with the KeyAgg Context.
+To get the plain aggregate public key, which is required for some applications of [[#tweaking-the-aggregate-public-key|tweaking]], implementations call ''GetPlainPubkey'' instead.
+
+The aggregate public key produced by ''KeyAgg'' (regardless of the type) depends on the order of the individual public keys.
+If the application does not have a canonical order of the signers, the individual public keys can be sorted with the ''KeySort'' algorithm to ensure that the aggregate public key is independent of the order of signers.
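+
+As a purely illustrative, non-normative sketch (the exact interfaces are specified below), obtaining both key types from one aggregation could look as follows; the sorting step is only needed if the application has no canonical signer order.
+
+<pre>
+# Non-normative sketch: aggregate sorted keys and extract both output forms.
+sorted_pubkeys = KeySort([pk_1, ..., pk_u])
+keyagg_ctx = KeyAgg(sorted_pubkeys)
+xonly_aggpk = GetXonlyPubkey(keyagg_ctx)  # 32 bytes, e.g., for BIP340 verification or BIP341 outputs
+plain_aggpk = GetPlainPubkey(keyagg_ctx)  # 33 bytes, e.g., as a parent key for BIP32 derivation
+</pre>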
+
+The same individual public key is allowed to occur more than once in the input of ''KeyAgg'' and ''KeySort''.
+This is by design: All algorithms in this proposal handle multiple signers who (claim to) have identical individual public keys properly,
+and applications are not required to check for duplicate individual public keys.
+In fact, applications are recommended to omit checks for duplicate individual public keys in order to simplify error handling.
+Moreover, it is often impossible to tell at key aggregation which signer is to blame for the duplicate, i.e., which signer came up with an individual public key honestly and which disruptive signer copied it.
+In contrast, MuSig2 is designed to identify disruptive signers at signing time (see [[#identifying-disruptive-signers|Identifying Disruptive Signers]]).
+
+While the algorithms in this proposal are able to handle duplicate individual public keys, there are scenarios where applications may choose to abort when encountering duplicates.
+For example, we can imagine a scenario where a single entity creates a MuSig2 setup with multiple signing devices.
+In that case, duplicates may not result from a malicious signing device copying an individual public key of another signing device but from accidental initialization of two devices with the same seed.
+Since MuSig2 key aggregation would accept the duplicate keys without raising an error, thereby reducing security compared to the intended key setup, applications may reject duplicate individual public keys before passing them to MuSig2 key aggregation and ask the user to investigate.
+
+=== Nonce Generation ===
+
+'''IMPORTANT''': ''NonceGen'' must have access to a high-quality random generator to draw an unbiased, uniformly random value ''rand' ''.
+In contrast to BIP340 signing, the values ''k<sub>1</sub>'' and ''k<sub>2</sub>'' '''must not be derived deterministically''' from the session parameters because otherwise active adversaries can [https://medium.com/blockstream/musig-dn-schnorr-multisignatures-with-verifiably-deterministic-nonces-27424b5df9d6#e3b6 trick the victim into reusing a nonce].
+
+The optional arguments to ''NonceGen'' enable a defense-in-depth mechanism that may prevent secret key exposure if ''rand' '' is accidentally not drawn uniformly at random.
+If the value ''rand' '' was identical in two ''NonceGen'' invocations, but any other argument was different, the ''secnonce'' would still be guaranteed to be different as well (with overwhelming probability), and thus accidentally using the same ''secnonce'' for ''Sign'' in both sessions would be avoided.
+Therefore, it is recommended to provide the optional arguments ''sk'', ''aggpk'', and ''m'' if these session parameters are already determined during nonce generation.
+The auxiliary input ''extra_in'' can contain additional contextual data that has a chance of changing between ''NonceGen'' runs,
+e.g., a supposedly unique session id (taken from the application), a session counter wide enough not to repeat in practice, any nonces by other signers (if already known), or the serialization of a data structure containing multiple of the above.
+However, the protection provided by the optional arguments should only be viewed as a last resort.
+In most conceivable scenarios, the assumption that the arguments are different between two executions of ''NonceGen'' is relatively strong, particularly when facing an active adversary.
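+
+As a purely illustrative, non-normative sketch (the ellipsis stands for the remaining arguments, which are specified below, and ''session_counter_bytes'' is an application-chosen placeholder), a signer that already knows these session parameters would invoke ''NonceGen'' roughly as follows.
+
+<pre>
+# Non-normative sketch: provide all known session parameters to NonceGen as a
+# defense-in-depth measure against weak randomness.
+secnonce, pubnonce = NonceGen(..., sk=sk, aggpk=aggpk, m=m, extra_in=session_counter_bytes)
+</pre>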
+
+In some applications, it is beneficial to generate and send a ''pubnonce'' before the other signers, their individual public keys, or the message to sign is known.
+In this case, only the available arguments are provided to the ''NonceGen'' algorithm.
+After this preprocessing phase, the ''Sign'' algorithm can be run immediately when the message and the set of signers are determined.
+This way, the final signature is created quicker and with fewer round trips.
+However, applications that use this method presumably store the nonces for a longer time and must therefore be even more careful not to reuse them.
+Moreover, this method is not compatible with the defense-in-depth mechanism described in the previous paragraph.
+
+Instead of every signer broadcasting their ''pubnonce'' to every other signer, the signers can send their ''pubnonce'' to a single aggregator node that runs ''NonceAgg'' and sends the ''aggnonce'' back to the signers.
+This technique reduces the overall communication.
+A malicious aggregator can force the signing session to fail to produce a valid Schnorr signature but cannot negatively affect the unforgeability of the scheme.
+
+In general, MuSig2 signers are stateful in the sense that they first generate ''secnonce'' and then need to store it until they receive the other signers' ''pubnonces'' or the ''aggnonce''.
+However, it is possible for one of the signers to be stateless.
+This signer waits until it receives the ''pubnonce'' of all the other signers and until session parameters such as a message to sign, individual public keys, and tweaks are determined.
+Then, the signer can run ''NonceGen'', ''NonceAgg'' and ''Sign'' in sequence and send out its ''pubnonce'' along with its partial signature.
+Stateless signers may want to consider signing deterministically (see [[#modifications-to-nonce-generation|Modifications to Nonce Generation]]) to remove the reliance on the random number generator in the ''NonceGen'' algorithm.
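+
+The following sketch illustrates such a combined ''NonceGen''/''NonceAgg''/''Sign'' flow for the last signer, using the Python helpers from the reference code included with this BIP. It is illustrative rather than normative, and it assumes that ''nonce_gen_internal'' takes its arguments in the order ''(rand', sk, pk, aggpk, msg, extra_in)'' and returns a mutable ''secnonce'', as suggested by its use in the test-vector generator below.
+
+<source lang="python">
+from reference import nonce_gen_internal, nonce_agg, SessionContext, sign
+import secrets
+
+def last_signer_sign(sk, pk, other_pubnonces, pubkeys, tweaks, is_xonly, msg):
+    # Generate the nonce only after all other pubnonces and the session
+    # parameters are known, so no secnonce needs to be stored between rounds.
+    rand_ = secrets.token_bytes(32)
+    # The optional aggpk, msg and extra_in arguments are omitted here for brevity.
+    secnonce, pubnonce = nonce_gen_internal(rand_, sk, pk, None, None, None)
+    aggnonce = nonce_agg(other_pubnonces + [pubnonce])
+    session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+    # secnonce is assumed to be the mutable bytearray produced by nonce generation.
+    return pubnonce, sign(secnonce, sk, session_ctx)
+</source>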
+
+=== Identifying Disruptive Signers ===
+
+The signing protocol makes it possible to identify malicious signers who send invalid contributions to a signing session in order to make the signing session abort and prevent the honest signers from obtaining a valid signature.
+This property is called "identifiable aborts" and ensures that honest parties can assign blame to malicious signers who cause an abort in the signing protocol.
+
+Aborts are identifiable for an honest party if the following conditions hold in a signing session:
+* The contributions received from all signers have not been tampered with (e.g., because they were sent over authenticated connections).
+* Nonce aggregation is performed honestly (e.g., because the honest signer performs nonce aggregation on its own or because the aggregator is trusted).
+* The partial signatures received from all signers are verified using the algorithm ''PartialSigVerify''.
+
+If these conditions hold and an honest party (signer or aggregator) runs an algorithm that fails due to invalid protocol contributions from malicious signers, then the algorithm run by the honest party will output the index of exactly one malicious signer.
+Additionally, if the honest parties agree on the contributions sent by all signers in the signing session, all the honest parties who run the aborting algorithm will identify the same malicious signer.
+
+==== Further Remarks ====
+
+Some of the algorithms specified below may also assign blame to a malicious aggregator.
+While this is possible for some particular misbehavior of the aggregator, it is not guaranteed that a malicious aggregator can be identified.
+More specifically, a malicious aggregator (whose existence violates the second condition above) can always make signing abort and wrongly hold honest signers accountable for the abort (e.g., by claiming to have received an invalid contribution from a particular honest signer).
+
+The only purpose of the algorithm ''PartialSigVerify'' is to ensure identifiable aborts, and it is not necessary to use it when identifiable aborts are not desired.
+In particular, partial signatures are ''not'' signatures.
+An adversary can forge a partial signature, i.e., create a partial signature without knowing the secret key for the claimed individual public key.<ref>Assume an adversary wants to forge a partial signature for individual public key ''P''. It joins the signing session pretending to be two different signers, one with individual public key ''P'' and one with another individual public key. The adversary can then set the second signer's nonce such that it will be able to produce a partial signature for ''P'' but not for the other claimed signer. An explanation of the individual steps required to create a partial signature forgery can be found in [https://gist.github.com/AdamISZ/ca974ed67889cedc738c4a1f65ff620b a write up by Adam Gibson].</ref>
+However, if ''PartialSigVerify'' succeeds for all partial signatures then ''PartialSigAgg'' will return a valid Schnorr signature.<ref>Given a list of individual public keys, it is an open question whether a BIP-340 signature valid under the corresponding aggregate public key is a proof of knowledge of all secret keys of the individual public keys.</ref>
+
+=== Tweaking the Aggregate Public Key ===
+
+The aggregate public key can be ''tweaked'', which modifies the key as defined in the [[#tweaking-definition|Tweaking Definition]] subsection.
+In order to apply a tweak, the KeyAgg Context output by ''KeyAgg'' is provided to the ''ApplyTweak'' algorithm with the ''is_xonly_t'' argument set to false for plain tweaking and true for X-only tweaking.
+The resulting KeyAgg Context can be used to apply another tweak with ''ApplyTweak'' or obtain the aggregate public key with ''GetXonlyPubkey'' or ''GetPlainPubkey''.
+
+In addition to individual public keys, the ''KeyAgg'' algorithm accepts tweaks, which modify the aggregate public key as defined in the [[#tweaking-definition|Tweaking Definition]] subsection.
+For example, if ''KeyAgg'' is run with ''v = 2'', ''is_xonly_t<sub>1</sub> = false'', ''is_xonly_t<sub>2</sub> = true'', then the aggregate key is first plain tweaked with ''tweak<sub>1</sub>'' and then X-only tweaked with ''tweak<sub>2</sub>''.
+
+The purpose of supporting tweaking is to ensure compatibility with existing uses of tweaking, i.e., that the result of signing is a valid signature for the tweaked public key.
+The MuSig2 algorithms take arbitrary tweaks as input but accepting arbitrary tweaks may negatively affect the security of the scheme.<ref>It is an open question whether allowing arbitrary tweaks from an adversary affects the unforgeability of MuSig2.</ref>
+Instead, signers should obtain the tweaks according to other specifications.
+This typically involves deriving the tweaks from a hash of the aggregate public key and some other information.
+Depending on the specific scheme that is used for tweaking, either the plain or the X-only aggregate public key is required.
+For example, to do [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP32] derivation, you call ''GetPlainPubkey'' to be able to compute the tweak, whereas [https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341] TapTweaks require X-only public keys that are obtained with ''GetXonlyPubkey''.
+
+The tweak mode provided to ''ApplyTweak'' depends on the application:
+Plain tweaking can be used to derive child public keys from an aggregate public key using [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP32].
+On the other hand, X-only tweaking is required for Taproot tweaking per [https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341].
+A Taproot-tweaked public key commits to a ''script path'', allowing users to create transaction outputs that are spendable either with a MuSig2 multi-signature or by providing inputs that satisfy the script path.
+Script path spends require a control block that contains a parity bit for the tweaked X-only public key.
+The bit can be obtained with ''GetPlainPubkey(keyagg_ctx)[0] & 1''.
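+
+As a sketch of this flow, assuming a script tree with root ''merkle_root'' and using helpers from the reference code included with this BIP, the Taproot tweak and the control-block parity bit could be computed as follows (illustrative, not normative):
+
+<source lang="python">
+from reference import key_agg, apply_tweak, get_xonly_pk, has_even_y, tagged_hash
+
+def taproot_output_key(pubkeys, merkle_root: bytes):
+    # Aggregate the individual public keys, then apply a BIP341-style X-only tweak.
+    keyagg_ctx = key_agg(pubkeys)
+    internal_key = get_xonly_pk(keyagg_ctx)
+    t = tagged_hash("TapTweak", internal_key + merkle_root)
+    keyagg_ctx = apply_tweak(keyagg_ctx, t, True)      # is_xonly_t = true
+    output_key = get_xonly_pk(keyagg_ctx)
+    parity = 0 if has_even_y(keyagg_ctx[0]) else 1     # parity bit for the control block
+    return output_key, parity
+</source>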
+
+== Algorithms ==
+
+The following specification of the algorithms has been written with a focus on clarity.
+As a result, the specified algorithms are not always optimal in terms of computation and space.
+In particular, some values are recomputed but can be cached in actual implementations (see [[#signing-flow|Signing Flow]]).
+
+=== Notation ===
+
+The following conventions are used, with constants as defined for [https://www.secg.org/sec2-v2.pdf secp256k1]. We note that adapting this proposal to other elliptic curves is not straightforward and can result in an insecure scheme.
+* Lowercase variables represent integers or byte arrays.
+** The constant ''p'' refers to the field size, ''0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F''.
+** The constant ''n'' refers to the curve order, ''0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141''.
+* Uppercase variables refer to points on the curve with equation ''y<sup>2</sup> = x<sup>3</sup> + 7'' over the integers modulo ''p''.
+** ''is_infinite(P)'' returns whether ''P'' is the point at infinity.
+** ''x(P)'' and ''y(P)'' are integers in the range ''0..p-1'' and refer to the X and Y coordinates of a point ''P'' (assuming it is not infinity).
+** The constant ''G'' refers to the base point, for which ''x(G) = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798'' and ''y(G) = 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8''.
+** Addition of points refers to the usual [https://en.wikipedia.org/wiki/Elliptic_curve#The_group_law elliptic curve group operation].
+** [https://en.wikipedia.org/wiki/Elliptic_curve_point_multiplication Multiplication (⋅) of an integer and a point] refers to the repeated application of the group operation.
+* Functions and operations:
+** ''||'' refers to byte array concatenation.
+** The function ''x[i:j]'', where ''x'' is a byte array and ''i, j &ge; 0'', returns a ''(j - i)''-byte array with a copy of the ''i''-th byte (inclusive) to the ''j''-th byte (exclusive) of ''x''.
+** The function ''bytes(n, x)'', where ''x'' is an integer, returns the n-byte encoding of ''x'', most significant byte first.
+** The constant ''empty_bytestring'' refers to the empty byte array. It holds that ''len(empty_bytestring) = 0''.
+** The function ''xbytes(P)'', where ''P'' is a point for which ''not is_infinite(P)'', returns ''bytes(32, x(P))''.
+** The function ''len(x)'' where ''x'' is a byte array returns the length of the array.
+** The function ''has_even_y(P)'', where ''P'' is a point for which ''not is_infinite(P)'', returns ''y(P) mod 2 == 0''.
+** The function ''with_even_y(P)'', where ''P'' is a point, returns ''P'' if ''is_infinite(P)'' or ''has_even_y(P)''. Otherwise, ''with_even_y(P)'' returns ''-P''.
+** The function ''cbytes(P)'', where ''P'' is a point for which ''not is_infinite(P)'', returns ''a || xbytes(P)'' where ''a'' is a byte that is ''2'' if ''has_even_y(P)'' and ''3'' otherwise.
+** The function ''cbytes_ext(P)'', where ''P'' is a point, returns ''bytes(33, 0)'' if ''is_infinite(P)''. Otherwise, it returns ''cbytes(P)''.
+** The function ''int(x)'', where ''x'' is a 32-byte array, returns the 256-bit unsigned integer whose most significant byte first encoding is ''x''.
+** The function ''lift_x(x)'', where ''x'' is an integer in range ''0..2<sup>256</sup>-1'', returns the point ''P'' for which ''x(P) = x''<ref>
+ Given a candidate X coordinate ''x'' in the range ''0..p-1'', there exist either exactly two or exactly zero valid Y coordinates. If no valid Y coordinate exists, then ''x'' is not a valid X coordinate either, i.e., no point ''P'' exists for which ''x(P) = x''. The valid Y coordinates for a given candidate ''x'' are the square roots of ''c = x<sup>3</sup> + 7 mod p'' and they can be computed as ''y = &plusmn;c<sup>(p+1)/4</sup> mod p'' (see [https://en.wikipedia.org/wiki/Quadratic_residue#Prime_or_prime_power_modulus Quadratic residue]) if they exist, which can be checked by squaring and comparing with ''c''.</ref> and ''has_even_y(P)'', or fails if ''x'' is greater than ''p-1'' or no such point exists. The function ''lift_x(x)'' is equivalent to the following pseudocode:
+*** Fail if ''x &gt; p-1''.
+*** Let ''c = x<sup>3</sup> + 7 mod p''.
+*** Let ''y' = c<sup>(p+1)/4</sup> mod p''.
+*** Fail if ''c &ne; y'<sup>2</sup> mod p''.
+*** Let ''y = y' '' if ''y' mod 2 = 0'', otherwise let ''y = p - y' ''.
+*** Return the unique point ''P'' such that ''x(P) = x'' and ''y(P) = y''.
+** The function ''cpoint(x)'', where ''x'' is a 33-byte array (compressed serialization), sets ''P = lift_x(int(x[1:33]))'' and fails if that fails. If ''x[0] = 2'' it returns ''P'' and if ''x[0] = 3'' it returns ''-P''. Otherwise, it fails.
+** The function ''cpoint_ext(x)'', where ''x'' is a 33-byte array (compressed serialization), returns the point at infinity if ''x = bytes(33, 0)''. Otherwise, it returns ''cpoint(x)'' and fails if that fails.
+** The function ''hash<sub>tag</sub>(x)'' where ''tag'' is a UTF-8 encoded tag name and ''x'' is a byte array returns the 32-byte hash ''SHA256(SHA256(tag) || SHA256(tag) || x)''.
+* Other:
+** Tuples are written by listing the elements within parentheses and separated by commas. For example, ''(2, 3, 1)'' is a tuple.
+
+=== Key Generation and Aggregation ===
+
+==== Key Generation of an Individual Signer ====
+
+<div>
+Algorithm ''IndividualPubkey(sk)'':<ref>The ''IndividualPubkey'' algorithm matches the key generation procedure traditionally used for ECDSA in Bitcoin</ref>
+* Inputs:
+** The secret key ''sk'': a 32-byte array, freshly generated uniformly at random
+* Let ''d' = int(sk)''.
+* Fail if ''d' = 0'' or ''d' &ge; n''.
+* Return ''cbytes(d'⋅G)''.
+</div>
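+
+The pseudocode above corresponds to the following Python sketch, written against the helper functions of the reference code included with this BIP (the compressed serialization ''cbytes'' is done inline):
+
+<source lang="python">
+from reference import int_from_bytes, bytes_from_int, point_mul, has_even_y, x, G, n
+
+def individual_pubkey(sk: bytes) -> bytes:
+    d = int_from_bytes(sk)
+    if d == 0 or d >= n:
+        raise ValueError('The secret key must be an integer in the range 1..n-1.')
+    P = point_mul(G, d)
+    # cbytes(P): prefix 0x02 for even Y, 0x03 for odd Y, followed by the X coordinate
+    return (b'\x02' if has_even_y(P) else b'\x03') + bytes_from_int(x(P))
+</source>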
+
+==== KeyAgg Context ====
+
+The KeyAgg Context is a data structure consisting of the following elements:
+* The point ''Q'' representing the potentially tweaked aggregate public key: an elliptic curve point
+* The accumulated tweak ''tacc'': an integer with ''0 &le; tacc < n''
+* The value ''gacc'' : 1 or -1 mod n
+
+We write "Let ''(Q, gacc, tacc) = keyagg_ctx''" to assign names to the elements of a KeyAgg Context.
+
+<div>
+Algorithm ''GetXonlyPubkey(keyagg_ctx)'':
+* Let ''(Q, _, _) = keyagg_ctx''
+* Return ''xbytes(Q)''
+</div>
+
+<div>
+Algorithm ''GetPlainPubkey(keyagg_ctx)'':
+* Let ''(Q, _, _) = keyagg_ctx''
+* Return ''cbytes(Q)''
+</div>
+
+==== Key Sorting ====
+
+<div>
+Algorithm ''KeySort(pk<sub>1..u</sub>)'':
+* Inputs:
+** The number ''u'' of individual public keys with ''0 < u < 2^32''
+** The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
+* Return ''pk<sub>1..u</sub>'' sorted in lexicographical order.
+</div>
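+
+Since the individual public keys are fixed-length byte arrays, lexicographical sorting is simply a byte-wise sort, e.g., in Python:
+
+<source lang="python">
+from typing import List
+
+def key_sort(pubkeys: List[bytes]) -> List[bytes]:
+    # 33-byte compressed public keys compare correctly as byte strings.
+    return sorted(pubkeys)
+</source>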
+
+==== Key Aggregation ====
+
+<div>
+Algorithm ''KeyAgg(pk<sub>1..u</sub>)'':
+* Inputs:
+** The number ''u'' of individual public keys with ''0 < u < 2^32''
+** The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
+* Let ''pk2 = GetSecondKey(pk<sub>1..u</sub>)''
+* For ''i = 1 .. u'':
+** Let ''P<sub>i</sub> = cpoint(pk<sub>i</sub>)''; fail if that fails and blame signer ''i'' for invalid individual public key.
+** Let ''a<sub>i</sub> = KeyAggCoeffInternal(pk<sub>1..u</sub>, pk<sub>i</sub>, pk2)''.
+* Let ''Q = a<sub>1</sub>⋅P<sub>1</sub> + a<sub>2</sub>⋅P<sub>2</sub> + ... + a<sub>u</sub>⋅P<sub>u</sub>''
+* Fail if ''is_infinite(Q)''.
+* Let ''gacc = 1''
+* Let ''tacc = 0''
+* Return ''keyagg_ctx = (Q, gacc, tacc)''.
+</div>
+
+<div>
+Internal Algorithm ''HashKeys(pk<sub>1..u</sub>)'':
+* Return ''hash<sub>KeyAgg list</sub>(pk<sub>1</sub> || pk<sub>2</sub> || ... || pk<sub>u</sub>)''
+</div>
+
+<div>
+Internal Algorithm ''GetSecondKey(pk<sub>1..u</sub>)'':
+* For ''j = 1 .. u'':
+** If ''pk<sub>j</sub> &ne; pk<sub>1</sub>'':
+*** Return ''pk<sub>j</sub>''
+* Return ''bytes(33, 0)''
+</div>
+
+<div>
+Internal Algorithm ''KeyAggCoeff(pk<sub>1..u</sub>, pk')'':
+* Let ''pk2 = GetSecondKey(pk<sub>1..u</sub>)''
+* Return ''KeyAggCoeffInternal(pk<sub>1..u</sub>, pk', pk2)''
+</div>
+
+<div>
+Internal Algorithm ''KeyAggCoeffInternal(pk<sub>1..u</sub>, pk', pk2)'':
+* Let ''L = HashKeys(pk<sub>1..u</sub>)''
+* If ''pk' = pk2'':
+** Return 1
+* Return ''int(hash<sub>KeyAgg coefficient</sub>(L || pk')) mod n''<ref>The key aggregation coefficient is computed by hashing the individual public key instead of its index, which requires one more invocation of the SHA-256 compression function. However, it results in significantly simpler implementations because signers do not need to translate between public key indices before and after sorting.</ref>
+</div>
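+
+As a compact illustration of the internal algorithms above, the following sketch computes the second distinct key and the key aggregation coefficient using the tagged-hash helper from the reference code (illustrative only):
+
+<source lang="python">
+from reference import tagged_hash, int_from_bytes, n
+
+def get_second_key(pubkeys):
+    # First key in the list that differs from the first key, else 33 zero bytes.
+    for pk in pubkeys:
+        if pk != pubkeys[0]:
+            return pk
+    return bytes(33)
+
+def key_agg_coeff_internal(pubkeys, pk_, pk2):
+    L = tagged_hash('KeyAgg list', b''.join(pubkeys))
+    if pk_ == pk2:
+        return 1
+    return int_from_bytes(tagged_hash('KeyAgg coefficient', L + pk_)) % n
+</source>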
+
+==== Applying Tweaks ====
+
+<div>
+Algorithm ''ApplyTweak(keyagg_ctx, tweak, is_xonly_t)'':
+* Inputs:
+** The ''keyagg_ctx'': a [[#keyagg-context|KeyAgg Context]] data structure
+** The ''tweak'': a 32-byte array
+** The tweak mode ''is_xonly_t'': a boolean
+* Let ''(Q, gacc, tacc) = keyagg_ctx''
+* If ''is_xonly_t'' and ''not has_even_y(Q)'':
+** Let ''g = -1 mod n''
+* Else:
+** Let ''g = 1''
+* Let ''t = int(tweak)''; fail if ''t &ge; n''
+* Let ''Q' = g⋅Q + t⋅G''
+** Fail if ''is_infinite(Q')''
+* Let ''gacc' = g⋅gacc mod n''
+* Let ''tacc' = t + g⋅tacc mod n''
+* Return ''keyagg_ctx' = (Q', gacc', tacc')''
+</div>
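+
+The following Python sketch transcribes ''ApplyTweak'' using the point arithmetic helpers from the reference code; the KeyAgg Context is represented as a plain ''(Q, gacc, tacc)'' tuple here (illustrative only):
+
+<source lang="python">
+from reference import point_add, point_mul, has_even_y, is_infinite, int_from_bytes, G, n
+
+def apply_tweak_sketch(keyagg_ctx, tweak: bytes, is_xonly_t: bool):
+    Q, gacc, tacc = keyagg_ctx
+    g = n - 1 if (is_xonly_t and not has_even_y(Q)) else 1   # g = -1 mod n or 1
+    t = int_from_bytes(tweak)
+    if t >= n:
+        raise ValueError('The tweak must be less than n.')
+    Q_ = point_add(point_mul(Q, g), point_mul(G, t))         # Q' = g*Q + t*G
+    if is_infinite(Q_):
+        raise ValueError('The tweaked public key is the point at infinity.')
+    return (Q_, g * gacc % n, (t + g * tacc) % n)
+</source>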
+
+=== Nonce Generation ===
+
+<div>
+Algorithm ''NonceGen(sk, pk, aggpk, m, extra_in)'':
+* Inputs:
+** The secret signing key ''sk'': a 32-byte array (optional argument)
+** The individual public key ''pk'': a 33-byte array (see [[#modifications-to-nonce-generation|Modifications to Nonce Generation]] for the reason that this argument is mandatory)
+** The x-only aggregate public key ''aggpk'': a 32-byte array (optional argument)
+** The message ''m'': a byte array (optional argument)<ref name="mlen">In theory, the allowed message size is restricted because SHA256 accepts byte strings only up to size of 2^61-1 bytes (and because of the 8-byte length encoding).</ref>
+** The auxiliary input ''extra_in'': a byte array with ''0 &le; len(extra_in) &le; 2<sup>32</sup>-1'' (optional argument)
+* Let ''rand' '' be a 32-byte array freshly drawn uniformly at random
+* If the optional argument ''sk'' is present:
+** Let ''rand'' be the byte-wise xor of ''sk'' and ''hash<sub>MuSig/aux</sub>(rand')''<ref>The random data is hashed (with a unique tag) as a precaution against situations where the randomness may be correlated with the secret signing key itself. It is xored with the secret key (rather than combined with it in a hash) to reduce the number of operations exposed to the actual secret key.</ref>
+* Else:
+** Let ''rand = rand' ''
+* If the optional argument ''aggpk'' is not present:
+** Let ''aggpk = empty_bytestring''
+* If the optional argument ''m'' is not present:
+** Let ''m_prefixed = bytes(1, 0)''
+* Else:
+** Let ''m_prefixed = bytes(1, 1) || bytes(8, len(m)) || m''
+* If the optional argument ''extra_in'' is not present:
+** Let ''extra_in = empty_bytestring''
+* Let ''k<sub>i</sub> = int(hash<sub>MuSig/nonce</sub>(rand || bytes(1, len(pk)) || pk || bytes(1, len(aggpk)) || aggpk || m_prefixed || bytes(4, len(extra_in)) || extra_in || bytes(1, i - 1))) mod n'' for ''i = 1,2''
+* Fail if ''k<sub>1</sub> = 0'' or ''k<sub>2</sub> = 0''
+* Let ''R<sub>⁎,1</sub> = k<sub>1</sub>⋅G, R<sub>⁎,2</sub> = k<sub>2</sub>⋅G''
+* Let ''pubnonce = cbytes(R<sub>⁎,1</sub>) || cbytes(R<sub>⁎,2</sub>)''
+* Let ''secnonce = bytes(32, k<sub>1</sub>) || bytes(32, k<sub>2</sub>) || pk''<ref name="secnonce">The algorithms as specified here assume that the ''secnonce'' is stored as a 97-byte array using the serialization ''secnonce = bytes(32, k<sub>1</sub>) || bytes(32, k<sub>2</sub>) || pk''. The same format is used in the reference implementation and in the test vectors. However, since the ''secnonce'' is (obviously) not meant to be sent over the wire, compatibility between implementations is not a concern, and this method of storing the ''secnonce'' is merely a suggestion.<br />
+The ''secnonce'' is effectively a local data structure of the signer which comprises the value triple ''(k<sub>1</sub>, k<sub>2</sub>, pk)'', and implementations may choose any suitable method to carry it from ''NonceGen'' (first communication round) to ''Sign'' (second communication round). In particular, implementations may choose to hide the ''secnonce'' in internal state without exposing it in an API explicitly, e.g., in an effort to prevent callers from reusing a ''secnonce'' accidentally.</ref>
+* Return ''(secnonce, pubnonce)''
+</div>
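+
+For readers who prefer code, the following self-contained sketch transcribes the hash construction of ''NonceGen'' on top of the helpers from the reference code. It is illustrative only and not a substitute for the reference implementation.
+
+<source lang="python">
+from reference import tagged_hash, int_from_bytes, bytes_from_int, point_mul, has_even_y, x, G, n
+import secrets
+
+def nonce_gen_sketch(pk: bytes, sk=None, aggpk=b'', msg=None, extra_in=b''):
+    # pk is mandatory; sk, aggpk, msg and extra_in are the optional arguments.
+    rand_ = secrets.token_bytes(32)
+    if sk is not None:
+        rand = bytes(a ^ b for a, b in zip(sk, tagged_hash('MuSig/aux', rand_)))
+    else:
+        rand = rand_
+    m_prefixed = b'\x00' if msg is None else b'\x01' + len(msg).to_bytes(8, 'big') + msg
+    buf = (rand + bytes([len(pk)]) + pk + bytes([len(aggpk)]) + aggpk
+           + m_prefixed + len(extra_in).to_bytes(4, 'big') + extra_in)
+    k1 = int_from_bytes(tagged_hash('MuSig/nonce', buf + b'\x00')) % n
+    k2 = int_from_bytes(tagged_hash('MuSig/nonce', buf + b'\x01')) % n
+    if k1 == 0 or k2 == 0:
+        raise ValueError('NonceGen failed: k1 or k2 is zero.')
+    def cbytes(P):
+        return (b'\x02' if has_even_y(P) else b'\x03') + bytes_from_int(x(P))
+    pubnonce = cbytes(point_mul(G, k1)) + cbytes(point_mul(G, k2))
+    secnonce = bytearray(bytes_from_int(k1) + bytes_from_int(k2) + pk)
+    return secnonce, pubnonce
+</source>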
+
+=== Nonce Aggregation ===
+
+<div>
+Algorithm ''NonceAgg(pubnonce<sub>1..u</sub>)'':
+* Inputs:
+** The number ''u'' of ''pubnonces'' with ''0 < u < 2^32''
+** The public nonces ''pubnonce<sub>1..u</sub>'': ''u'' 66-byte arrays
+* For ''j = 1 .. 2'':
+** For ''i = 1 .. u'':
+*** Let ''R<sub>i,j</sub> = cpoint(pubnonce<sub>i</sub>[(j-1)*33:j*33])''; fail if that fails and blame signer ''i'' for invalid ''pubnonce''.
+** Let ''R<sub>j</sub> = R<sub>1,j</sub> + R<sub>2,j</sub> + ... + R<sub>u,j</sub>''
+* Return ''aggnonce = cbytes_ext(R<sub>1</sub>) || cbytes_ext(R<sub>2</sub>)''
+</div>
+
+=== Session Context ===
+
+The Session Context is a data structure consisting of the following elements:
+* The aggregate public nonce ''aggnonce'': a 66-byte array
+* The number ''u'' of individual public keys with ''0 < u < 2^32''
+* The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
+* The number ''v'' of tweaks with ''0 &le; v < 2^32''
+* The tweaks ''tweak<sub>1..v</sub>'': ''v'' 32-byte arrays
+* The tweak modes ''is_xonly_t<sub>1..v</sub>'' : ''v'' booleans
+* The message ''m'': a byte array<ref name="mlen" />
+
+We write "Let ''(aggnonce, u, pk<sub>1..u</sub>, v, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m) = session_ctx''" to assign names to the elements of a Session Context.
+
+<div>
+Algorithm ''GetSessionValues(session_ctx)'':
+* Let ''(aggnonce, u, pk<sub>1..u</sub>, v, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m) = session_ctx''
+* Let ''keyagg_ctx<sub>0</sub> = KeyAgg(pk<sub>1..u</sub>)''; fail if that fails
+* For ''i = 1 .. v'':
+** Let ''keyagg_ctx<sub>i</sub> = ApplyTweak(keyagg_ctx<sub>i-1</sub>, tweak<sub>i</sub>, is_xonly_t<sub>i</sub>)''; fail if that fails
+* Let ''(Q, gacc, tacc) = keyagg_ctx<sub>v</sub>''
+* Let ''b = int(hash<sub>MuSig/noncecoef</sub>(aggnonce || xbytes(Q) || m)) mod n''
+* Let ''R<sub>1</sub> = cpoint_ext(aggnonce[0:33]), R<sub>2</sub> = cpoint_ext(aggnonce[33:66])''; fail if that fails and blame nonce aggregator for invalid ''aggnonce''.
+* Let ''R' = R<sub>1</sub> + b⋅R<sub>2</sub>''
+* If ''is_infinite(R')'':
+** Let final nonce ''R = G'' (see [[#dealing-with-infinity-in-nonce-aggregation|Dealing with Infinity in Nonce Aggregation]])
+* Else:
+** Let final nonce ''R = R' ''
+* Let ''e = int(hash<sub>BIP0340/challenge</sub>(xbytes(R) || xbytes(Q) || m)) mod n''
+* Return ''(Q, gacc, tacc, b, R, e)''
+</div>
+
+<div>
+Algorithm ''GetSessionKeyAggCoeff(session_ctx, P)'':
+* Let ''(_, u, pk<sub>1..u</sub>, _, _, _, _) = session_ctx''
+* Let ''pk = cbytes(P)''
+* Fail if ''pk'' not in ''pk<sub>1..u</sub>''
+* Return ''KeyAggCoeff(pk<sub>1..u</sub>, pk)''
+</div>
+
+=== Signing ===
+
+<div>
+Algorithm ''Sign(secnonce, sk, session_ctx)'':
+* Inputs:
+** The secret nonce ''secnonce'' that has never been used as input to ''Sign'' before: a 97-byte array<ref name="secnonce" />
+** The secret key ''sk'': a 32-byte array
+** The ''session_ctx'': a [[#session-context|Session Context]] data structure
+* Let ''(Q, gacc, _, b, R, e) = GetSessionValues(session_ctx)''; fail if that fails
+* Let ''k<sub>1</sub>' = int(secnonce[0:32]), k<sub>2</sub>' = int(secnonce[32:64])''
+* Fail if ''k<sub>i</sub>' = 0'' or ''k<sub>i</sub>' &ge; n'' for ''i = 1..2''
+* Let ''k<sub>1</sub> = k<sub>1</sub>', k<sub>2</sub> = k<sub>2</sub>' '' if ''has_even_y(R)'', otherwise let ''k<sub>1</sub> = n - k<sub>1</sub>', k<sub>2</sub> = n - k<sub>2</sub>' ''
+* Let ''d' = int(sk)''
+* Fail if ''d' = 0'' or ''d' &ge; n''
+* Let ''P = d'⋅G''
+* Let ''pk = cbytes(P)''
+* Fail if ''pk &ne; secnonce[64:97]''
+* Let ''a = GetSessionKeyAggCoeff(session_ctx, P)''; fail if that fails<ref>Failing ''Sign'' when ''GetSessionKeyAggCoeff(session_ctx, P)'' fails is not necessary for unforgeability. It merely indicates to the caller that the scheme is not being used correctly.</ref>
+* Let ''g = 1'' if ''has_even_y(Q)'', otherwise let ''g = -1 mod n''
+* <div id="Sign negation"></div>Let ''d = g⋅gacc⋅d' mod n'' (See [[negation-of-the-secret-key-when-signing|Negation Of The Secret Key When Signing]])
+* Let ''s = (k<sub>1</sub> + b⋅k<sub>2</sub> + e⋅a⋅d) mod n''
+* Let ''psig = bytes(32, s)''
+* Let ''pubnonce = cbytes(k<sub>1</sub>'⋅G) || cbytes(k<sub>2</sub>'⋅G)''
+* If ''PartialSigVerifyInternal(psig, pubnonce, pk, session_ctx)'' (see below) returns failure, fail<ref>Verifying the signature before leaving the signer prevents random or adversarially provoked computation errors. This prevents publishing invalid signatures which may leak information about the secret key. It is recommended but can be omitted if the computation cost is prohibitive.</ref>
+* Return partial signature ''psig''
+</div>
+
+=== Partial Signature Verification ===
+
+<div>
+Algorithm ''PartialSigVerify(psig, pubnonce<sub>1..u</sub>, pk<sub>1..u</sub>, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m, i)'':
+* Inputs:
+** The partial signature ''psig'': a 32-byte array
+** The number ''u'' of public nonces and individual public keys with ''0 < u < 2^32''
+** The public nonces ''pubnonce<sub>1..u</sub>'': ''u'' 66-byte arrays
+** The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
+** The number ''v'' of tweaks with ''0 &le; v < 2^32''
+** The tweaks ''tweak<sub>1..v</sub>'': ''v'' 32-byte arrays
+** The tweak modes ''is_xonly_t<sub>1..v</sub>'' : ''v'' booleans
+** The message ''m'': a byte array<ref name="mlen" />
+** The index ''i'' of the signer in the list of public nonces and individual public keys with ''0 < i &le; u''
+* Let ''aggnonce = NonceAgg(pubnonce<sub>1..u</sub>)''; fail if that fails
+* Let ''session_ctx = (aggnonce, u, pk<sub>1..u</sub>, v, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m)''
+* Run ''PartialSigVerifyInternal(psig, pubnonce<sub>i</sub>, pk<sub>i</sub>, session_ctx)''
+* Return success iff no failure occurred before reaching this point.
+</div>
+
+<div>
+Internal Algorithm ''PartialSigVerifyInternal(psig, pubnonce, pk, session_ctx)'':
+* Let ''(Q, gacc, _, b, R, e) = GetSessionValues(session_ctx)''; fail if that fails
+* Let ''s = int(psig)''; fail if ''s &ge; n''
+* Let ''R<sub>⁎,1</sub> = cpoint(pubnonce[0:33]), R<sub>⁎,2</sub> = cpoint(pubnonce[33:66])''
+* Let ''Re<sub>⁎</sub>' = R<sub>⁎,1</sub> + b⋅R<sub>⁎,2</sub>''
+* Let effective nonce ''Re<sub>⁎</sub> = Re<sub>⁎</sub>' '' if ''has_even_y(R)'', otherwise let ''Re<sub>⁎</sub> = -Re<sub>⁎</sub>' ''
+* Let ''P = cpoint(pk)''; fail if that fails
+* Let ''a = GetSessionKeyAggCoeff(session_ctx, P)''<ref>''GetSessionKeyAggCoeff(session_ctx, P)'' cannot fail when called from ''PartialSigVerifyInternal''.</ref>
+* Let ''g = 1'' if ''has_even_y(Q)'', otherwise let ''g = -1 mod n''
+* <div id="SigVerify negation"></div>Let ''g' = g⋅gacc mod n'' (See [[#negation-of-the-individual-public-key-when-partially-verifying|Negation Of The Individual Public Key When Partially Verifying]])
+* Fail if ''s⋅G &ne; Re<sub>⁎</sub> + e⋅a⋅g'⋅P''
+* Return success iff no failure occurred before reaching this point.
+</div>
+
+=== Partial Signature Aggregation ===
+
+<div>
+Algorithm ''PartialSigAgg(psig<sub>1..u</sub>, session_ctx)'':
+* Inputs:
+** The number ''u'' of signatures with ''0 < u < 2^32''
+** The partial signatures ''psig<sub>1..u</sub>'': ''u'' 32-byte arrays
+** The ''session_ctx'': a [[#session-context|Session Context]] data structure
+* Let ''(Q, _, tacc, _, R, e) = GetSessionValues(session_ctx)''; fail if that fails
+* For ''i = 1 .. u'':
+** Let ''s<sub>i</sub> = int(psig<sub>i</sub>)''; fail if ''s<sub>i</sub> &ge; n'' and blame signer ''i'' for invalid partial signature.
+* Let ''g = 1'' if ''has_even_y(Q)'', otherwise let ''g = -1 mod n''
+* Let ''s = s<sub>1</sub> + ... + s<sub>u</sub> + e⋅g⋅tacc mod n''
+* Return ''sig = xbytes(R) || bytes(32, s)''
+</div>
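+
+The following sketch aggregates partial signatures and checks the result as an ordinary BIP340 signature under the tweaked aggregate key, using only helpers whose use is shown in the test-vector generator below (illustrative only; the message is 32 bytes here because the BIP340 verifier copied into the reference code requires that):
+
+<source lang="python">
+from reference import partial_sig_agg, key_agg_and_tweak, get_xonly_pk, schnorr_verify
+
+def aggregate_and_check(psigs, session_ctx, pubkeys, tweaks, is_xonly, msg32):
+    # Combine the partial signatures into a single signature and verify it
+    # as a BIP340 signature under the tweaked aggregate X-only public key.
+    sig = partial_sig_agg(psigs, session_ctx)
+    aggpk = get_xonly_pk(key_agg_and_tweak(pubkeys, tweaks, is_xonly))
+    assert schnorr_verify(msg32, aggpk, sig)
+    return sig
+</source>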
+
+=== Test Vectors and Reference Code ===
+
+We provide a naive, highly inefficient, and non-constant time [[bip-0327/reference.py|pure Python 3 reference implementation of the key aggregation, partial signing, and partial signature verification algorithms]].
+
+Standalone JSON test vectors are also available in the [[bip-0327|same directory]], to facilitate porting the test vectors into other implementations.
+
+The reference implementation is for demonstration purposes only and not to be used in production environments.
+
+== Remarks on Security and Correctness ==
+
+=== Signing with Tweaked Individual Keys ===
+
+The scheme in this proposal has been designed to be secure
+even if signers tweak their individual secret keys with tweaks known to the adversary (e.g., as in BIP32 unhardened derivation)
+before providing the corresponding individual public keys as input to key aggregation.
+In particular, the scheme as specified above requires each signer to provide a final individual public key ''pk'' already to ''NonceGen'',
+which writes it into the ''secnonce'' array
+so that it can be checked against ''IndividualPubkey(sk)'' in the ''Sign'' algorithm.
+The purpose of this check in ''Sign'' is to ensure that ''pk'',
+and thus the secret key ''sk'' that will be provided to ''Sign'',
+is determined before the signer sends out the ''pubnonce''.
+
+If the check in ''Sign'' was omitted,
+and a signer supported signing with at least two different secret keys ''sk<sub>1</sub>'' and ''sk<sub>2</sub>''
+which have been obtained via tweaking another secret key with tweaks known to the adversary,
+then the adversary could, after having seen the ''pubnonce'',
+influence whether ''sk<sub>1</sub>'' or ''sk<sub>2</sub>'' is provided to ''Sign''.
+This degree of freedom may allow the adversary to perform a generalized birthday attack and thereby forge a signature
+(see [https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-October/021000.html bitcoin-dev mailing list post] and [https://github.com/jonasnick/musig2-tweaking writeup] for details).
+
+Checking ''pk'' against ''IndividualPubkey(sk)'' is a simple way to ensure
+that the secret key provided to ''Sign'' is fully determined already when ''NonceGen'' is invoked.
+This removes the adversary's ability to influence the secret key after having seen the ''pubnonce''
+and thus rules out the attack.<ref>Ensuring that the secret key provided to ''Sign'' is fully determined already when ''NonceGen'' is invoked is a simple policy to rule out the attack,
+but more flexible policies are conceivable.
+In fact, if the signer uses nothing but the message to be signed and the list of the individual public keys of all signers to decide which secret key to use,
+then it is not a problem that the adversary can influence this decision after having seen the ''pubnonce''.<br />
+More formally, consider modified algorithms ''NonceGen' '' and ''Sign' '', where ''NonceGen' '' does not take the individual public key of the signer as input and does not store it in ''secnonce'', and ''Sign' '' does not read the individual public key from ''secnonce'' and does not check it against the secret key taken as input.
+Then it suffices that for each invocation of ''NonceGen' '' with output ''(secnonce, pubnonce)'',
+a function ''fsk'' is determined before sending out ''pubnonce'',
+where ''fsk'' maps a pair consisting of a list of individual public keys and a message to a secret key,
+such that the secret key ''sk'' and the session context ''session_ctx = (_, _, pk<sub>1..u</sub>, _, _, _, m)''
+provided to the corresponding invocation of ''Sign'(secnonce, sk, session_ctx)'',
+adhere to the condition ''fsk(pk<sub>1..u</sub>, m) = sk''.<br />
+However, this requirement is complex and hard to enforce in implementations.
+The algorithms ''NonceGen'' and ''Sign'' specified in this BIP are effectively restricted to constant functions ''fsk(_, _) = sk''.
+In other words, their usage ensures that the secret key ''sk'' of the signers is determined entirely when invoking ''NonceGen'',
+which is enforced easily by letting ''NonceGen'' take the corresponding individual public key ''pk'' as input and checking ''pk'' against ''IndividualPubKey(sk)'' in ''Sign''.</ref>
+Note that the scheme as given in the [https://eprint.iacr.org/2020/1261 MuSig2 paper] does not perform the check in ''Sign''.
+However, the security model in the paper does not cover tweaking at all and assumes a single fixed secret key.
+
+=== Modifications to Nonce Generation ===
+
+Implementers must avoid modifying the ''NonceGen'' algorithm without being fully aware of the implications.
+We provide two modifications to ''NonceGen'' that are secure when applied correctly and may be useful in special circumstances, summarized in the following table.
+
+{| class="wikitable" style="margin:auto"
+! !! needs secure randomness !! needs secure counter !! needs to keep state securely !! needs aggregate nonce of all other signers (only possible for one signer)
+|-
+! NonceGen || ✓ || &nbsp; || ✓ || &nbsp;
+|-
+! CounterNonceGen || &nbsp; || ✓ || ✓ || &nbsp;
+|-
+! DeterministicSign || &nbsp; || &nbsp; || &nbsp; || ✓
+|}
+
+First, on systems where obtaining uniformly random values is much harder than maintaining a global atomic counter, it can be beneficial to modify ''NonceGen''.
+The resulting algorithm ''CounterNonceGen'' does not draw ''rand' '' uniformly at random but instead sets ''rand' '' to the value of an atomic counter that is incremented whenever it is read.
+With this modification, the secret signing key ''sk'' of the signer generating the nonce is '''not''' an optional argument and must be provided to ''NonceGen''.
+The security of the resulting scheme then depends on the requirement that reading the counter never yields the same value in two ''NonceGen'' invocations with the same ''sk''.
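+
+A minimal sketch of such a counter is shown below; the class and its interface are illustrative, and the application is responsible for persisting the counter so that it never repeats for the same secret key.
+
+<source lang="python">
+import threading
+
+class NonceCounter:
+    """Atomic counter whose value is used as rand' in CounterNonceGen.
+
+    The value must never repeat for the same secret key, so it has to be
+    persisted reliably across restarts by the application (not shown here)."""
+    def __init__(self, start: int = 0):
+        self._value = start
+        self._lock = threading.Lock()
+
+    def next_rand(self) -> bytes:
+        with self._lock:
+            self._value += 1
+            return self._value.to_bytes(32, 'big')
+</source>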
+
+Second, if there is a unique signer who is supposed to send the ''pubnonce'' last, it is possible to modify nonce generation for this single signer to not require high-quality randomness.
+Such a nonce generation algorithm ''DeterministicSign'' is specified below.
+Note that the only optional argument is ''rand'', which can be omitted if randomness is entirely unavailable.
+''DeterministicSign'' requires the argument ''aggothernonce'' which should be set to the output of ''NonceAgg'' run on the ''pubnonce'' values of '''all''' other signers (but can be provided by an untrusted party).
+Hence, using ''DeterministicSign'' is only possible for the last signer to generate a nonce and makes the signer stateless, similar to the stateless signer described in the [[#nonce-generation|Nonce Generation]] section.
+
+==== Deterministic and Stateless Signing for a Single Signer ====
+
+<div>
+Algorithm ''DeterministicSign(sk, aggothernonce, pk<sub>1..u</sub>, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m, rand)'':
+* Inputs:
+** The secret signing key ''sk'': a 32-byte array
+** The aggregate public nonce ''aggothernonce'' (see [[#modifications-to-nonce-generation|above]]): a 66-byte array
+** The number ''u'' of individual public keys with ''0 < u < 2^32''
+** The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
+** The number ''v'' of tweaks with ''0 &le; v < 2^32''
+** The tweaks ''tweak<sub>1..v</sub>'': ''v'' 32-byte arrays
+** The tweak methods ''is_xonly_t<sub>1..v</sub>'': ''v'' booleans
+** The message ''m'': a byte array<ref name="mlen" />
+** The auxiliary randomness ''rand'': a 32-byte array (optional argument)
+* If the optional argument ''rand'' is present:
+** Let ''sk' '' be the byte-wise xor of ''sk'' and ''hash<sub>MuSig/aux</sub>(rand)''
+* Else:
+** Let ''sk' = sk''
+* Let ''keyagg_ctx<sub>0</sub> = KeyAgg(pk<sub>1..u</sub>)''; fail if that fails
+* For ''i = 1 .. v'':
+** Let ''keyagg_ctx<sub>i</sub> = ApplyTweak(keyagg_ctx<sub>i-1</sub>, tweak<sub>i</sub>, is_xonly_t<sub>i</sub>)''; fail if that fails
+* Let ''aggpk = GetXonlyPubkey(keyagg_ctx<sub>v</sub>)''
+* Let ''k<sub>i</sub> = int(hash<sub>MuSig/deterministic/nonce</sub>(sk' || aggothernonce || aggpk || bytes(8, len(m)) || m || bytes(1, i - 1))) mod n'' for ''i = 1,2''
+* Fail if ''k<sub>1</sub> = 0'' or ''k<sub>2</sub> = 0''
+* Let ''R<sub>⁎,1</sub> = k<sub>1</sub>⋅G, R<sub>⁎,2</sub> = k<sub>2</sub>⋅G''
+* Let ''pubnonce = cbytes(R<sub>⁎,1</sub>) || cbytes(R<sub>⁎,2</sub>)''
+* Let ''d = int(sk)''
+* Fail if ''d = 0'' or ''d &ge; n''
+* Let ''pk = cbytes(d⋅G)''
+* Let ''secnonce = bytes(32, k<sub>1</sub>) || bytes(32, k<sub>2</sub>) || pk''
+* Let ''aggnonce = NonceAgg((pubnonce, aggothernonce))''; fail if that fails and blame nonce aggregator for invalid ''aggothernonce''.
+* Let ''session_ctx = (aggnonce, u, pk<sub>1..u</sub>, v, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m)''
+* Return ''(pubnonce, Sign(secnonce, sk, session_ctx))''
+</div>
+
+=== Tweaking Definition ===
+
+Two modes of tweaking the aggregate public key are supported. They correspond to the following algorithms:
+
+<div>
+Algorithm ''ApplyPlainTweak(P, t)'':
+* Inputs:
+** ''P'': a point
+** The tweak ''t'': an integer with ''0 &le; t < n ''
+* Return ''P + t⋅G''
+</div>
+
+<div>
+Algorithm ''ApplyXonlyTweak(P, t)'':
+* Return ''with_even_y(P) + t⋅G''
+</div>
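+
+Expressed with the point helpers of the reference code, where points are ''(x, y)'' tuples and ''None'' is the point at infinity, the two tweaking modes look as follows (illustrative only):
+
+<source lang="python">
+from reference import point_add, point_mul, has_even_y, is_infinite, G, p
+
+def with_even_y(P):
+    if is_infinite(P) or has_even_y(P):
+        return P
+    return (P[0], p - P[1])  # negate the point
+
+def apply_plain_tweak(P, t: int):
+    return point_add(P, point_mul(G, t))
+
+def apply_xonly_tweak(P, t: int):
+    return point_add(with_even_y(P), point_mul(G, t))
+</source>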
+
+=== Negation Of The Secret Key When Signing ===
+
+In order to produce a partial signature for an X-only aggregate public key that is an aggregate of ''u'' individual public keys and tweaked ''v'' times (X-only or plain), the ''[[#Sign negation|Sign]]'' algorithm may need to negate the secret key during the signing process.
+
+<poem>
+The following elliptic curve points arise as intermediate steps when creating a signature:
+• ''P<sub>i</sub>'' as computed in ''KeyAgg'' is the point corresponding to the ''i''-th signer's individual public key. Defining ''d<sub>i</sub>' '' to be the ''i''-th signer's secret key as an integer, i.e., the ''d' '' value as computed in the ''Sign'' algorithm of the ''i''-th signer, we have
+ ''P<sub>i</sub> = d<sub>i</sub>'⋅G ''.
+• ''Q<sub>0</sub>'' is the aggregate of the individual public keys. It is identical to value ''Q'' computed in ''KeyAgg'' and therefore defined as
+ ''Q<sub>0</sub> = a<sub>1</sub>⋅P<sub>1</sub> + a<sub>2</sub>⋅P<sub>2</sub> + ... + a<sub>u</sub>⋅P<sub>u</sub>''.
+• ''Q<sub>i</sub>'' is the tweaked aggregate public key after the ''i''-th execution of ''ApplyTweak'' for ''1 &le; i &le; v''. It holds that
+ ''Q<sub>i</sub> = f(i-1) + t<sub>i</sub>⋅G'' for ''i = 1, ..., v'' where
+ ''f(i-1) := with_even_y(Q<sub>i-1</sub>)'' if ''is_xonly_t<sub>i</sub>'' and
+ ''f(i-1) := Q<sub>i-1</sub>'' otherwise.
+• ''with_even_y(Q<sub>v</sub>)'' is the final result of the key aggregation and tweaking operations. It corresponds to the output of ''GetXonlyPubkey'' applied on the final KeyAgg Context.
+</poem>
+
+The signer's goal is to produce a partial signature corresponding to the final result of key aggregation and tweaking, i.e., the X-only public key ''with_even_y(Q<sub>v</sub>)''.
+
+<poem>
+For ''1 &le; i &le; v'', we denote the value ''g'' computed in the ''i''-th execution of ''ApplyTweak'' by ''g<sub>i-1</sub>''. Therefore, ''g<sub>i-1</sub>'' is ''-1 mod n'' if and only if ''is_xonly_t<sub>i</sub>'' is true and ''Q<sub>i-1</sub>'' has an odd Y coordinate. In other words, ''g<sub>i-1</sub>'' indicates whether ''Q<sub>i-1</sub>'' needed to be negated to apply an X-only tweak:
+ ''f(i-1) = g<sub>i-1</sub>⋅Q<sub>i-1</sub>'' for ''1 &le; i &le; v''.
+
+Furthermore, the ''Sign'' and ''PartialSigVerify'' algorithms set value ''g'' depending on whether ''Q<sub>v</sub>'' needed to be negated to produce the (X-only) final output. For consistency, this value ''g'' is referred to as ''g<sub>v</sub>'' in this section.
+ ''with_even_y(Q<sub>v</sub>) = g<sub>v</sub>⋅Q<sub>v</sub>''.
+</poem>
+
+<poem>
+So, the (X-only) final public key is
+ ''with_even_y(Q<sub>v</sub>)
+ = g<sub>v</sub>⋅Q<sub>v</sub>
+ = g<sub>v</sub>⋅(f(v-1) + t<sub>v</sub>⋅G)
+ = g<sub>v</sub>⋅(g<sub>v-1</sub>⋅(f(v-2) + t<sub>v-1</sub>⋅G) + t<sub>v</sub>⋅G)
+ = g<sub>v</sub>⋅g<sub>v-1</sub>⋅f(v-2) + g<sub>v</sub>⋅(t<sub>v</sub> + g<sub>v-1</sub>⋅t<sub>v-1</sub>)⋅G
+ = g<sub>v</sub>⋅g<sub>v-1</sub>⋅f(v-2) + (sum<sub>i=v-1..v</sub> t<sub>i</sub>⋅prod<sub>j=i..v</sub> g<sub>j</sub>)⋅G
+ = g<sub>v</sub>⋅g<sub>v-1</sub>⋅...⋅g<sub>1</sub>⋅f(0) + (sum<sub>i=1..v</sub> t<sub>i</sub>⋅prod<sub>j=i..v</sub> g<sub>j</sub>)⋅G
+ = g<sub>v</sub>⋅...⋅g<sub>0</sub>⋅Q<sub>0</sub> + g<sub>v</sub>⋅tacc<sub>v</sub>⋅G''
+ where ''tacc<sub>i</sub>'' is computed by ''KeyAgg'' and ''ApplyTweak'' as follows:
+ ''tacc<sub>0</sub> = 0
+ tacc<sub>i</sub> = t<sub>i</sub> + g<sub>i-1</sub>⋅tacc<sub>i-1</sub> for i=1..v mod n''
+ for which it holds that ''g<sub>v</sub>⋅tacc<sub>v</sub> = sum<sub>i=1..v</sub> t<sub>i</sub>⋅prod<sub>j=i..v</sub> g<sub>j</sub>''.
+</poem>
+
+<poem>
+''KeyAgg'' and ''ApplyTweak'' compute
+ ''gacc<sub>0</sub> = 1
+ gacc<sub>i</sub> = g<sub>i-1</sub>⋅gacc<sub>i-1</sub> for i=1..v mod n''
+So we can rewrite above equation for the final public key as
+ ''with_even_y(Q<sub>v</sub>) = g<sub>v</sub>⋅gacc<sub>v</sub>⋅Q<sub>0</sub> + g<sub>v</sub>⋅tacc<sub>v</sub>⋅G''.
+</poem>
+
+<poem>
+Then we have
+ ''with_even_y(Q<sub>v</sub>) - g<sub>v</sub>⋅tacc<sub>v</sub>⋅G
+ = g<sub>v</sub>⋅gacc<sub>v</sub>⋅Q<sub>0</sub>
+ = g<sub>v</sub>⋅gacc<sub>v</sub>⋅(a<sub>1</sub>⋅P<sub>1</sub> + ... + a<sub>u</sub>⋅P<sub>u</sub>)
+ = g<sub>v</sub>⋅gacc<sub>v</sub>⋅(a<sub>1</sub>⋅d<sub>1</sub>'⋅G + ... + a<sub>u</sub>⋅d<sub>u</sub>'⋅G)
+ = sum<sub>i=1..u</sub>(g<sub>v</sub>⋅gacc<sub>v</sub>⋅a<sub>i</sub>⋅d<sub>i</sub>')⋅G''.
+</poem>
+
+Intuitively, ''gacc<sub>i</sub>'' tracks accumulated sign flipping and ''tacc<sub>i</sub>'' tracks the accumulated tweak value after applying the first ''i'' individual tweaks. Additionally, ''g<sub>v</sub>'' indicates whether ''Q<sub>v</sub>'' needed to be negated to produce the final X-only result. Thus, signer ''i'' multiplies its secret key ''d<sub>i</sub>' '' with ''g<sub>v</sub>⋅gacc<sub>v</sub>'' in the ''[[#Sign negation|Sign]]'' algorithm.
+
+==== Negation Of The Individual Public Key When Partially Verifying ====
+
+<poem>
+As explained in [[#negation-of-the-secret-key-when-signing|Negation Of The Secret Key When Signing]] the signer uses a possibly negated secret key
+ ''d = g<sub>v</sub>⋅gacc<sub>v</sub>⋅d' mod n''
+when producing a partial signature to ensure that the aggregate signature will correspond to an aggregate public key with even Y coordinate.
+</poem>
+
+<poem>
+The ''[[#SigVerify negation|PartialSigVerifyInternal]]'' algorithm is supposed to check
+ ''s⋅G = Re<sub>⁎</sub> + e⋅a⋅d⋅G''.
+</poem>
+
+<poem>
+The verifier doesn't have access to ''d⋅G'' but can construct it using the individual public key ''pk'' as follows:
+''d⋅G
+ = g<sub>v</sub>⋅gacc<sub>v</sub>⋅d'⋅G
+ = g<sub>v</sub>⋅gacc<sub>v</sub>⋅cpoint(pk)''
+Note that the aggregate public key and list of tweaks are inputs to partial signature verification, so the verifier can also construct ''g<sub>v</sub>'' and ''gacc<sub>v</sub>''.
+</poem>
+
+=== Dealing with Infinity in Nonce Aggregation ===
+
+If the nonce aggregator provides ''aggnonce = bytes(33,0) || bytes(33,0)'', either the nonce aggregator is dishonest or there is at least one dishonest signer (except with negligible probability).
+If signing aborted in this case, it would be impossible to determine who is dishonest.
+Therefore, signing continues so that the culprit is revealed when collecting and verifying partial signatures.
+
+However, the final nonce ''R'' of a BIP340 Schnorr signature cannot be the point at infinity.
+If the final nonce were nonetheless allowed to be the point at infinity, the scheme would lose the following property:
+if ''PartialSigVerify'' succeeds for all partial signatures, then ''PartialSigAgg'' will return a valid Schnorr signature.
+Since this is a valuable feature, we modify MuSig2* (which is defined in the appendix of the [https://eprint.iacr.org/2020/1261 MuSig2 paper]) to avoid producing an invalid Schnorr signature while still allowing detection of the dishonest signer: In ''GetSessionValues'', if the final nonce ''R'' would be the point at infinity, set it to the generator instead (an arbitrary choice).
+
+This modification to ''GetSessionValues'' does not affect the unforgeability of the scheme.
+Given a successful adversary against the unforgeability game (EUF-CMA) for the modified scheme, a reduction can win the unforgeability game for the original scheme by simulating the modification towards the adversary:
+When the adversary provides ''aggnonce' = bytes(33, 0) || bytes(33, 0)'', the reduction sets ''aggnonce = cbytes_ext(G) || bytes(33, 0)''.
+For any other ''aggnonce' '', the reduction sets ''aggnonce = aggnonce' ''.
+(The case that the adversary provides an ''aggnonce' ≠ bytes(33, 0) || bytes(33, 0) '' but nevertheless ''R' '' in ''GetSessionValues'' is the point at infinity happens only with negligible probability.)
+
+=== Choosing the Size of the Nonce ===
+
+The [https://eprint.iacr.org/2020/1261 MuSig2 paper] contains two security proofs that apply to different variants of the scheme.
+The first proof relies on the random oracle model (ROM) and applies to a scheme variant where each signer's nonce consists of four elliptic curve points.
+The second proof requires a stronger model, namely the combination of the ROM and the algebraic group model (AGM),
+and applies to an optimized scheme variant where the signers' nonces consist of only two points.
+This proposal uses the latter, optimized scheme variant.
+Relying on the stronger model is a legitimate choice for the following reasons:
+
+First, an approach widely taken is interpreting a Forking Lemma proof in the ROM merely as design justification and ignoring the loss of security due to the Forking Lemma.
+If one believes in this approach, then the ROM may not be the optimal model in the first place because some parts of the concrete security bound are arbitrarily ignored.
+One may just as well move to the ROM+AGM model, which produces bounds close to the best-known attacks, e.g., for Schnorr signatures.
+
+Second, as of this writing, there is no instance of a serious cryptographic scheme with a security proof in the AGM that is not secure in practice.
+There are, however, insecure toy schemes with AGM security proofs, but those explicitly violate the requirements of the AGM.
+[https://eprint.iacr.org/2022/226.pdf Broken AGM proofs of toy schemes] provide group elements to the adversary without declaring them as group element inputs.
+In contrast, in MuSig2, all group elements that arise in the scheme are known to the adversary and declared as group element inputs.
+A scheme very similar to MuSig2 and with two-point nonces was independently proven secure in the ROM and AGM by [https://eprint.iacr.org/2020/1245 Alper and Burdges].
+
+== Backwards Compatibility ==
+
+This document proposes a standard for the MuSig2 multi-signature scheme that is compatible with [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP340].
+MuSig2 is ''not'' compatible with ECDSA signatures traditionally used in Bitcoin.
+
+== Change Log ==
+
+To help implementers understand updates to this document, we attach a version number that resembles ''semantic versioning'' (<code>MAJOR.MINOR.PATCH</code>).
+The <code>MAJOR</code> version is incremented if changes to the BIP are introduced that are incompatible with prior versions.
+An exception to this rule is <code>MAJOR</code> version zero (0.y.z) which is for development and does not need to be incremented if backwards incompatible changes are introduced.
+The <code>MINOR</code> version is incremented whenever the inputs or the output of an algorithm changes in a backward-compatible way or new backward-compatible functionality is added.
+The <code>PATCH</code> version is incremented for other changes that are noteworthy (bug fixes, test vectors, important clarifications, etc.).
+
+* '''1.0.0''' (2023-03-26):
+** Number 327 was assigned to this BIP.
+* '''1.0.0-rc.4''' (2023-03-02):
+** Add expected value of ''pubnonce'' to ''NonceGen'' test vectors.
+* '''1.0.0-rc.3''' (2023-02-28):
+** Improve ''NonceGen'' test vectors by not using an all-zero hex string as ''rand_'' values. This change addresses potential issues in some implementations that interpret this as a special value indicating uninitialized memory or a broken random number generator and therefore return an error.
+** Fix invalid length of a ''pubnonce'' in the ''PartialSigVerify'' test vectors.
+** Improve ''KeySort'' test vector.
+** Add explicit ''IndividualPubkey'' algorithm.
+** Rename KeyGen Context to KeyAgg Context.
+* '''1.0.0-rc.2''' (2022-10-28):
+** Fix vulnerability that can occur in certain unusual scenarios (see [https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-October/021000.html bitcoin-dev mailing list]): Add mandatory ''pk'' argument to ''NonceGen'', append ''pk'' to ''secnonce'' and check in ''Sign'' that the ''pk'' in ''secnonce'' matches. Update test vectors.
+** Make sure that signer's key is in list of individual public keys by adding failure case to ''GetSessionKeyAggCoeff'' and add test vectors.
+* '''1.0.0-rc.1''' (2022-10-03): Submit draft BIP to the BIPs repository
+* '''0.8.6''' (2022-09-15): Clarify that implementations do not need to support every feature and add a test vector for signing with a tweaked key
+* '''0.8.5''' (2022-09-05): Rename some functions to improve clarity.
+* '''0.8.4''' (2022-09-02): Make naming of nonce variants ''R'' in specifications of the algorithms and reference code easier to read and more consistent.
+* '''0.8.3''' (2022-09-01): Overwrite ''secnonce'' in ''sign'' reference implementation to help prevent accidental reuse and add test vector for invalid ''secnonce''.
+* '''0.8.2''' (2022-08-30): Fix ''KeySort'' input length and add test vectors
+* '''0.8.1''' (2022-08-26): Add ''DeterministicSign'' algorithm
+* '''0.8.0''' (2022-08-26): Switch from X-only to plain public key for individual public keys. This requires updating a large portion of the test vectors.
+* '''0.7.2''' (2022-08-17): Add ''NonceGen'' and ''Sign/PartialSigVerify'' test vectors for messages longer than 32 bytes.
+* '''0.7.1''' (2022-08-10): Extract test vectors into separate JSON file.
+* '''0.7.0''' (2022-07-31): Change ''NonceGen'' such that output when message is not present is different from when message is present but has length 0.
+* '''0.6.0''' (2022-07-31): Allow variable length messages, change serialization of the message in the ''NonceGen'' hash function, and add test vectors
+* '''0.5.2''' (2022-06-26): Fix ''aggpk'' in ''NonceGen'' test vectors.
+* '''0.5.1''' (2022-06-22): Rename "ordinary" tweaking to "plain" tweaking.
+* '''0.5.0''' (2022-06-21): Separate ApplyTweak from KeyAgg and introduce KeyGen Context.
+* '''0.4.0''' (2022-06-20): Allow the output of NonceAgg to be infinity and add test vectors
+* '''0.3.2''' (2022-06-02): Add a lot of test vectors and improve handling of invalid contributions in reference code.
+* '''0.3.1''' (2022-05-24): Add ''NonceGen'' test vectors
+* '''0.3.0''' (2022-05-24): Hash ''i - 1'' instead of ''i'' in ''NonceGen''
+* '''0.2.0''' (2022-05-19): Change order of arguments in ''NonceGen'' hash function
+* '''0.1.0''' (2022-05-19): Publication of draft BIP on the bitcoin-dev mailing list
+
+== Footnotes ==
+
+<references />
+
+== Acknowledgements ==
+
+We thank Brandon Black, Riccardo Casatta, Lloyd Fournier, Russell O'Connor, and Pieter Wuille for their contributions to this document.
diff --git a/bip-0327/gen_vectors_helper.py b/bip-0327/gen_vectors_helper.py
new file mode 100644
index 0000000..a70bb6f
--- /dev/null
+++ b/bip-0327/gen_vectors_helper.py
@@ -0,0 +1,184 @@
+from reference import *
+
+def gen_key_agg_vectors():
+ print("key_agg_vectors.json: Intermediate tweaking result is point at infinity")
+ sk = bytes.fromhex("7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671")
+ pk = individual_pk(sk)
+ keygen_ctx = key_agg([pk])
+ aggpoint, _, _ = keygen_ctx
+ aggsk = key_agg_coeff([pk], pk)*int_from_bytes(sk) % n
+ t = n - aggsk
+ assert point_add(point_mul(G, t), aggpoint) == None
+ is_xonly = False
+ tweak = bytes_from_int(t)
+ assert_raises(ValueError, lambda: apply_tweak(keygen_ctx, tweak, is_xonly), lambda e: True)
+ print(" pubkey:", pk.hex().upper())
+ print(" tweak: ", tweak.hex().upper())
+
+def check_sign_verify_vectors():
+ with open(os.path.join(sys.path[0], 'vectors', 'sign_verify_vectors.json')) as f:
+ test_data = json.load(f)
+ X = fromhex_all(test_data["pubkeys"])
+ pnonce = fromhex_all(test_data["pnonces"])
+ aggnonces = fromhex_all(test_data["aggnonces"])
+ msgs = fromhex_all(test_data["msgs"])
+
+ valid_test_cases = test_data["valid_test_cases"]
+ for (i, test_case) in enumerate(valid_test_cases):
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ aggnonce = aggnonces[test_case["aggnonce_index"]]
+ assert nonce_agg(pubnonces) == aggnonce
+ msg = msgs[test_case["msg_index"]]
+ signer_index = test_case["signer_index"]
+ expected = bytes.fromhex(test_case["expected"])
+
+ session_ctx = SessionContext(aggnonce, pubkeys, [], [], msg)
+ (Q, _, _, _, R, _) = get_session_values(session_ctx)
+ # Make sure the vectors include tests for both variants of Q and R
+ if i == 0:
+ assert has_even_y(Q) and not has_even_y(R)
+ if i == 1:
+ assert not has_even_y(Q) and has_even_y(R)
+ if i == 2:
+ assert has_even_y(Q) and has_even_y(R)
+
+def check_tweak_vectors():
+ with open(os.path.join(sys.path[0], 'vectors', 'tweak_vectors.json')) as f:
+ test_data = json.load(f)
+
+ X = fromhex_all(test_data["pubkeys"])
+ pnonce = fromhex_all(test_data["pnonces"])
+ tweak = fromhex_all(test_data["tweaks"])
+ valid_test_cases = test_data["valid_test_cases"]
+
+ for (i, test_case) in enumerate(valid_test_cases):
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ tweaks = [tweak[i] for i in test_case["tweak_indices"]]
+ is_xonly = test_case["is_xonly"]
+
+ _, gacc, _ = key_agg_and_tweak(pubkeys, tweaks, is_xonly)
+ # Make sure the vectors include tests for gacc = 1 and -1
+ if i == 0:
+ assert gacc == n - 1
+ if i == 1:
+ assert gacc == 1
+
+def sig_agg_vectors():
+ print("sig_agg_vectors.json:")
+ sk = fromhex_all([
+ "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
+ "3874D22DE7A7290C49CE7F1DC17D1A8CD8918E1F799055139D57FC0988D04D10",
+ "D0EA1B84481ED1BCFAA39D6775F97BDC9BF8D7C02FD0C009D6D85BAE5EC7B87A",
+ "FC2BF9E056B273AF0A8AABB815E541A3552C142AC10D4FE584F01D2CAB84F577"])
+ pubkeys = list(map(lambda secret: individual_pk(secret), sk))
+ indices32 = [i.to_bytes(32, 'big') for i in range(6)]
+ secnonces, pnonces = zip(*[nonce_gen_internal(r, None, pubkeys[0], None, None, None) for r in indices32])
+ tweaks = fromhex_all([
+ "B511DA492182A91B0FFB9A98020D55F260AE86D7ECBD0399C7383D59A5F2AF7C",
+ "A815FE049EE3C5AAB66310477FBC8BCCCAC2F3395F59F921C364ACD78A2F48DC",
+ "75448A87274B056468B977BE06EB1E9F657577B7320B0A3376EA51FD420D18A8"])
+ msg = bytes.fromhex("599C67EA410D005B9DA90817CF03ED3B1C868E4DA4EDF00A5880B0082C237869")
+
+ psigs = [None] * 9
+
+ valid_test_cases = [
+ {
+ "aggnonce": None,
+ "nonce_indices": [0, 1],
+ "key_indices": [0, 1],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "psig_indices": [0, 1],
+ }, {
+ "aggnonce": None,
+ "nonce_indices": [0, 2],
+ "key_indices": [0, 2],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "psig_indices": [2, 3],
+ }, {
+ "aggnonce": None,
+ "nonce_indices": [0, 3],
+ "key_indices": [0, 2],
+ "tweak_indices": [0],
+ "is_xonly": [False],
+ "psig_indices": [4, 5],
+ }, {
+ "aggnonce": None,
+ "nonce_indices": [0, 4],
+ "key_indices": [0, 3],
+ "tweak_indices": [0, 1, 2],
+ "is_xonly": [True, False, True],
+ "psig_indices": [6, 7],
+ },
+ ]
+ for (i, test_case) in enumerate(valid_test_cases):
+ is_xonly = test_case["is_xonly"]
+ nonce_indices = test_case["nonce_indices"]
+ key_indices = test_case["key_indices"]
+ psig_indices = test_case["psig_indices"]
+ vec_pnonces = [pnonces[i] for i in nonce_indices]
+ vec_pubkeys = [pubkeys[i] for i in key_indices]
+ vec_tweaks = [tweaks[i] for i in test_case["tweak_indices"]]
+
+ aggnonce = nonce_agg(vec_pnonces)
+ test_case["aggnonce"] = aggnonce.hex().upper()
+ session_ctx = SessionContext(aggnonce, vec_pubkeys, vec_tweaks, is_xonly, msg)
+
+ for j in range(len(key_indices)):
+ # WARNING: An actual implementation should _not_ copy the secnonce.
+ # Reusing the secnonce, as we do here for testing purposes, can leak the
+ # secret key.
+ secnonce_tmp = bytearray(secnonces[nonce_indices[j]][:64] + pubkeys[key_indices[j]])
+ psigs[psig_indices[j]] = sign(secnonce_tmp, sk[key_indices[j]], session_ctx)
+ sig = partial_sig_agg([psigs[i] for i in psig_indices], session_ctx)
+ keygen_ctx = key_agg_and_tweak(vec_pubkeys, vec_tweaks, is_xonly)
+ # To maximize coverage of the sig_agg algorithm, we want one public key
+ # point with an even and one with an odd Y coordinate.
+ if i == 0:
+ assert(has_even_y(keygen_ctx[0]))
+ if i == 1:
+ assert(not has_even_y(keygen_ctx[0]))
+ aggpk = get_xonly_pk(keygen_ctx)
+ assert schnorr_verify(msg, aggpk, sig)
+ test_case["expected"] = sig.hex().upper()
+
+ error_test_case = {
+ "aggnonce": None,
+ "nonce_indices": [0, 4],
+ "key_indices": [0, 3],
+ "tweak_indices": [0, 1, 2],
+ "is_xonly": [True, False, True],
+ "psig_indices": [7, 8],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 1
+ },
+ "comment": "Partial signature is invalid because it exceeds group size"
+ }
+
+ psigs[8] = bytes.fromhex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141")
+
+ vec_pnonces = [pnonces[i] for i in error_test_case["nonce_indices"]]
+ aggnonce = nonce_agg(vec_pnonces)
+ error_test_case["aggnonce"] = aggnonce.hex().upper()
+
+ def tohex_all(l):
+ return list(map(lambda e: e.hex().upper(), l))
+
+ print(json.dumps({
+ "pubkeys": tohex_all(pubkeys),
+ "pnonces": tohex_all(pnonces),
+ "tweaks": tohex_all(tweaks),
+ "psigs": tohex_all(psigs),
+ "msg": msg.hex().upper(),
+ "valid_test_cases": valid_test_cases,
+ "error_test_cases": [error_test_case]
+ }, indent=4))
+
+gen_key_agg_vectors()
+check_sign_verify_vectors()
+check_tweak_vectors()
+print()
+sig_agg_vectors()
diff --git a/bip-0327/reference.py b/bip-0327/reference.py
new file mode 100644
index 0000000..edf6e76
--- /dev/null
+++ b/bip-0327/reference.py
@@ -0,0 +1,880 @@
+# BIP327 reference implementation
+#
+# WARNING: This implementation is for demonstration purposes only and _not_ to
+# be used in production environments. The code is vulnerable to timing attacks,
+# for example.
+
+from typing import Any, List, Optional, Tuple, NewType, NamedTuple
+import hashlib
+import secrets
+import time
+
+#
+# The following helper functions were copied from the BIP-340 reference implementation:
+# https://github.com/bitcoin/bips/blob/master/bip-0340/reference.py
+#
+
+p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
+n = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
+
+# Points are tuples of X and Y coordinates and the point at infinity is
+# represented by the None keyword.
+G = (0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)
+
+Point = Tuple[int, int]
+
+# This implementation can be sped up by storing the midstate after hashing
+# tag_hash instead of rehashing it all the time.
+def tagged_hash(tag: str, msg: bytes) -> bytes:
+ tag_hash = hashlib.sha256(tag.encode()).digest()
+ return hashlib.sha256(tag_hash + tag_hash + msg).digest()
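+
+# Illustrative sketch of that optimization (not part of the reference; the
+# helper name below is hypothetical): hash the doubled tag once and copy the
+# resulting hash state for every message.
+#
+#   _midstates: dict = {}
+#   def tagged_hash_cached(tag: str, msg: bytes) -> bytes:
+#       if tag not in _midstates:
+#           tag_hash = hashlib.sha256(tag.encode()).digest()
+#           _midstates[tag] = hashlib.sha256(tag_hash + tag_hash)
+#       h = _midstates[tag].copy()
+#       h.update(msg)
+#       return h.digest()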
+
+def is_infinite(P: Optional[Point]) -> bool:
+ return P is None
+
+def x(P: Point) -> int:
+ assert not is_infinite(P)
+ return P[0]
+
+def y(P: Point) -> int:
+ assert not is_infinite(P)
+ return P[1]
+
+def point_add(P1: Optional[Point], P2: Optional[Point]) -> Optional[Point]:
+ if P1 is None:
+ return P2
+ if P2 is None:
+ return P1
+ if (x(P1) == x(P2)) and (y(P1) != y(P2)):
+ return None
+ if P1 == P2:
+ lam = (3 * x(P1) * x(P1) * pow(2 * y(P1), p - 2, p)) % p
+ else:
+ lam = ((y(P2) - y(P1)) * pow(x(P2) - x(P1), p - 2, p)) % p
+ x3 = (lam * lam - x(P1) - x(P2)) % p
+ return (x3, (lam * (x(P1) - x3) - y(P1)) % p)
+
+def point_mul(P: Optional[Point], n: int) -> Optional[Point]:
+ R = None
+ for i in range(256):
+ if (n >> i) & 1:
+ R = point_add(R, P)
+ P = point_add(P, P)
+ return R
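+
+# Note that this is a straightforward double-and-add loop and, like the rest of
+# this file, runs in variable time (see the warning at the top). For example,
+# point_mul(G, 2) == point_add(G, G).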
+
+def bytes_from_int(x: int) -> bytes:
+ return x.to_bytes(32, byteorder="big")
+
+def lift_x(b: bytes) -> Optional[Point]:
+ x = int_from_bytes(b)
+ if x >= p:
+ return None
+ y_sq = (pow(x, 3, p) + 7) % p
+ y = pow(y_sq, (p + 1) // 4, p)
+ if pow(y, 2, p) != y_sq:
+ return None
+ return (x, y if y & 1 == 0 else p-y)
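+
+# lift_x returns the curve point with the given X coordinate and an even Y
+# coordinate (the BIP340 convention), or None if no such point exists. For
+# example, lift_x(bytes_from_int(G[0])) == G, since G's Y coordinate is even.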
+
+def int_from_bytes(b: bytes) -> int:
+ return int.from_bytes(b, byteorder="big")
+
+def has_even_y(P: Point) -> bool:
+ assert not is_infinite(P)
+ return y(P) % 2 == 0
+
+def schnorr_verify(msg: bytes, pubkey: bytes, sig: bytes) -> bool:
+ if len(msg) != 32:
+ raise ValueError('The message must be a 32-byte array.')
+ if len(pubkey) != 32:
+ raise ValueError('The public key must be a 32-byte array.')
+ if len(sig) != 64:
+ raise ValueError('The signature must be a 64-byte array.')
+ P = lift_x(pubkey)
+ r = int_from_bytes(sig[0:32])
+ s = int_from_bytes(sig[32:64])
+ if (P is None) or (r >= p) or (s >= n):
+ return False
+ e = int_from_bytes(tagged_hash("BIP0340/challenge", sig[0:32] + pubkey + msg)) % n
+ R = point_add(point_mul(G, s), point_mul(P, n - e))
+ if (R is None) or (not has_even_y(R)) or (x(R) != r):
+ return False
+ return True
+
+#
+# End of helper functions copied from BIP-340 reference implementation.
+#
+
+PlainPk = NewType('PlainPk', bytes)
+XonlyPk = NewType('XonlyPk', bytes)
+
+# There are two types of exceptions that can be raised by this implementation:
+# - ValueError for indicating that an input doesn't conform to some function
+# precondition (e.g. an input array is the wrong length, a serialized
+# representation doesn't have the correct format).
+# - InvalidContributionError for indicating that a signer (or the
+# aggregator) is misbehaving in the protocol.
+#
+# Assertions are used to (1) satisfy the type-checking system, and (2) check for
+# inconvenient events that can't happen except with negligible probability (e.g.
+# output of a hash function is 0) and can't be manually triggered by any
+# signer.
+
+# This exception is raised if a party (signer or nonce aggregator) sends invalid
+# values. Actual implementations should not crash when receiving invalid
+# contributions. Instead, they should hold the offending party accountable.
+class InvalidContributionError(Exception):
+ def __init__(self, signer, contrib):
+ self.signer = signer
+ # contrib is one of "pubkey", "pubnonce", "aggnonce", or "psig".
+ self.contrib = contrib
+
+infinity = None
+
+def xbytes(P: Point) -> bytes:
+ return bytes_from_int(x(P))
+
+def cbytes(P: Point) -> bytes:
+ a = b'\x02' if has_even_y(P) else b'\x03'
+ return a + xbytes(P)
+
+def cbytes_ext(P: Optional[Point]) -> bytes:
+ if is_infinite(P):
+ return (0).to_bytes(33, byteorder='big')
+ assert P is not None
+ return cbytes(P)
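+
+# The "ext" variants additionally handle the point at infinity, which is
+# serialized as 33 zero bytes, i.e. cbytes_ext(None) == (0).to_bytes(33, 'big').
+# This encoding only occurs in aggregate nonces, never in public keys.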
+
+def point_negate(P: Optional[Point]) -> Optional[Point]:
+ if P is None:
+ return P
+ return (x(P), p - y(P))
+
+def cpoint(x: bytes) -> Point:
+ if len(x) != 33:
+ raise ValueError('x is not a valid compressed point.')
+ P = lift_x(x[1:33])
+ if P is None:
+ raise ValueError('x is not a valid compressed point.')
+ if x[0] == 2:
+ return P
+ elif x[0] == 3:
+ P = point_negate(P)
+ assert P is not None
+ return P
+ else:
+ raise ValueError('x is not a valid compressed point.')
+
+def cpoint_ext(x: bytes) -> Optional[Point]:
+ if x == (0).to_bytes(33, 'big'):
+ return None
+ else:
+ return cpoint(x)
+
+# Return the plain public key corresponding to a given secret key
+def individual_pk(seckey: bytes) -> PlainPk:
+ d0 = int_from_bytes(seckey)
+ if not (1 <= d0 <= n - 1):
+ raise ValueError('The secret key must be an integer in the range 1..n-1.')
+ P = point_mul(G, d0)
+ assert P is not None
+ return PlainPk(cbytes(P))
+
+def key_sort(pubkeys: List[PlainPk]) -> List[PlainPk]:
+ pubkeys.sort()
+ return pubkeys
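+
+# key_sort orders the 33-byte compressed encodings lexicographically. Note that
+# it sorts the given list in place and returns the same list object.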
+
+KeyAggContext = NamedTuple('KeyAggContext', [('Q', Point),
+ ('gacc', int),
+ ('tacc', int)])
+
+def get_xonly_pk(keyagg_ctx: KeyAggContext) -> XonlyPk:
+ Q, _, _ = keyagg_ctx
+ return XonlyPk(xbytes(Q))
+
+def key_agg(pubkeys: List[PlainPk]) -> KeyAggContext:
+ pk2 = get_second_key(pubkeys)
+ u = len(pubkeys)
+ Q = infinity
+ for i in range(u):
+ try:
+ P_i = cpoint(pubkeys[i])
+ except ValueError:
+ raise InvalidContributionError(i, "pubkey")
+ a_i = key_agg_coeff_internal(pubkeys, pubkeys[i], pk2)
+ Q = point_add(Q, point_mul(P_i, a_i))
+ # Q is not the point at infinity except with negligible probability.
+ assert(Q is not None)
+ gacc = 1
+ tacc = 0
+ return KeyAggContext(Q, gacc, tacc)
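+
+# Example usage (illustrative; sk_a and sk_b are hypothetical 32-byte secret
+# keys): derive a single x-only aggregate key for two signers.
+#
+#   pk_a, pk_b = individual_pk(sk_a), individual_pk(sk_b)
+#   aggpk = get_xonly_pk(key_agg(key_sort([pk_a, pk_b])))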
+
+def hash_keys(pubkeys: List[PlainPk]) -> bytes:
+ return tagged_hash('KeyAgg list', b''.join(pubkeys))
+
+def get_second_key(pubkeys: List[PlainPk]) -> PlainPk:
+ u = len(pubkeys)
+ for j in range(1, u):
+ if pubkeys[j] != pubkeys[0]:
+ return pubkeys[j]
+ return PlainPk(b'\x00'*33)
+
+def key_agg_coeff(pubkeys: List[PlainPk], pk_: PlainPk) -> int:
+ pk2 = get_second_key(pubkeys)
+ return key_agg_coeff_internal(pubkeys, pk_, pk2)
+
+def key_agg_coeff_internal(pubkeys: List[PlainPk], pk_: PlainPk, pk2: PlainPk) -> int:
+ L = hash_keys(pubkeys)
+ if pk_ == pk2:
+ return 1
+ return int_from_bytes(tagged_hash('KeyAgg coefficient', L + pk_)) % n
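+
+# Every key is weighted by the hash-derived coefficient above, except the
+# second distinct key in the list, which gets coefficient 1 (see BIP327 for the
+# rationale behind this special case).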
+
+def apply_tweak(keyagg_ctx: KeyAggContext, tweak: bytes, is_xonly: bool) -> KeyAggContext:
+ if len(tweak) != 32:
+ raise ValueError('The tweak must be a 32-byte array.')
+ Q, gacc, tacc = keyagg_ctx
+ if is_xonly and not has_even_y(Q):
+ g = n - 1
+ else:
+ g = 1
+ t = int_from_bytes(tweak)
+ if t >= n:
+ raise ValueError('The tweak must be less than n.')
+ Q_ = point_add(point_mul(Q, g), point_mul(G, t))
+ if Q_ is None:
+ raise ValueError('The result of tweaking cannot be infinity.')
+ gacc_ = g * gacc % n
+ tacc_ = (t + g * tacc) % n
+ return KeyAggContext(Q_, gacc_, tacc_)
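+
+# Example usage (illustrative; `taptweak` stands for a hypothetical 32-byte
+# tweak such as a BIP341 TapTweak hash): an x-only tweak is applied with
+# is_xonly=True, a plain (e.g. BIP32-style) tweak with is_xonly=False.
+#
+#   keyagg_ctx = apply_tweak(keyagg_ctx, taptweak, True)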
+
+def bytes_xor(a: bytes, b: bytes) -> bytes:
+ return bytes(x ^ y for x, y in zip(a, b))
+
+def nonce_hash(rand: bytes, pk: PlainPk, aggpk: XonlyPk, i: int, msg_prefixed: bytes, extra_in: bytes) -> int:
+ buf = b''
+ buf += rand
+ buf += len(pk).to_bytes(1, 'big')
+ buf += pk
+ buf += len(aggpk).to_bytes(1, 'big')
+ buf += aggpk
+ buf += msg_prefixed
+ buf += len(extra_in).to_bytes(4, 'big')
+ buf += extra_in
+ buf += i.to_bytes(1, 'big')
+ return int_from_bytes(tagged_hash('MuSig/nonce', buf))
+
+def nonce_gen_internal(rand_: bytes, sk: Optional[bytes], pk: PlainPk, aggpk: Optional[XonlyPk], msg: Optional[bytes], extra_in: Optional[bytes]) -> Tuple[bytearray, bytes]:
+ if sk is not None:
+ rand = bytes_xor(sk, tagged_hash('MuSig/aux', rand_))
+ else:
+ rand = rand_
+ if aggpk is None:
+ aggpk = XonlyPk(b'')
+ if msg is None:
+ msg_prefixed = b'\x00'
+ else:
+ msg_prefixed = b'\x01'
+ msg_prefixed += len(msg).to_bytes(8, 'big')
+ msg_prefixed += msg
+ if extra_in is None:
+ extra_in = b''
+ k_1 = nonce_hash(rand, pk, aggpk, 0, msg_prefixed, extra_in) % n
+ k_2 = nonce_hash(rand, pk, aggpk, 1, msg_prefixed, extra_in) % n
+ # k_1 == 0 or k_2 == 0 cannot occur except with negligible probability.
+ assert k_1 != 0
+ assert k_2 != 0
+ R_s1 = point_mul(G, k_1)
+ R_s2 = point_mul(G, k_2)
+ assert R_s1 is not None
+ assert R_s2 is not None
+ pubnonce = cbytes(R_s1) + cbytes(R_s2)
+ secnonce = bytearray(bytes_from_int(k_1) + bytes_from_int(k_2) + pk)
+ return secnonce, pubnonce
+
+def nonce_gen(sk: Optional[bytes], pk: PlainPk, aggpk: Optional[XonlyPk], msg: Optional[bytes], extra_in: Optional[bytes]) -> Tuple[bytearray, bytes]:
+ if sk is not None and len(sk) != 32:
+ raise ValueError('The optional byte array sk must have length 32.')
+ if aggpk is not None and len(aggpk) != 32:
+ raise ValueError('The optional byte array aggpk must have length 32.')
+ rand_ = secrets.token_bytes(32)
+ return nonce_gen_internal(rand_, sk, pk, aggpk, msg, extra_in)
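+
+# Example usage (illustrative): generate a nonce pair for a signer with secret
+# key sk and public key pk, mixing in the message and aggregate public key as
+# defense in depth.
+#
+#   secnonce, pubnonce = nonce_gen(sk, pk, aggpk, msg, None)
+#
+# The pubnonce is sent to the other signers; the secnonce must be kept secret
+# and must never be used for more than one signing attempt.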
+
+def nonce_agg(pubnonces: List[bytes]) -> bytes:
+ u = len(pubnonces)
+ aggnonce = b''
+ for j in (1, 2):
+ R_j = infinity
+ for i in range(u):
+ try:
+ R_ij = cpoint(pubnonces[i][(j-1)*33:j*33])
+ except ValueError:
+ raise InvalidContributionError(i, "pubnonce")
+ R_j = point_add(R_j, R_ij)
+ aggnonce += cbytes_ext(R_j)
+ return aggnonce
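+
+# Example usage (illustrative; pubnonce_1 and pubnonce_2 are the pubnonces
+# received from the individual signers):
+#
+#   aggnonce = nonce_agg([pubnonce_1, pubnonce_2])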
+
+SessionContext = NamedTuple('SessionContext', [('aggnonce', bytes),
+ ('pubkeys', List[PlainPk]),
+ ('tweaks', List[bytes]),
+ ('is_xonly', List[bool]),
+ ('msg', bytes)])
+
+def key_agg_and_tweak(pubkeys: List[PlainPk], tweaks: List[bytes], is_xonly: List[bool]):
+ if len(tweaks) != len(is_xonly):
+ raise ValueError('The `tweaks` and `is_xonly` arrays must have the same length.')
+ keyagg_ctx = key_agg(pubkeys)
+ v = len(tweaks)
+ for i in range(v):
+ keyagg_ctx = apply_tweak(keyagg_ctx, tweaks[i], is_xonly[i])
+ return keyagg_ctx
+
+def get_session_values(session_ctx: SessionContext) -> Tuple[Point, int, int, int, Point, int]:
+ (aggnonce, pubkeys, tweaks, is_xonly, msg) = session_ctx
+ Q, gacc, tacc = key_agg_and_tweak(pubkeys, tweaks, is_xonly)
+ b = int_from_bytes(tagged_hash('MuSig/noncecoef', aggnonce + xbytes(Q) + msg)) % n
+ try:
+ R_1 = cpoint_ext(aggnonce[0:33])
+ R_2 = cpoint_ext(aggnonce[33:66])
+ except ValueError:
+ # Nonce aggregator sent invalid nonces
+ raise InvalidContributionError(None, "aggnonce")
+ R_ = point_add(R_1, point_mul(R_2, b))
+ R = R_ if not is_infinite(R_) else G
+ assert R is not None
+ e = int_from_bytes(tagged_hash('BIP0340/challenge', xbytes(R) + xbytes(Q) + msg)) % n
+ return (Q, gacc, tacc, b, R, e)
+
+def get_session_key_agg_coeff(session_ctx: SessionContext, P: Point) -> int:
+ (_, pubkeys, _, _, _) = session_ctx
+ pk = PlainPk(cbytes(P))
+ if pk not in pubkeys:
+ raise ValueError('The signer\'s pubkey must be included in the list of pubkeys.')
+ return key_agg_coeff(pubkeys, pk)
+
+def sign(secnonce: bytearray, sk: bytes, session_ctx: SessionContext) -> bytes:
+ (Q, gacc, _, b, R, e) = get_session_values(session_ctx)
+ k_1_ = int_from_bytes(secnonce[0:32])
+ k_2_ = int_from_bytes(secnonce[32:64])
+ # Overwrite the secnonce argument with zeros such that subsequent calls of
+ # sign with the same secnonce raise a ValueError.
+ secnonce[:64] = bytearray(b'\x00'*64)
+ if not 0 < k_1_ < n:
+ raise ValueError('first secnonce value is out of range.')
+ if not 0 < k_2_ < n:
+ raise ValueError('second secnonce value is out of range.')
+ k_1 = k_1_ if has_even_y(R) else n - k_1_
+ k_2 = k_2_ if has_even_y(R) else n - k_2_
+ d_ = int_from_bytes(sk)
+ if not 0 < d_ < n:
+ raise ValueError('secret key value is out of range.')
+ P = point_mul(G, d_)
+ assert P is not None
+ pk = cbytes(P)
+ if not pk == secnonce[64:97]:
+ raise ValueError('Public key does not match nonce_gen argument')
+ a = get_session_key_agg_coeff(session_ctx, P)
+ g = 1 if has_even_y(Q) else n - 1
+ d = g * gacc * d_ % n
+ s = (k_1 + b * k_2 + e * a * d) % n
+ psig = bytes_from_int(s)
+ R_s1 = point_mul(G, k_1_)
+ R_s2 = point_mul(G, k_2_)
+ assert R_s1 is not None
+ assert R_s2 is not None
+ pubnonce = cbytes(R_s1) + cbytes(R_s2)
+ # Optional correctness check. The result of signing should pass signature verification.
+ assert partial_sig_verify_internal(psig, pubnonce, pk, session_ctx)
+ return psig
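+
+# Example usage (illustrative): once the aggregate nonce is known, each signer
+# builds a SessionContext and produces a 32-byte partial signature.
+#
+#   session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+#   psig = sign(secnonce, sk, session_ctx)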
+
+def det_nonce_hash(sk_: bytes, aggothernonce: bytes, aggpk: bytes, msg: bytes, i: int) -> int:
+ buf = b''
+ buf += sk_
+ buf += aggothernonce
+ buf += aggpk
+ buf += len(msg).to_bytes(8, 'big')
+ buf += msg
+ buf += i.to_bytes(1, 'big')
+ return int_from_bytes(tagged_hash('MuSig/deterministic/nonce', buf))
+
+def deterministic_sign(sk: bytes, aggothernonce: bytes, pubkeys: List[PlainPk], tweaks: List[bytes], is_xonly: List[bool], msg: bytes, rand: Optional[bytes]) -> Tuple[bytes, bytes]:
+ if rand is not None:
+ sk_ = bytes_xor(sk, tagged_hash('MuSig/aux', rand))
+ else:
+ sk_ = sk
+ aggpk = get_xonly_pk(key_agg_and_tweak(pubkeys, tweaks, is_xonly))
+
+ k_1 = det_nonce_hash(sk_, aggothernonce, aggpk, msg, 0) % n
+ k_2 = det_nonce_hash(sk_, aggothernonce, aggpk, msg, 1) % n
+ # k_1 == 0 or k_2 == 0 cannot occur except with negligible probability.
+ assert k_1 != 0
+ assert k_2 != 0
+
+ R_s1 = point_mul(G, k_1)
+ R_s2 = point_mul(G, k_2)
+ assert R_s1 is not None
+ assert R_s2 is not None
+ pubnonce = cbytes(R_s1) + cbytes(R_s2)
+ secnonce = bytearray(bytes_from_int(k_1) + bytes_from_int(k_2) + individual_pk(sk))
+ try:
+ aggnonce = nonce_agg([pubnonce, aggothernonce])
+ except Exception:
+ raise InvalidContributionError(None, "aggothernonce")
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ psig = sign(secnonce, sk, session_ctx)
+ return (pubnonce, psig)
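+
+# Example usage (illustrative, mirroring test_sign_and_verify_random below): a
+# signer that signs last can avoid storing a secnonce by deriving it
+# deterministically from the aggregate of all other signers' pubnonces.
+#
+#   aggothernonce = nonce_agg([pubnonce_1])
+#   pubnonce_2, psig_2 = deterministic_sign(sk_2, aggothernonce, pubkeys, tweaks, is_xonly, msg, rand)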
+
+def partial_sig_verify(psig: bytes, pubnonces: List[bytes], pubkeys: List[PlainPk], tweaks: List[bytes], is_xonly: List[bool], msg: bytes, i: int) -> bool:
+ if len(pubnonces) != len(pubkeys):
+ raise ValueError('The `pubnonces` and `pubkeys` arrays must have the same length.')
+ if len(tweaks) != len(is_xonly):
+ raise ValueError('The `tweaks` and `is_xonly` arrays must have the same length.')
+ aggnonce = nonce_agg(pubnonces)
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ return partial_sig_verify_internal(psig, pubnonces[i], pubkeys[i], session_ctx)
+
+def partial_sig_verify_internal(psig: bytes, pubnonce: bytes, pk: bytes, session_ctx: SessionContext) -> bool:
+ (Q, gacc, _, b, R, e) = get_session_values(session_ctx)
+ s = int_from_bytes(psig)
+ if s >= n:
+ return False
+ R_s1 = cpoint(pubnonce[0:33])
+ R_s2 = cpoint(pubnonce[33:66])
+ Re_s_ = point_add(R_s1, point_mul(R_s2, b))
+ Re_s = Re_s_ if has_even_y(R) else point_negate(Re_s_)
+ P = cpoint(pk)
+ if P is None:
+ return False
+ a = get_session_key_agg_coeff(session_ctx, P)
+ g = 1 if has_even_y(Q) else n - 1
+ g_ = g * gacc % n
+ return point_mul(G, s) == point_add(Re_s, point_mul(P, e * a * g_ % n))
+
+def partial_sig_agg(psigs: List[bytes], session_ctx: SessionContext) -> bytes:
+ (Q, _, tacc, _, R, e) = get_session_values(session_ctx)
+ s = 0
+ u = len(psigs)
+ for i in range(u):
+ s_i = int_from_bytes(psigs[i])
+ if s_i >= n:
+ raise InvalidContributionError(i, "psig")
+ s = (s + s_i) % n
+ g = 1 if has_even_y(Q) else n - 1
+ s = (s + e * g * tacc) % n
+ return xbytes(R) + bytes_from_int(s)
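+
+# Example usage (illustrative): the aggregator combines the partial signatures
+# into an ordinary BIP340 signature that verifies against the (tweaked)
+# aggregate key.
+#
+#   sig = partial_sig_agg([psig_1, psig_2], session_ctx)
+#   aggpk = get_xonly_pk(key_agg_and_tweak(pubkeys, tweaks, is_xonly))
+#   assert schnorr_verify(msg, aggpk, sig)
+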
+#
+# The following code is only used for testing.
+#
+
+import json
+import os
+import sys
+
+def fromhex_all(l):
+ return [bytes.fromhex(l_i) for l_i in l]
+
+# Check that calling `try_fn` raises an `exception`. If `exception` is raised,
+# examine it with `except_fn`.
+def assert_raises(exception, try_fn, except_fn):
+ raised = False
+ try:
+ try_fn()
+ except exception as e:
+ raised = True
+ assert(except_fn(e))
+ except BaseException:
+ raise AssertionError("Wrong exception raised in a test.")
+ if not raised:
+ raise AssertionError("Exception was _not_ raised in a test where it was required.")
+
+def get_error_details(test_case):
+ error = test_case["error"]
+ if error["type"] == "invalid_contribution":
+ exception = InvalidContributionError
+ if "contrib" in error:
+ except_fn = lambda e: e.signer == error["signer"] and e.contrib == error["contrib"]
+ else:
+ except_fn = lambda e: e.signer == error["signer"]
+ elif error["type"] == "value":
+ exception = ValueError
+ except_fn = lambda e: str(e) == error["message"]
+ else:
+ raise RuntimeError(f"Invalid error type: {error['type']}")
+ return exception, except_fn
+
+def test_key_sort_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'key_sort_vectors.json')) as f:
+ test_data = json.load(f)
+
+ X = fromhex_all(test_data["pubkeys"])
+ X_sorted = fromhex_all(test_data["sorted_pubkeys"])
+
+ assert key_sort(X) == X_sorted
+
+def test_key_agg_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'key_agg_vectors.json')) as f:
+ test_data = json.load(f)
+
+ X = fromhex_all(test_data["pubkeys"])
+ T = fromhex_all(test_data["tweaks"])
+ valid_test_cases = test_data["valid_test_cases"]
+ error_test_cases = test_data["error_test_cases"]
+
+ for test_case in valid_test_cases:
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ expected = bytes.fromhex(test_case["expected"])
+
+ assert get_xonly_pk(key_agg(pubkeys)) == expected
+
+ for i, test_case in enumerate(error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ tweaks = [T[i] for i in test_case["tweak_indices"]]
+ is_xonly = test_case["is_xonly"]
+
+ assert_raises(exception, lambda: key_agg_and_tweak(pubkeys, tweaks, is_xonly), except_fn)
+
+def test_nonce_gen_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'nonce_gen_vectors.json')) as f:
+ test_data = json.load(f)
+
+ for test_case in test_data["test_cases"]:
+ def get_value(key) -> bytes:
+ return bytes.fromhex(test_case[key])
+
+ def get_value_maybe(key) -> Optional[bytes]:
+ if test_case[key] is not None:
+ return get_value(key)
+ else:
+ return None
+
+ rand_ = get_value("rand_")
+ sk = get_value_maybe("sk")
+ pk = PlainPk(get_value("pk"))
+ aggpk = get_value_maybe("aggpk")
+ if aggpk is not None:
+ aggpk = XonlyPk(aggpk)
+ msg = get_value_maybe("msg")
+ extra_in = get_value_maybe("extra_in")
+ expected_secnonce = get_value("expected_secnonce")
+ expected_pubnonce = get_value("expected_pubnonce")
+
+ assert nonce_gen_internal(rand_, sk, pk, aggpk, msg, extra_in) == (expected_secnonce, expected_pubnonce)
+
+def test_nonce_agg_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'nonce_agg_vectors.json')) as f:
+ test_data = json.load(f)
+
+ pnonce = fromhex_all(test_data["pnonces"])
+ valid_test_cases = test_data["valid_test_cases"]
+ error_test_cases = test_data["error_test_cases"]
+
+ for test_case in valid_test_cases:
+ pubnonces = [pnonce[i] for i in test_case["pnonce_indices"]]
+ expected = bytes.fromhex(test_case["expected"])
+ assert nonce_agg(pubnonces) == expected
+
+ for i, test_case in enumerate(error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+ pubnonces = [pnonce[i] for i in test_case["pnonce_indices"]]
+ assert_raises(exception, lambda: nonce_agg(pubnonces), except_fn)
+
+def test_sign_verify_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'sign_verify_vectors.json')) as f:
+ test_data = json.load(f)
+
+ sk = bytes.fromhex(test_data["sk"])
+ X = fromhex_all(test_data["pubkeys"])
+ # The public key corresponding to sk is at index 0
+ assert X[0] == individual_pk(sk)
+
+ secnonces = fromhex_all(test_data["secnonces"])
+ pnonce = fromhex_all(test_data["pnonces"])
+ # The public nonce corresponding to secnonces[0] is at index 0
+ k_1 = int_from_bytes(secnonces[0][0:32])
+ k_2 = int_from_bytes(secnonces[0][32:64])
+ R_s1 = point_mul(G, k_1)
+ R_s2 = point_mul(G, k_2)
+ assert R_s1 is not None and R_s2 is not None
+ assert pnonce[0] == cbytes(R_s1) + cbytes(R_s2)
+
+ aggnonces = fromhex_all(test_data["aggnonces"])
+ # The aggregate of the first three elements of pnonce is at index 0
+ assert(aggnonces[0] == nonce_agg([pnonce[0], pnonce[1], pnonce[2]]))
+
+ msgs = fromhex_all(test_data["msgs"])
+
+ valid_test_cases = test_data["valid_test_cases"]
+ sign_error_test_cases = test_data["sign_error_test_cases"]
+ verify_fail_test_cases = test_data["verify_fail_test_cases"]
+ verify_error_test_cases = test_data["verify_error_test_cases"]
+
+ for test_case in valid_test_cases:
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ aggnonce = aggnonces[test_case["aggnonce_index"]]
+ # Make sure that pubnonces and aggnonce in the test vector are
+ # consistent
+ assert nonce_agg(pubnonces) == aggnonce
+ msg = msgs[test_case["msg_index"]]
+ signer_index = test_case["signer_index"]
+ expected = bytes.fromhex(test_case["expected"])
+
+ session_ctx = SessionContext(aggnonce, pubkeys, [], [], msg)
+ # WARNING: An actual implementation should _not_ copy the secnonce.
+ # Reusing the secnonce, as we do here for testing purposes, can leak the
+ # secret key.
+ secnonce_tmp = bytearray(secnonces[0])
+ assert sign(secnonce_tmp, sk, session_ctx) == expected
+ assert partial_sig_verify(expected, pubnonces, pubkeys, [], [], msg, signer_index)
+
+ for i, test_case in enumerate(sign_error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ aggnonce = aggnonces[test_case["aggnonce_index"]]
+ msg = msgs[test_case["msg_index"]]
+ secnonce = bytearray(secnonces[test_case["secnonce_index"]])
+
+ session_ctx = SessionContext(aggnonce, pubkeys, [], [], msg)
+ assert_raises(exception, lambda: sign(secnonce, sk, session_ctx), except_fn)
+
+ for test_case in verify_fail_test_cases:
+ sig = bytes.fromhex(test_case["sig"])
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ msg = msgs[test_case["msg_index"]]
+ signer_index = test_case["signer_index"]
+
+ assert not partial_sig_verify(sig, pubnonces, pubkeys, [], [], msg, signer_index)
+
+ for i, test_case in enumerate(verify_error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+
+ sig = bytes.fromhex(test_case["sig"])
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ msg = msgs[test_case["msg_index"]]
+ signer_index = test_case["signer_index"]
+
+ assert_raises(exception, lambda: partial_sig_verify(sig, pubnonces, pubkeys, [], [], msg, signer_index), except_fn)
+
+def test_tweak_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'tweak_vectors.json')) as f:
+ test_data = json.load(f)
+
+ sk = bytes.fromhex(test_data["sk"])
+ X = fromhex_all(test_data["pubkeys"])
+ # The public key corresponding to sk is at index 0
+ assert X[0] == individual_pk(sk)
+
+ secnonce = bytearray(bytes.fromhex(test_data["secnonce"]))
+ pnonce = fromhex_all(test_data["pnonces"])
+ # The public nonce corresponding to secnonce is at index 0
+ k_1 = int_from_bytes(secnonce[0:32])
+ k_2 = int_from_bytes(secnonce[32:64])
+ R_s1 = point_mul(G, k_1)
+ R_s2 = point_mul(G, k_2)
+ assert R_s1 is not None and R_s2 is not None
+ assert pnonce[0] == cbytes(R_s1) + cbytes(R_s2)
+
+ aggnonce = bytes.fromhex(test_data["aggnonce"])
+ # The aggnonce is the aggregate of the first three elements of pnonce
+ assert(aggnonce == nonce_agg([pnonce[0], pnonce[1], pnonce[2]]))
+
+ tweak = fromhex_all(test_data["tweaks"])
+ msg = bytes.fromhex(test_data["msg"])
+
+ valid_test_cases = test_data["valid_test_cases"]
+ error_test_cases = test_data["error_test_cases"]
+
+ for test_case in valid_test_cases:
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ tweaks = [tweak[i] for i in test_case["tweak_indices"]]
+ is_xonly = test_case["is_xonly"]
+ signer_index = test_case["signer_index"]
+ expected = bytes.fromhex(test_case["expected"])
+
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ secnonce_tmp = bytearray(secnonce)
+ # WARNING: An actual implementation should _not_ copy the secnonce.
+ # Reusing the secnonce, as we do here for testing purposes, can leak the
+ # secret key.
+ assert sign(secnonce_tmp, sk, session_ctx) == expected
+ assert partial_sig_verify(expected, pubnonces, pubkeys, tweaks, is_xonly, msg, signer_index)
+
+ for i, test_case in enumerate(error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ tweaks = [tweak[i] for i in test_case["tweak_indices"]]
+ is_xonly = test_case["is_xonly"]
+ signer_index = test_case["signer_index"]
+
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ assert_raises(exception, lambda: sign(secnonce, sk, session_ctx), except_fn)
+
+def test_det_sign_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'det_sign_vectors.json')) as f:
+ test_data = json.load(f)
+
+ sk = bytes.fromhex(test_data["sk"])
+ X = fromhex_all(test_data["pubkeys"])
+ # The public key corresponding to sk is at index 0
+ assert X[0] == individual_pk(sk)
+
+ msgs = fromhex_all(test_data["msgs"])
+
+ valid_test_cases = test_data["valid_test_cases"]
+ error_test_cases = test_data["error_test_cases"]
+
+ for test_case in valid_test_cases:
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ aggothernonce = bytes.fromhex(test_case["aggothernonce"])
+ tweaks = fromhex_all(test_case["tweaks"])
+ is_xonly = test_case["is_xonly"]
+ msg = msgs[test_case["msg_index"]]
+ signer_index = test_case["signer_index"]
+ rand = bytes.fromhex(test_case["rand"]) if test_case["rand"] is not None else None
+ expected = fromhex_all(test_case["expected"])
+
+ pubnonce, psig = deterministic_sign(sk, aggothernonce, pubkeys, tweaks, is_xonly, msg, rand)
+ assert pubnonce == expected[0]
+ assert psig == expected[1]
+
+ pubnonces = [aggothernonce, pubnonce]
+ aggnonce = nonce_agg(pubnonces)
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ assert partial_sig_verify_internal(psig, pubnonce, pubkeys[signer_index], session_ctx)
+
+ for i, test_case in enumerate(error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ aggothernonce = bytes.fromhex(test_case["aggothernonce"])
+ tweaks = fromhex_all(test_case["tweaks"])
+ is_xonly = test_case["is_xonly"]
+ msg = msgs[test_case["msg_index"]]
+ signer_index = test_case["signer_index"]
+ rand = bytes.fromhex(test_case["rand"]) if test_case["rand"] is not None else None
+
+ try_fn = lambda: deterministic_sign(sk, aggothernonce, pubkeys, tweaks, is_xonly, msg, rand)
+ assert_raises(exception, try_fn, except_fn)
+
+def test_sig_agg_vectors() -> None:
+ with open(os.path.join(sys.path[0], 'vectors', 'sig_agg_vectors.json')) as f:
+ test_data = json.load(f)
+
+ X = fromhex_all(test_data["pubkeys"])
+
+ # These nonces are only required if the tested API takes the individual
+ # nonces and not the aggregate nonce.
+ pnonce = fromhex_all(test_data["pnonces"])
+
+ tweak = fromhex_all(test_data["tweaks"])
+ psig = fromhex_all(test_data["psigs"])
+
+ msg = bytes.fromhex(test_data["msg"])
+
+ valid_test_cases = test_data["valid_test_cases"]
+ error_test_cases = test_data["error_test_cases"]
+
+ for test_case in valid_test_cases:
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ aggnonce = bytes.fromhex(test_case["aggnonce"])
+ assert aggnonce == nonce_agg(pubnonces)
+
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ tweaks = [tweak[i] for i in test_case["tweak_indices"]]
+ is_xonly = test_case["is_xonly"]
+ psigs = [psig[i] for i in test_case["psig_indices"]]
+ expected = bytes.fromhex(test_case["expected"])
+
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ sig = partial_sig_agg(psigs, session_ctx)
+ assert sig == expected
+ aggpk = get_xonly_pk(key_agg_and_tweak(pubkeys, tweaks, is_xonly))
+ assert schnorr_verify(msg, aggpk, sig)
+
+ for i, test_case in enumerate(error_test_cases):
+ exception, except_fn = get_error_details(test_case)
+
+ pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
+ aggnonce = nonce_agg(pubnonces)
+
+ pubkeys = [X[i] for i in test_case["key_indices"]]
+ tweaks = [tweak[i] for i in test_case["tweak_indices"]]
+ is_xonly = test_case["is_xonly"]
+ psigs = [psig[i] for i in test_case["psig_indices"]]
+
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ assert_raises(exception, lambda: partial_sig_agg(psigs, session_ctx), except_fn)
+
+def test_sign_and_verify_random(iters: int) -> None:
+ for i in range(iters):
+ sk_1 = secrets.token_bytes(32)
+ sk_2 = secrets.token_bytes(32)
+ pk_1 = individual_pk(sk_1)
+ pk_2 = individual_pk(sk_2)
+ pubkeys = [pk_1, pk_2]
+
+ # In this example, the message and aggregate pubkey are known
+ # before nonce generation, so they can be passed into the nonce
+ # generation function as a defense-in-depth measure to protect
+ # against nonce reuse.
+ #
+        # If these values are not known when nonce_gen is called, None can be
+        # passed in for the corresponding arguments instead.
+ msg = secrets.token_bytes(32)
+ v = secrets.randbelow(4)
+ tweaks = [secrets.token_bytes(32) for _ in range(v)]
+ is_xonly = [secrets.choice([False, True]) for _ in range(v)]
+ aggpk = get_xonly_pk(key_agg_and_tweak(pubkeys, tweaks, is_xonly))
+
+ # Use a non-repeating counter for extra_in
+ secnonce_1, pubnonce_1 = nonce_gen(sk_1, pk_1, aggpk, msg, i.to_bytes(4, 'big'))
+
+ # On even iterations use regular signing algorithm for signer 2,
+ # otherwise use deterministic signing algorithm
+ if i % 2 == 0:
+ # Use a clock for extra_in
+ t = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
+ secnonce_2, pubnonce_2 = nonce_gen(sk_2, pk_2, aggpk, msg, t.to_bytes(8, 'big'))
+ else:
+ aggothernonce = nonce_agg([pubnonce_1])
+ rand = secrets.token_bytes(32)
+ pubnonce_2, psig_2 = deterministic_sign(sk_2, aggothernonce, pubkeys, tweaks, is_xonly, msg, rand)
+
+ pubnonces = [pubnonce_1, pubnonce_2]
+ aggnonce = nonce_agg(pubnonces)
+
+ session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
+ psig_1 = sign(secnonce_1, sk_1, session_ctx)
+ assert partial_sig_verify(psig_1, pubnonces, pubkeys, tweaks, is_xonly, msg, 0)
+ # An exception is thrown if secnonce_1 is accidentally reused
+ assert_raises(ValueError, lambda: sign(secnonce_1, sk_1, session_ctx), lambda e: True)
+
+ # Wrong signer index
+ assert not partial_sig_verify(psig_1, pubnonces, pubkeys, tweaks, is_xonly, msg, 1)
+
+ # Wrong message
+ assert not partial_sig_verify(psig_1, pubnonces, pubkeys, tweaks, is_xonly, secrets.token_bytes(32), 0)
+
+ if i % 2 == 0:
+ psig_2 = sign(secnonce_2, sk_2, session_ctx)
+ assert partial_sig_verify(psig_2, pubnonces, pubkeys, tweaks, is_xonly, msg, 1)
+
+ sig = partial_sig_agg([psig_1, psig_2], session_ctx)
+ assert schnorr_verify(msg, aggpk, sig)
+
+if __name__ == '__main__':
+ test_key_sort_vectors()
+ test_key_agg_vectors()
+ test_nonce_gen_vectors()
+ test_nonce_agg_vectors()
+ test_sign_verify_vectors()
+ test_tweak_vectors()
+ test_det_sign_vectors()
+ test_sig_agg_vectors()
+ test_sign_and_verify_random(6)
diff --git a/bip-0327/tests.sh b/bip-0327/tests.sh
new file mode 100755
index 0000000..b363f40
--- /dev/null
+++ b/bip-0327/tests.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+mypy --no-error-summary reference.py
+python3 reference.py
+python3 gen_vectors_helper.py > /dev/null
diff --git a/bip-0327/vectors/det_sign_vectors.json b/bip-0327/vectors/det_sign_vectors.json
new file mode 100644
index 0000000..261669c
--- /dev/null
+++ b/bip-0327/vectors/det_sign_vectors.json
@@ -0,0 +1,144 @@
+{
+ "sk": "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
+ "pubkeys": [
+ "03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
+ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
+ "020000000000000000000000000000000000000000000000000000000000000007"
+ ],
+ "msgs": [
+ "F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
+ "2626262626262626262626262626262626262626262626262626262626262626262626262626"
+ ],
+ "valid_test_cases": [
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [0, 1, 2],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 0,
+ "signer_index": 0,
+ "expected": [
+ "03D96275257C2FCCBB6EEB77BDDF51D3C88C26EE1626C6CDA8999B9D34F4BA13A60309BE2BF883C6ABE907FA822D9CA166D51A3DCC28910C57528F6983FC378B7843",
+ "41EA65093F71D084785B20DC26A887CD941C9597860A21660CBDB9CC2113CAD3"
+ ]
+ },
+ {
+ "rand": null,
+ "aggothernonce": "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [1, 0, 2],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 0,
+ "signer_index": 1,
+ "expected": [
+ "028FBCCF5BB73A7B61B270BAD15C0F9475D577DD85C2157C9D38BEF1EC922B48770253BE3638C87369BC287E446B7F2C8CA5BEB9FFBD1EA082C62913982A65FC214D",
+ "AEAA31262637BFA88D5606679018A0FEEEC341F3107D1199857F6C81DE61B8DD"
+ ]
+ },
+ {
+ "rand": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+ "aggothernonce": "0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
+ "key_indices": [1, 2, 0],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 1,
+ "signer_index": 2,
+ "expected": [
+ "024FA8D774F0C8743FAA77AFB4D08EE5A013C2E8EEAD8A6F08A77DDD2D28266DB803050905E8C994477F3F2981861A2E3791EF558626E645FBF5AA131C5D6447C2C2",
+ "FEE28A56B8556B7632E42A84122C51A4861B1F2DEC7E81B632195E56A52E3E13"
+ ],
+ "comment": "Message longer than 32 bytes"
+ },
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "032DE2662628C90B03F5E720284EB52FF7D71F4284F627B68A853D78C78E1FFE9303E4C5524E83FFE1493B9077CF1CA6BEB2090C93D930321071AD40B2F44E599046",
+ "key_indices": [0, 1, 2],
+ "tweaks": ["E8F791FF9225A2AF0102AFFF4A9A723D9612A682A25EBE79802B263CDFCD83BB"],
+ "is_xonly": [true],
+ "msg_index": 0,
+ "signer_index": 0,
+ "expected": [
+ "031E07C0D11A0134E55DB1FC16095ADCBD564236194374AA882BFB3C78273BF673039D0336E8CA6288C00BFC1F8B594563529C98661172B9BC1BE85C23A4CE1F616B",
+ "7B1246C5889E59CB0375FA395CC86AC42D5D7D59FD8EAB4FDF1DCAB2B2F006EA"
+ ],
+ "comment": "Tweaked public key"
+ }
+ ],
+ "error_test_cases": [
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [1, 0, 3],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 0,
+ "signer_index": 1,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 2,
+ "contrib": "pubkey"
+ },
+ "comment": "Signer 2 provided an invalid public key"
+ },
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [1, 2],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 0,
+ "signer_index": 1,
+ "error": {
+ "type": "value",
+ "message": "The signer's pubkey must be included in the list of pubkeys."
+ },
+ "comment": "The signers pubkey is not in the list of pubkeys"
+ },
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "0437C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [1, 2, 0],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 0,
+ "signer_index": 2,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": null,
+ "contrib": "aggothernonce"
+ },
+ "comment": "aggothernonce is invalid due wrong tag, 0x04, in the first half"
+ },
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "0000000000000000000000000000000000000000000000000000000000000000000287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [1, 2, 0],
+ "tweaks": [],
+ "is_xonly": [],
+ "msg_index": 0,
+ "signer_index": 2,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": null,
+ "contrib": "aggothernonce"
+ },
+ "comment": "aggothernonce is invalid because first half corresponds to point at infinity"
+ },
+ {
+ "rand": "0000000000000000000000000000000000000000000000000000000000000000",
+ "aggothernonce": "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "key_indices": [1, 2, 0],
+ "tweaks": ["FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"],
+ "is_xonly": [false],
+ "msg_index": 0,
+ "signer_index": 2,
+ "error": {
+ "type": "value",
+ "message": "The tweak must be less than n."
+ },
+ "comment": "Tweak is invalid because it exceeds group size"
+ }
+ ]
+}
diff --git a/bip-0327/vectors/key_agg_vectors.json b/bip-0327/vectors/key_agg_vectors.json
new file mode 100644
index 0000000..b2e623d
--- /dev/null
+++ b/bip-0327/vectors/key_agg_vectors.json
@@ -0,0 +1,88 @@
+{
+ "pubkeys": [
+ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
+ "023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
+ "020000000000000000000000000000000000000000000000000000000000000005",
+ "02FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30",
+ "04F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9"
+ ],
+ "tweaks": [
+ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
+ "252E4BD67410A76CDF933D30EAA1608214037F1B105A013ECCD3C5C184A6110B"
+ ],
+ "valid_test_cases": [
+ {
+ "key_indices": [0, 1, 2],
+ "expected": "90539EEDE565F5D054F32CC0C220126889ED1E5D193BAF15AEF344FE59D4610C"
+ },
+ {
+ "key_indices": [2, 1, 0],
+ "expected": "6204DE8B083426DC6EAF9502D27024D53FC826BF7D2012148A0575435DF54B2B"
+ },
+ {
+ "key_indices": [0, 0, 0],
+ "expected": "B436E3BAD62B8CD409969A224731C193D051162D8C5AE8B109306127DA3AA935"
+ },
+ {
+ "key_indices": [0, 0, 1, 1],
+ "expected": "69BC22BFA5D106306E48A20679DE1D7389386124D07571D0D872686028C26A3E"
+ }
+ ],
+ "error_test_cases": [
+ {
+ "key_indices": [0, 3],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 1,
+ "contrib": "pubkey"
+ },
+ "comment": "Invalid public key"
+ },
+ {
+ "key_indices": [0, 4],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 1,
+ "contrib": "pubkey"
+ },
+ "comment": "Public key exceeds field size"
+ },
+ {
+ "key_indices": [5, 0],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 0,
+ "contrib": "pubkey"
+ },
+ "comment": "First byte of public key is not 2 or 3"
+ },
+ {
+ "key_indices": [0, 1],
+ "tweak_indices": [0],
+ "is_xonly": [true],
+ "error": {
+ "type": "value",
+ "message": "The tweak must be less than n."
+ },
+ "comment": "Tweak is out of range"
+ },
+ {
+ "key_indices": [6],
+ "tweak_indices": [1],
+ "is_xonly": [false],
+ "error": {
+ "type": "value",
+ "message": "The result of tweaking cannot be infinity."
+ },
+ "comment": "Intermediate tweaking result is point at infinity"
+ }
+ ]
+}
diff --git a/bip-0327/vectors/key_sort_vectors.json b/bip-0327/vectors/key_sort_vectors.json
new file mode 100644
index 0000000..de088a7
--- /dev/null
+++ b/bip-0327/vectors/key_sort_vectors.json
@@ -0,0 +1,18 @@
+{
+ "pubkeys": [
+ "02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
+ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
+ "023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
+ "02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EFF",
+ "02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8"
+ ],
+ "sorted_pubkeys": [
+ "023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
+ "02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
+ "02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
+ "02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EFF",
+ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"
+ ]
+}
diff --git a/bip-0327/vectors/nonce_agg_vectors.json b/bip-0327/vectors/nonce_agg_vectors.json
new file mode 100644
index 0000000..1c04b88
--- /dev/null
+++ b/bip-0327/vectors/nonce_agg_vectors.json
@@ -0,0 +1,51 @@
+{
+ "pnonces": [
+ "020151C80F435648DF67A22B749CD798CE54E0321D034B92B709B567D60A42E66603BA47FBC1834437B3212E89A84D8425E7BF12E0245D98262268EBDCB385D50641",
+ "03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B833",
+ "020151C80F435648DF67A22B749CD798CE54E0321D034B92B709B567D60A42E6660279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
+ "03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60379BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
+ "04FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B833",
+ "03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B831",
+ "03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A602FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30"
+ ],
+ "valid_test_cases": [
+ {
+ "pnonce_indices": [0, 1],
+ "expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B024725377345BDE0E9C33AF3C43C0A29A9249F2F2956FA8CFEB55C8573D0262DC8"
+ },
+ {
+ "pnonce_indices": [2, 3],
+ "expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B000000000000000000000000000000000000000000000000000000000000000000",
+ "comment": "Sum of second points encoded in the nonces is point at infinity which is serialized as 33 zero bytes"
+ }
+ ],
+ "error_test_cases": [
+ {
+ "pnonce_indices": [0, 4],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 1,
+ "contrib": "pubnonce"
+ },
+ "comment": "Public nonce from signer 1 is invalid due wrong tag, 0x04, in the first half"
+ },
+ {
+ "pnonce_indices": [5, 1],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 0,
+ "contrib": "pubnonce"
+ },
+ "comment": "Public nonce from signer 0 is invalid because the second half does not correspond to an X coordinate"
+ },
+ {
+ "pnonce_indices": [6, 1],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 0,
+ "contrib": "pubnonce"
+ },
+ "comment": "Public nonce from signer 0 is invalid because second half exceeds field size"
+ }
+ ]
+}
diff --git a/bip-0327/vectors/nonce_gen_vectors.json b/bip-0327/vectors/nonce_gen_vectors.json
new file mode 100644
index 0000000..ced946f
--- /dev/null
+++ b/bip-0327/vectors/nonce_gen_vectors.json
@@ -0,0 +1,44 @@
+{
+ "test_cases": [
+ {
+ "rand_": "0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F",
+ "sk": "0202020202020202020202020202020202020202020202020202020202020202",
+ "pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
+ "aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
+ "msg": "0101010101010101010101010101010101010101010101010101010101010101",
+ "extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
+ "expected_secnonce": "B114E502BEAA4E301DD08A50264172C84E41650E6CB726B410C0694D59EFFB6495B5CAF28D045B973D63E3C99A44B807BDE375FD6CB39E46DC4A511708D0E9D2024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
+ "expected_pubnonce": "02F7BE7089E8376EB355272368766B17E88E7DB72047D05E56AA881EA52B3B35DF02C29C8046FDD0DED4C7E55869137200FBDBFE2EB654267B6D7013602CAED3115A"
+ },
+ {
+ "rand_": "0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F",
+ "sk": "0202020202020202020202020202020202020202020202020202020202020202",
+ "pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
+ "aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
+ "msg": "",
+ "extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
+ "expected_secnonce": "E862B068500320088138468D47E0E6F147E01B6024244AE45EAC40ACE5929B9F0789E051170B9E705D0B9EB49049A323BBBBB206D8E05C19F46C6228742AA7A9024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
+ "expected_pubnonce": "023034FA5E2679F01EE66E12225882A7A48CC66719B1B9D3B6C4DBD743EFEDA2C503F3FD6F01EB3A8E9CB315D73F1F3D287CAFBB44AB321153C6287F407600205109"
+ },
+ {
+ "rand_": "0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F",
+ "sk": "0202020202020202020202020202020202020202020202020202020202020202",
+ "pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
+ "aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
+ "msg": "2626262626262626262626262626262626262626262626262626262626262626262626262626",
+ "extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
+ "expected_secnonce": "3221975ACBDEA6820EABF02A02B7F27D3A8EF68EE42787B88CBEFD9AA06AF3632EE85B1A61D8EF31126D4663A00DD96E9D1D4959E72D70FE5EBB6E7696EBA66F024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
+ "expected_pubnonce": "02E5BBC21C69270F59BD634FCBFA281BE9D76601295345112C58954625BF23793A021307511C79F95D38ACACFF1B4DA98228B77E65AA216AD075E9673286EFB4EAF3"
+ },
+ {
+ "rand_": "0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F",
+ "sk": null,
+ "pk": "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "aggpk": null,
+ "msg": null,
+ "extra_in": null,
+ "expected_secnonce": "89BDD787D0284E5E4D5FC572E49E316BAB7E21E3B1830DE37DFE80156FA41A6D0B17AE8D024C53679699A6FD7944D9C4A366B514BAF43088E0708B1023DD289702F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "expected_pubnonce": "02C96E7CB1E8AA5DAC64D872947914198F607D90ECDE5200DE52978AD5DED63C000299EC5117C2D29EDEE8A2092587C3909BE694D5CFF0667D6C02EA4059F7CD9786"
+ }
+ ]
+}
diff --git a/bip-0327/vectors/sig_agg_vectors.json b/bip-0327/vectors/sig_agg_vectors.json
new file mode 100644
index 0000000..04a7bc6
--- /dev/null
+++ b/bip-0327/vectors/sig_agg_vectors.json
@@ -0,0 +1,151 @@
+{
+ "pubkeys": [
+ "03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
+ "02D2DC6F5DF7C56ACF38C7FA0AE7A759AE30E19B37359DFDE015872324C7EF6E05",
+ "03C7FB101D97FF930ACD0C6760852EF64E69083DE0B06AC6335724754BB4B0522C",
+ "02352433B21E7E05D3B452B81CAE566E06D2E003ECE16D1074AABA4289E0E3D581"
+ ],
+ "pnonces": [
+ "036E5EE6E28824029FEA3E8A9DDD2C8483F5AF98F7177C3AF3CB6F47CAF8D94AE902DBA67E4A1F3680826172DA15AFB1A8CA85C7C5CC88900905C8DC8C328511B53E",
+ "03E4F798DA48A76EEC1C9CC5AB7A880FFBA201A5F064E627EC9CB0031D1D58FC5103E06180315C5A522B7EC7C08B69DCD721C313C940819296D0A7AB8E8795AC1F00",
+ "02C0068FD25523A31578B8077F24F78F5BD5F2422AFF47C1FADA0F36B3CEB6C7D202098A55D1736AA5FCC21CF0729CCE852575C06C081125144763C2C4C4A05C09B6",
+ "031F5C87DCFBFCF330DEE4311D85E8F1DEA01D87A6F1C14CDFC7E4F1D8C441CFA40277BF176E9F747C34F81B0D9F072B1B404A86F402C2D86CF9EA9E9C69876EA3B9",
+ "023F7042046E0397822C4144A17F8B63D78748696A46C3B9F0A901D296EC3406C302022B0B464292CF9751D699F10980AC764E6F671EFCA15069BBE62B0D1C62522A",
+ "02D97DDA5988461DF58C5897444F116A7C74E5711BF77A9446E27806563F3B6C47020CBAD9C363A7737F99FA06B6BE093CEAFF5397316C5AC46915C43767AE867C00"
+ ],
+ "tweaks": [
+ "B511DA492182A91B0FFB9A98020D55F260AE86D7ECBD0399C7383D59A5F2AF7C",
+ "A815FE049EE3C5AAB66310477FBC8BCCCAC2F3395F59F921C364ACD78A2F48DC",
+ "75448A87274B056468B977BE06EB1E9F657577B7320B0A3376EA51FD420D18A8"
+ ],
+ "psigs": [
+ "B15D2CD3C3D22B04DAE438CE653F6B4ECF042F42CFDED7C41B64AAF9B4AF53FB",
+ "6193D6AC61B354E9105BBDC8937A3454A6D705B6D57322A5A472A02CE99FCB64",
+ "9A87D3B79EC67228CB97878B76049B15DBD05B8158D17B5B9114D3C226887505",
+ "66F82EA90923689B855D36C6B7E032FB9970301481B99E01CDB4D6AC7C347A15",
+ "4F5AEE41510848A6447DCD1BBC78457EF69024944C87F40250D3EF2C25D33EFE",
+ "DDEF427BBB847CC027BEFF4EDB01038148917832253EBC355FC33F4A8E2FCCE4",
+ "97B890A26C981DA8102D3BC294159D171D72810FDF7C6A691DEF02F0F7AF3FDC",
+ "53FA9E08BA5243CBCB0D797C5EE83BC6728E539EB76C2D0BF0F971EE4E909971",
+ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"
+ ],
+ "msg": "599C67EA410D005B9DA90817CF03ED3B1C868E4DA4EDF00A5880B0082C237869",
+ "valid_test_cases": [
+ {
+ "aggnonce": "0341432722C5CD0268D829C702CF0D1CBCE57033EED201FD335191385227C3210C03D377F2D258B64AADC0E16F26462323D701D286046A2EA93365656AFD9875982B",
+ "nonce_indices": [
+ 0,
+ 1
+ ],
+ "key_indices": [
+ 0,
+ 1
+ ],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "psig_indices": [
+ 0,
+ 1
+ ],
+ "expected": "041DA22223CE65C92C9A0D6C2CAC828AAF1EEE56304FEC371DDF91EBB2B9EF0912F1038025857FEDEB3FF696F8B99FA4BB2C5812F6095A2E0004EC99CE18DE1E"
+ },
+ {
+ "aggnonce": "0224AFD36C902084058B51B5D36676BBA4DC97C775873768E58822F87FE437D792028CB15929099EEE2F5DAE404CD39357591BA32E9AF4E162B8D3E7CB5EFE31CB20",
+ "nonce_indices": [
+ 0,
+ 2
+ ],
+ "key_indices": [
+ 0,
+ 2
+ ],
+ "tweak_indices": [],
+ "is_xonly": [],
+ "psig_indices": [
+ 2,
+ 3
+ ],
+ "expected": "1069B67EC3D2F3C7C08291ACCB17A9C9B8F2819A52EB5DF8726E17E7D6B52E9F01800260A7E9DAC450F4BE522DE4CE12BA91AEAF2B4279219EF74BE1D286ADD9"
+ },
+ {
+ "aggnonce": "0208C5C438C710F4F96A61E9FF3C37758814B8C3AE12BFEA0ED2C87FF6954FF186020B1816EA104B4FCA2D304D733E0E19CEAD51303FF6420BFD222335CAA402916D",
+ "nonce_indices": [
+ 0,
+ 3
+ ],
+ "key_indices": [
+ 0,
+ 2
+ ],
+ "tweak_indices": [
+ 0
+ ],
+ "is_xonly": [
+ false
+ ],
+ "psig_indices": [
+ 4,
+ 5
+ ],
+ "expected": "5C558E1DCADE86DA0B2F02626A512E30A22CF5255CAEA7EE32C38E9A71A0E9148BA6C0E6EC7683B64220F0298696F1B878CD47B107B81F7188812D593971E0CC"
+ },
+ {
+ "aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
+ "nonce_indices": [
+ 0,
+ 4
+ ],
+ "key_indices": [
+ 0,
+ 3
+ ],
+ "tweak_indices": [
+ 0,
+ 1,
+ 2
+ ],
+ "is_xonly": [
+ true,
+ false,
+ true
+ ],
+ "psig_indices": [
+ 6,
+ 7
+ ],
+ "expected": "839B08820B681DBA8DAF4CC7B104E8F2638F9388F8D7A555DC17B6E6971D7426CE07BF6AB01F1DB50E4E33719295F4094572B79868E440FB3DEFD3FAC1DB589E"
+ }
+ ],
+ "error_test_cases": [
+ {
+ "aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
+ "nonce_indices": [
+ 0,
+ 4
+ ],
+ "key_indices": [
+ 0,
+ 3
+ ],
+ "tweak_indices": [
+ 0,
+ 1,
+ 2
+ ],
+ "is_xonly": [
+ true,
+ false,
+ true
+ ],
+ "psig_indices": [
+ 7,
+ 8
+ ],
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 1
+ },
+ "comment": "Partial signature is invalid because it exceeds group size"
+ }
+ ]
+}
diff --git a/bip-0327/vectors/sign_verify_vectors.json b/bip-0327/vectors/sign_verify_vectors.json
new file mode 100644
index 0000000..b467640
--- /dev/null
+++ b/bip-0327/vectors/sign_verify_vectors.json
@@ -0,0 +1,212 @@
+{
+ "sk": "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
+ "pubkeys": [
+ "03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
+ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA661",
+ "020000000000000000000000000000000000000000000000000000000000000007"
+ ],
+ "secnonces": [
+ "508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F703935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
+ "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9"
+ ],
+ "pnonces": [
+ "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
+ "032DE2662628C90B03F5E720284EB52FF7D71F4284F627B68A853D78C78E1FFE9303E4C5524E83FFE1493B9077CF1CA6BEB2090C93D930321071AD40B2F44E599046",
+ "0237C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0387BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "0200000000000000000000000000000000000000000000000000000000000000090287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480"
+ ],
+ "aggnonces": [
+ "028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "048465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
+ "028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61020000000000000000000000000000000000000000000000000000000000000009",
+ "028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD6102FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30"
+ ],
+ "msgs": [
+ "F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
+ "",
+ "2626262626262626262626262626262626262626262626262626262626262626262626262626"
+ ],
+ "valid_test_cases": [
+ {
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "aggnonce_index": 0,
+ "msg_index": 0,
+ "signer_index": 0,
+ "expected": "012ABBCB52B3016AC03AD82395A1A415C48B93DEF78718E62A7A90052FE224FB"
+ },
+ {
+ "key_indices": [1, 0, 2],
+ "nonce_indices": [1, 0, 2],
+ "aggnonce_index": 0,
+ "msg_index": 0,
+ "signer_index": 1,
+ "expected": "9FF2F7AAA856150CC8819254218D3ADEEB0535269051897724F9DB3789513A52"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "aggnonce_index": 0,
+ "msg_index": 0,
+ "signer_index": 2,
+ "expected": "FA23C359F6FAC4E7796BB93BC9F0532A95468C539BA20FF86D7C76ED92227900"
+ },
+ {
+ "key_indices": [0, 1],
+ "nonce_indices": [0, 3],
+ "aggnonce_index": 1,
+ "msg_index": 0,
+ "signer_index": 0,
+ "expected": "AE386064B26105404798F75DE2EB9AF5EDA5387B064B83D049CB7C5E08879531",
+ "comment": "Both halves of aggregate nonce correspond to point at infinity"
+ },
+ {
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "aggnonce_index": 0,
+ "msg_index": 1,
+ "signer_index": 0,
+ "expected": "D7D63FFD644CCDA4E62BC2BC0B1D02DD32A1DC3030E155195810231D1037D82D",
+ "comment": "Empty message"
+ },
+ {
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "aggnonce_index": 0,
+ "msg_index": 2,
+ "signer_index": 0,
+ "expected": "E184351828DA5094A97C79CABDAAA0BFB87608C32E8829A4DF5340A6F243B78C",
+ "comment": "38-byte message"
+ }
+ ],
+ "sign_error_test_cases": [
+ {
+ "key_indices": [1, 2],
+ "aggnonce_index": 0,
+ "msg_index": 0,
+ "secnonce_index": 0,
+ "error": {
+ "type": "value",
+ "message": "The signer's pubkey must be included in the list of pubkeys."
+ },
+ "comment": "The signers pubkey is not in the list of pubkeys. This test case is optional: it can be skipped by implementations that do not check that the signer's pubkey is included in the list of pubkeys."
+ },
+ {
+ "key_indices": [1, 0, 3],
+ "aggnonce_index": 0,
+ "msg_index": 0,
+ "secnonce_index": 0,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 2,
+ "contrib": "pubkey"
+ },
+ "comment": "Signer 2 provided an invalid public key"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "aggnonce_index": 2,
+ "msg_index": 0,
+ "secnonce_index": 0,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": null,
+ "contrib": "aggnonce"
+ },
+ "comment": "Aggregate nonce is invalid due wrong tag, 0x04, in the first half"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "aggnonce_index": 3,
+ "msg_index": 0,
+ "secnonce_index": 0,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": null,
+ "contrib": "aggnonce"
+ },
+ "comment": "Aggregate nonce is invalid because the second half does not correspond to an X coordinate"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "aggnonce_index": 4,
+ "msg_index": 0,
+ "secnonce_index": 0,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": null,
+ "contrib": "aggnonce"
+ },
+ "comment": "Aggregate nonce is invalid because second half exceeds field size"
+ },
+ {
+ "key_indices": [0, 1, 2],
+ "aggnonce_index": 0,
+ "msg_index": 0,
+ "signer_index": 0,
+ "secnonce_index": 1,
+ "error": {
+ "type": "value",
+ "message": "first secnonce value is out of range."
+ },
+ "comment": "Secnonce is invalid which may indicate nonce reuse"
+ }
+ ],
+ "verify_fail_test_cases": [
+ {
+ "sig": "97AC833ADCB1AFA42EBF9E0725616F3C9A0D5B614F6FE283CEAAA37A8FFAF406",
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "msg_index": 0,
+ "signer_index": 0,
+ "comment": "Wrong signature (which is equal to the negation of valid signature)"
+ },
+ {
+ "sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "msg_index": 0,
+ "signer_index": 1,
+ "comment": "Wrong signer"
+ },
+ {
+ "sig": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "msg_index": 0,
+ "signer_index": 0,
+ "comment": "Signature exceeds group size"
+ }
+ ],
+ "verify_error_test_cases": [
+ {
+ "sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
+ "key_indices": [0, 1, 2],
+ "nonce_indices": [4, 1, 2],
+ "msg_index": 0,
+ "signer_index": 0,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 0,
+ "contrib": "pubnonce"
+ },
+ "comment": "Invalid pubnonce"
+ },
+ {
+ "sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
+ "key_indices": [3, 1, 2],
+ "nonce_indices": [0, 1, 2],
+ "msg_index": 0,
+ "signer_index": 0,
+ "error": {
+ "type": "invalid_contribution",
+ "signer": 0,
+ "contrib": "pubkey"
+ },
+ "comment": "Invalid pubkey"
+ }
+ ]
+}
diff --git a/bip-0327/vectors/tweak_vectors.json b/bip-0327/vectors/tweak_vectors.json
new file mode 100644
index 0000000..d0a7cfe
--- /dev/null
+++ b/bip-0327/vectors/tweak_vectors.json
@@ -0,0 +1,84 @@
+{
+ "sk": "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
+ "pubkeys": [
+ "03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
+ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
+ "02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"
+ ],
+ "secnonce": "508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F703935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
+ "pnonces": [
+ "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
+ "0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
+ "032DE2662628C90B03F5E720284EB52FF7D71F4284F627B68A853D78C78E1FFE9303E4C5524E83FFE1493B9077CF1CA6BEB2090C93D930321071AD40B2F44E599046"
+ ],
+ "aggnonce": "028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
+ "tweaks": [
+ "E8F791FF9225A2AF0102AFFF4A9A723D9612A682A25EBE79802B263CDFCD83BB",
+ "AE2EA797CC0FE72AC5B97B97F3C6957D7E4199A167A58EB08BCAFFDA70AC0455",
+ "F52ECBC565B3D8BEA2DFD5B75A4F457E54369809322E4120831626F290FA87E0",
+ "1969AD73CC177FA0B4FCED6DF1F7BF9907E665FDE9BA196A74FED0A3CF5AEF9D",
+ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"
+ ],
+ "msg": "F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
+ "valid_test_cases": [
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "tweak_indices": [0],
+ "is_xonly": [true],
+ "signer_index": 2,
+ "expected": "E28A5C66E61E178C2BA19DB77B6CF9F7E2F0F56C17918CD13135E60CC848FE91",
+ "comment": "A single x-only tweak"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "tweak_indices": [0],
+ "is_xonly": [false],
+ "signer_index": 2,
+ "expected": "38B0767798252F21BF5702C48028B095428320F73A4B14DB1E25DE58543D2D2D",
+ "comment": "A single plain tweak"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "tweak_indices": [0, 1],
+ "is_xonly": [false, true],
+ "signer_index": 2,
+ "expected": "408A0A21C4A0F5DACAF9646AD6EB6FECD7F7A11F03ED1F48DFFF2185BC2C2408",
+ "comment": "A plain tweak followed by an x-only tweak"
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "tweak_indices": [0, 1, 2, 3],
+ "is_xonly": [false, false, true, true],
+ "signer_index": 2,
+ "expected": "45ABD206E61E3DF2EC9E264A6FEC8292141A633C28586388235541F9ADE75435",
+ "comment": "Four tweaks: plain, plain, x-only, x-only."
+ },
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "tweak_indices": [0, 1, 2, 3],
+ "is_xonly": [true, false, true, false],
+ "signer_index": 2,
+ "expected": "B255FDCAC27B40C7CE7848E2D3B7BF5EA0ED756DA81565AC804CCCA3E1D5D239",
+ "comment": "Four tweaks: x-only, plain, x-only, plain. If an implementation prohibits applying plain tweaks after x-only tweaks, it can skip this test vector or return an error."
+ }
+ ],
+ "error_test_cases": [
+ {
+ "key_indices": [1, 2, 0],
+ "nonce_indices": [1, 2, 0],
+ "tweak_indices": [4],
+ "is_xonly": [false],
+ "signer_index": 2,
+ "error": {
+ "type": "value",
+ "message": "The tweak must be less than n."
+ },
+ "comment": "Tweak is invalid because it exceeds group size"
+ }
+ ]
+}
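A minimal sketch (not part of the BIP) of how the index-based references in the JSON vectors above resolve into concrete byte strings; the repository-relative path and the call into the BIP-327 reference signer are assumptions and are only indicated in comments:

<source lang="python">
import json

# Resolve the index-based references of a BIP-327 tweak test vector file.
# Every *_indices field points into the corresponding top-level array.
with open("bip-0327/vectors/tweak_vectors.json") as f:   # assumed path
    data = json.load(f)

for case in data["valid_test_cases"]:
    pubkeys = [bytes.fromhex(data["pubkeys"][i]) for i in case["key_indices"]]
    pnonces = [bytes.fromhex(data["pnonces"][i]) for i in case["nonce_indices"]]
    tweaks = [bytes.fromhex(data["tweaks"][i]) for i in case["tweak_indices"]]
    is_xonly = case["is_xonly"]                 # parallel to tweaks
    signer_index = case["signer_index"]         # this signer's position in pubkeys
    expected = bytes.fromhex(case["expected"])  # expected partial signature
    # ...pass pubkeys/pnonces/tweaks/is_xonly into the BIP-327 reference code
    # and compare the produced partial signature against `expected`.
    print(case.get("comment", "unnamed case"), len(tweaks), "tweak(s)")
</source>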
diff --git a/bip-0329.mediawiki b/bip-0329.mediawiki
new file mode 100644
index 0000000..fc5da42
--- /dev/null
+++ b/bip-0329.mediawiki
@@ -0,0 +1,145 @@
+<pre>
+ BIP: 329
+ Layer: Applications
+ Title: Wallet Labels Export Format
+ Author: Craig Raw <craig@sparrowwallet.com>
+ Comments-Summary: No comments yet.
+ Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0329
+ Status: Draft
+ Type: Informational
+ Created: 2022-08-23
+ License: BSD-2-Clause
+</pre>
+
+==Abstract==
+
+This document specifies a format for the export of labels that may be attached to various common types of records in a wallet.
+
+==Copyright==
+
+This BIP is licensed under the BSD 2-clause license.
+
+==Motivation==
+
+The export and import of funds across different Bitcoin wallet applications is well defined through standards such as BIP39, BIP32, BIP44 etc.
+These standards are well supported and allow users to move easily between different wallets.
+There is, however, no defined standard to transfer any labels the user may have applied to the transactions, addresses, public keys, inputs, outputs or xpubs in their wallet.
+The UTXO model that Bitcoin uses makes these labels particularly valuable as they may indicate the source of funds, whether received externally or as a result of change from a prior transaction.
+In both cases, care must be taken when spending to avoid undesirable leaks of private information.
+
+Labels provide valuable guidance in this regard, and have even become mandatory when spending in several Bitcoin wallets.
+Allowing users to import and export their labels in a standardized way ensures that they do not experience lock-in to a particular wallet application.
+In addition, many wallets allow unspent outputs to be frozen or made unspendable within the wallet. Since this wallet-related metadata is similar to labels and not captured elsewhere, it is also included in this format.
+
+==Rationale==
+
+While there is currently no widely accepted format for exporting and importing labels, there are existing formats in use.
+SLIP-0015<ref>[https://github.com/satoshilabs/slips/blob/master/slip-0015.md SLIP-0015]</ref> defines a format for exporting address and output labels, but requires encryption using a private key associated with the wallet seed, and thus cannot be used independently by coordinator wallets which cannot access private keys.
+The Electrum wallet imports and exports address and transaction labels in a JSON format which could be used with other record types, but the format used is not self-describing, making record type identification difficult.
+
+==Specification==
+
+In order to be lightweight, human readable and well structured, this BIP uses a JSON format.
+Further, the JSON Lines format is used (also called newline-delimited JSON)<ref>[https://jsonlines.org/ jsonlines.org]</ref>.
+This allows a document to be split, streamed, or incrementally added to, and limits the potential for formatting errors to invalidate an entire import.
+It is also a convenient format for command-line processing, which is often line-oriented.
+
+Further to the JSON Lines specification, an export of labels from a wallet must be a UTF-8 encoded text file, containing one record per line consisting of a valid JSON object.
+Lines are separated by <tt>\n</tt>. Multiline values are not permitted.
+Each JSON object may contain the following key/value pairs:
+
+{| class="wikitable"
+|-
+! Key
+! Description
+|-
+| <tt>type</tt>
+| One of <tt>tx</tt>, <tt>addr</tt>, <tt>pubkey</tt>, <tt>input</tt>, <tt>output</tt> or <tt>xpub</tt>
+|-
+| <tt>ref</tt>
+| Reference to the transaction, address, public key, input, output or extended public key
+|-
+| <tt>label</tt>
+| The label applied to the reference
+|-
+| <tt>origin</tt>
+| Optional key origin information referencing the wallet associated with the label
+|-
+| <tt>spendable</tt>
+| One of <tt>true</tt> or <tt>false</tt>, denoting if an output should be spendable by the wallet
+|}
+
+The reference is defined for each <tt>type</tt> as follows:
+
+{| class="wikitable"
+|-
+! Type
+! Description
+! Example
+|-
+| <tt>tx</tt>
+| Transaction id in hexadecimal format
+| <tt>f91d0a8a78462bc59398f2c5d7a84fcff491c26ba54c4833478b202796c8aafd</tt>
+|-
+| <tt>addr</tt>
+| Address in base58 or bech32 format
+| <tt>bc1q34aq5drpuwy3wgl9lhup9892qp6svr8ldzyy7c</tt>
+|-
+| <tt>pubkey</tt>
+| 32, 33 or 65 byte public key in hexadecimal format
+| <tt>0283409659355b6d1cc3c32decd5d561abaac86c37a353b52895a5e6c196d6f448</tt>
+|-
+| <tt>input</tt>
+| Transaction id and input index separated by a colon
+| <tt>f91d0a8a78462bc59398f2c5d7a84fcff491c26ba54c4833478b202796c8aafd:0</tt>
+|-
+| <tt>output</tt>
+| Transaction id and output index separated by a colon
+| <tt>f91d0a8a78462bc59398f2c5d7a84fcff491c26ba54c4833478b202796c8aafd:1</tt>
+|-
+| <tt>xpub</tt>
+| Extended public key as defined by BIP32
+| <tt>xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8Nq...</tt>
+|}
+
+Each JSON object must contain both <tt>type</tt> and <tt>ref</tt> properties. The <tt>label</tt>, <tt>origin</tt> and <tt>spendable</tt> properties are optional. If the <tt>label</tt> or <tt>spendable</tt> properties are omitted, the importing wallet should not alter these values. The <tt>origin</tt> property should only appear where type is <tt>tx</tt>, and the <tt>spendable</tt> property only where type is <tt>output</tt>.
+
+If present, the optional <tt>origin</tt> property must contain an abbreviated output descriptor (as defined by BIP380<ref>[https://github.com/bitcoin/bips/blob/master/bip-0380.mediawiki BIP-0380]</ref>) describing a BIP32 compatible originating wallet, including all key origin information but excluding any actual keys, any child path elements, or a checksum.
+This property should be used to disambiguate transaction labels from different wallets contained in the same export, particularly when exporting multiple accounts derived from the same seed.
+
+Care should be taken when exporting due to the privacy sensitive nature of the data.
+Encryption in transit over untrusted networks is highly recommended, and encryption at rest should also be considered.
+Unencrypted exports should be deleted as soon as possible.
+For security reasons no private key types are defined.
+
+==Importing==
+
+* An importing wallet may ignore records it does not store, and truncate labels if necessary. A suggested default for maximum label length is 255 characters, and an importing wallet should consider warning the user if truncation is applied.
+* Wallets importing public key records may derive addresses from them to match against known wallet addresses.
+* Wallets importing extended public keys may match them against signers, for example in a multisig setup.
+
+==Backwards Compatibility==
+
+The nature of this format makes it naturally extensible to handle other record types.
+However, importing wallets complying with this specification may ignore types not defined here.
+
+==Test Vectors==
+
+The following fragment represents a wallet label export:
+<pre>
+{ "type": "tx", "ref": "f91d0a8a78462bc59398f2c5d7a84fcff491c26ba54c4833478b202796c8aafd", "label": "Transaction", "origin": "wpkh([d34db33f/84'/0'/0'])" }
+{ "type": "addr", "ref": "bc1q34aq5drpuwy3wgl9lhup9892qp6svr8ldzyy7c", "label": "Address" }
+{ "type": "pubkey", "ref": "0283409659355b6d1cc3c32decd5d561abaac86c37a353b52895a5e6c196d6f448", "label": "Public Key" }
+{ "type": "input", "ref": "f91d0a8a78462bc59398f2c5d7a84fcff491c26ba54c4833478b202796c8aafd:0", "label": "Input" }
+{ "type": "output", "ref": "f91d0a8a78462bc59398f2c5d7a84fcff491c26ba54c4833478b202796c8aafd:1", "label": "Output" , "spendable" : "false" }
+{ "type": "xpub", "ref": "xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8", "label": "Extended Public Key" }
+{ "type": "tx", "ref": "f546156d9044844e02b181026a1a407abfca62e7ea1159f87bbeaa77b4286c74", "label": "Account #1 Transaction", "origin": "wpkh([d34db33f/84'/0'/1'])" }
+</pre>
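A minimal importer sketch (illustrative only, not part of the BIP) that parses an export like the fragment above, skipping unknown record types and truncating labels to the suggested 255-character default:

<source lang="python">
import json

KNOWN_TYPES = {"tx", "addr", "pubkey", "input", "output", "xpub"}

def parse_labels(path, max_label_len=255):
    """Parse a BIP-329 JSON Lines export, one JSON object per line."""
    records = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            obj = json.loads(line)
            if obj.get("type") not in KNOWN_TYPES or "ref" not in obj:
                continue  # ignore records this importer does not understand
            if "label" in obj:
                obj["label"] = obj["label"][:max_label_len]
            records.append(obj)
    return records
</source>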
+
+==Reference Implementation==
+
+TBD
+
+==References==
+
+<references />
diff --git a/bip-0330.mediawiki b/bip-0330.mediawiki
index 581b6ae..c24ea42 100644
--- a/bip-0330.mediawiki
+++ b/bip-0330.mediawiki
@@ -29,19 +29,24 @@ Increasing the connectivity of the network makes the network more robust to part
[https://arxiv.org/pdf/1905.10518.pdf Erlay] is an example of a high-level transaction relay protocol which employs set reconciliation for bandwidth efficiency.
+Note that what we describe here is a modified version of the protocol: it differs from what is presented in the paper.
+
Erlay uses both flooding (announcing using INV messages to all peers) and reconciliation to announce transactions.
-Flooding is expensive, so Erlay seeks to use it sparingly and in strategic locations - only well-connected publicly reachable nodes flood transactions to other publicly reachable nodes via outbound connections.
-Since every unreachable node is directly connected to several reachable nodes, this policy ensures that a transaction is quickly propagated to be within one hop from most of the nodes in the network.
+Flooding is expensive, so Erlay seeks to use it only when necessary to facilitate rapid relay over a small subset of connections.
+
+Efficient set reconciliation is meant to deliver transactions to those nodes which did not receive them via flooding, and to make sure the remaining connections are in sync (directly connected pairs of nodes learn that they have nothing to learn from each other).
+
+Efficient set reconciliation works as follows:
+1) every node keeps a reconciliation set for each peer, in which transactions are placed which would have been announced using INV messages absent this protocol
+2) once in a while every node chooses a peer from its reconciliation queue to reconcile with, resulting in both sides learning the transactions known to the other side
+3) after every reconciliation round, the corresponding reconciliation set is cleared
-All transactions not propagated through flooding are propagated through efficient set reconciliation.
-To do this, every node keeps a reconciliation set for each peer, in which transactions are placed which would have been announced using INV messages absent this protocol. Every 2 seconds every node chooses a peer from its outbound connections in a predetermined order to reconcile with, resulting in both sides learning the transactions known to the other side. After every reconciliation round, the corresponding reconciliation set is cleared.
-A more detailed description of a set reconciliation round and other implementation details can be found in the paper.
+A more detailed description of a set reconciliation round can be found below.
Erlay allows us to:
-* save 40% of the bandwidth consumed by a node, given typical network connectivity as of July 2019.
-* achieve similar latency
+* save a significant portion of the bandwidth consumed by a node
* increase network connectivity for almost no bandwidth or latency cost
-* improves privacy as a side-effect
+* keep transaction propagation latency at the same level
This document proposes a P2P-layer extension which is required to enable efficient reconciliation-based protocols (like Erlay) for transaction relay.
@@ -52,13 +57,11 @@ This document proposes a P2P-layer extension which is required to enable efficie
Several new data structures are introduced to the P2P protocol first, to aid with efficient transaction relay.
====32-bit short transaction IDs====
-
-During reconciliation, significantly abbreviated transaction IDs are used of just 32 bits in size. To prevent attackers from constructing sets of transactions that cause network-wide collisions, the short ID computation is salted on a per-link basis using 64 bits of entropy contributed by both communication partners.
-
+
Short IDs are computed as follows:
-* Let ''salt<sub>1</sub>'' and ''salt<sub>2</sub>'' be the entropy contributed by both sides; see the "sendrecon" message further for details how they are exchanged.
+* Let ''salt<sub>1</sub>'' and ''salt<sub>2</sub>'' be the entropy contributed by both sides; see the "sendtxrcncl" message below for details on how they are exchanged.
* Sort the two salts such that ''salt<sub>1</sub> &le; salt<sub>2</sub>'' (which side sent what doesn't matter).
-* Compute ''h = SHA256("Tx Relay Salting" || salt<sub>1</sub> || salt<sub>2</sub>)'', where the two salts are encoded in 64-bit little-endian byte order.
+* Compute ''h = TaggedHash("Tx Relay Salting", salt<sub>1</sub>, salt<sub>2</sub>)'', where the two salts are encoded in 64-bit little-endian byte order, and TaggedHash is specified by [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP-340].
* Let ''k<sub>0</sub>'' be the 64-bit integer obtained by interpreting the first 8 bytes of ''h'' in little-endian byte order.
* Let ''k<sub>1</sub>'' be the 64-bit integer obtained by interpreting the second 8 bytes of ''h'' in little-endian byte order.
* Let ''s = SipHash-2-4((k<sub>0</sub>,k<sub>1</sub>),wtxid)'', where ''wtxid'' is the transaction hash including witness data as defined by BIP141.
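As an illustration, a sketch of the salt and key derivation steps above; the <code>siphash24</code> helper at the end is hypothetical (Python's standard library has no SipHash-2-4), and the final reduction of the 64-bit SipHash output to a 32-bit short ID is not reproduced here:

<source lang="python">
import hashlib
import struct

def tagged_hash(tag: str, data: bytes) -> bytes:
    # BIP-340 tagged hash: SHA256(SHA256(tag) || SHA256(tag) || data)
    tag_hash = hashlib.sha256(tag.encode()).digest()
    return hashlib.sha256(tag_hash + tag_hash + data).digest()

def short_id_keys(salt_a: int, salt_b: int):
    # The order of the two per-connection salts does not matter: sort them first.
    salt1, salt2 = sorted((salt_a, salt_b))
    h = tagged_hash("Tx Relay Salting", struct.pack("<QQ", salt1, salt2))
    k0 = int.from_bytes(h[0:8], "little")
    k1 = int.from_bytes(h[8:16], "little")
    return k0, k1

# s = siphash24(k0, k1, wtxid)  # hypothetical SipHash-2-4 helper over the wtxid
</source>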
@@ -104,69 +107,67 @@ def create_sketch(shortids, capacity):
The [https://github.com/sipa/minisketch/ minisketch] library implements the construction, merging, and decoding of these sketches efficiently.
-====Truncated transaction IDs====
-
-For announcing and relaying transaction outside of reconciliation, we need an unambiguous, unsalted way to refer to transactions to deduplicate transaction requests. As we're introducing a new scheme anyway, this is a good opportunity to switch to wtxid-based requests rather than txid-based ones. While using full 256-bit wtxids is possible, this is overkill as they contribute significantly to the total bandwidth as well. Instead, we truncate the wtxid to just their first 128 bits. These are referred to as truncated IDs.
-
===Intended Protocol Flow===
Set reconciliation primarily consists of the transmission and decoding of a reconciliation set sketch upon request.
-[[File:bip-0330/recon_scheme_merged.png|framed|center|Set reconciliation protocol flow]]
+Since sketches are based on the WTXIDs, the negotiation and support of Erlay should be enabled only if both peers signal [https://github.com/bitcoin/bips/blob/master/bip-0339.mediawiki BIP-339] support.
-====Bisection====
+[[File:bip-0330/recon_scheme_merged.png|framed|center|Protocol flow]]
-If a node is unable to reconstruct the set difference from the received sketch, the node then makes an additional reconciliation request, similar to the initial one, but this request is applied to only a fraction of possible transactions (e.g., in the range 0x0–0x8). Because of the linearity of sketches, a sketch of a subset of transactions would allow the node to compute a sketch for the remainder, which saves bandwidth.
+====Sketch extension====
-[[File:bip-0330/bisection.png|framed|300px|center|Bisection]]
+If a node is unable to reconstruct the set difference from the received sketch, it requests a sketch extension. The peer then sends an extension, which is a sketch of higher capacity (allowing more differences to be decoded) over the same transactions, minus the part of the sketch that was already sent initially (to save bandwidth).
+To allow this optimization, the initiator is expected to store the initially received sketch locally.
+This optimization is possible because extending a sketch amounts to appending new elements to an array.
===New messages===
-Several new protocol messages are added: sendrecon, reqreconcil, sketch, reqbisec, reconcildiff, invtx, gettx. This section describes their serialization, contents, and semantics.
+Several new protocol messages are added: sendtxrcncl, reqrecon, sketch, reqsketchext, reconcildiff. This section describes their serialization, contents, and semantics.
In what follows, all integers are serialized in little-endian byte order. Boolean values are encoded as a single byte that must be 0 or 1 exactly. Arrays are serialized with the CompactSize prefix that encodes their length, as is common in other P2P messages.
-====sendrecon====
-The sendrecon message announces support for the reconciliation protocol. It is expected to be only sent once, and ignored by nodes that don't support it.
+====sendtxrcncl====
+The sendtxrcncl message announces support for the reconciliation protocol. It is expected to be only sent once, and ignored by nodes that don't support it.
+
+Should be sent before "verack" and accompanied by "wtxidrelay" (in any order).
+
+If "sendtxrcncl" was sent after "verack", the sender should be disconnected.
+
+If "sendtxrcncl" was sent before "verack", but by "verack" the "wtxidrelay" message was not received,
+"sendtxrcncl" should be ignored. The connection should proceed normally, but as if reconciliation
+was not supported.
+
+Must not be sent if peer specified no support for transaction relay (fRelay=0) in "version".
+Otherwise, the sender should be disconnected.
Its payload consists of:
{|class="wikitable"
! Data type !! Name !! Description
|-
-| bool || sender || Indicates whether the sender will send "reqreconcil" message
-|-
-| bool || responder || Indicates whether the sender will respond to "reqreconcil" messages.
-|-
-| uint32 || version || Sender must set this to 1 currently, otherwise receiver should ignore the message.
+| uint32 || version || Sender must set this to 1 currently, otherwise the receiver should ignore the message. Version 1 is the lowest protocol version; anything below that is a protocol violation.
|-
| uint64 || salt || The salt used in the short transaction ID computation.
|}
-"reqreconcil" messages can only be sent if the sender has sent a "sendrecon" message with sender=true, and the receiver has sent a "sendrecon" message with responder=true.
+After both peers have confirmed support by sending "sendtxrcncl", the initiator of the P2P connection assumes the role of reconciliation initiator (will send "reqrecon" messages) and the other peer assumes the role of reconciliation responder (will respond to "reqrecon" messages).
+"reqrecon" messages can only be sent by the reconciliation initiator.
-====reqreconcil====
-The reqreconcil message initiates a reconciliation round.
+====reqrecon====
+The reqrecon message initiates a reconciliation round.
{|class="wikitable"
! Data type !! Name !! Description
|-
| uint16 || set_size || Size of the sender's reconciliation set, used to estimate set difference.
|-
-| uint8 || q || Coefficient used to estimate set difference. Multiplied by PRECISION=2^6 and rounded up by the sender and divided by PRECISION by the receiver.
+| uint16 || q || Coefficient used to estimate set difference. Multiplied by PRECISION=(2^15) - 1 and rounded up by the sender and divided by PRECISION by the receiver.
|}
-Upon receipt of a "reqreconcil" message, the receiver:
-* Constructs and sends a "sketch" message (see below), with a sketch of capacity computed as ''|set_size - local_set_size| + q * (set_size + local_set_size) + c'', where ''local_set_size'' represents size of the receiver's reconciliation set.
+Upon receipt of a "reqrecon" message, the receiver:
+* Constructs and sends a "sketch" message (see below), with a sketch of certain ''capacity=f(set_size, local_set_size, q)'' (the exact function is suggested below), where ''local_set_size'' represents size of the receiver's reconciliation set.
* Makes a snapshot of their current reconciliation set, and clears the set itself. The snapshot is kept until a "reconcildiff" message is received by the node.
-It is suggested to use ''c=1'' to avoid sending empty sketches and reduce the overhead caused by under-estimations.
-
-Intuitively, ''q'' represents the discrepancy in sets: the closer the sets are, the lower optimal ''q'' is.
-As suggested by Erlay, ''q'' should be derived as an optimal ''q'' value for the previous reconciliation with a given peer, once the actual set sizes and set difference are known. Alternatively, ''q=0.1'' should be used as a default value.
-For example, if in previous round ''set_size=30'' and ''local_set_size=20'', and the *actual* difference was ''4'', then a node should compute ''q'' as following:
-''q=(|30-20| - 1) / (30+20)=0.18''
-The derivation of ''q'' can be changed according to the version of the protocol.
-
-No new "reqreconcil" message can be sent until a "reconcildiff" message is sent.
+No new "reqrecon" message can be sent until a "reconcildiff" message is sent.
====sketch====
The sketch message is used to communicate a sketch required to perform set reconciliation.
@@ -177,21 +178,29 @@ The sketch message is used to communicate a sketch required to perform set recon
| byte[] || skdata || The sketch of the sender's reconciliation snapshot
|}
-Upon receipt of a "sketch" message, a node computes the set difference by combining the receiver sketch with a sketch computed locally for a corresponding reconciliation set. If this is the 2nd time for this round a "sketch" message was received, the bisection approach is used, and by combining the new sketch with the previous one, two difference sketches are obtained, one for the first half and one for the second half of the short id range. The receiving node then tries to decode this sketch (or sketches), and based on the result:
-* If decoding fails, a "reconcildiff" message is sent with the failure flag set (success=false). If this was the first "sketch" in the round, a "reqbisec" message may be sent instead.
-* If decoding succeeds, a "reconcildiff" message is sent with the truncated IDs of all locally known transactions that appear in the decode result, and the short IDs of the unrecognized ones.
+The sketch message may be received in two cases.
-The receiver also makes snapshot of their current reconciliation set, and clears the set itself. The snapshot is kept until a "reconcildiff" message is sent by the node.
+1. Initial sketch. Upon receipt of a "sketch" message, a node computes the difference sketch by combining the received sketch with a sketch computed locally for the corresponding reconciliation set. The receiving node then tries to decode the difference sketch and, based on the result:
+* If the decoding failed, the receiving node requests an extension sketch by sending a "reqsketchext" message. Alternatively, the node may terminate the reconciliation right away by sending a "reconcildiff" message with the failure flag set (success=false).
+* If the decoding succeeded, a "reconcildiff" message is sent with success=true.
+The receiver also makes a snapshot of their current reconciliation set, and clears the set itself. The snapshot is kept until a "reconcildiff" message is sent by the node. It is needed to enable sketch extension.
-====reqbisec====
-The reqbisec message is used to signal that set reconciliation has failed and an extra sketch is needed to find set difference.
+2. Sketch extension. By combining the sketch extension with the initially received sketch, an extended sketch is obtained. The receiving node then computes the extended difference sketch by combining the received extended sketch with an extended sketch computed locally over the corresponding reconciliation set snapshot. The receiving node then tries to decode the extended difference sketch and, based on the result:
+* If the decoding failed, the receiving node terminates the reconciliation right away by sending a "reconcildiff" message with the failure flag set (success=false).
+* If the decoding succeeded, a "reconcildiff" message is sent with success=true.
+
+In either case, a "reconcildiff" with success=false should also be accompanied by announcements of all transactions from the reconciliation set (or the set snapshot, if the failure happened after an extension) as a fallback to flooding.
+A "reconcildiff" with success=true should contain the unknown short IDs from the decoded difference, corresponding to the transactions missing on the sender's side. The known short IDs from the difference correspond to what the receiver of the message is missing, and they should be announced via an "inv" message.
+
+====reqsketchext====
+The reqsketchext message is used by the reconciliation initiator to signal that the initial set reconciliation has failed and a sketch extension is needed to find the set difference.
It has an empty payload.
-Upon receipt of a "reqbisec" message, a node responds to it with a "sketch" message, which contains a sketch of a subset of corresponding reconciliation set <b>snapshot</b> (stored when "reqreconcil" message for the current round was processed) (values in range ''[0..(2^31)]'').
+Upon receipt of a "reqsketchext" message, a node responds to it with a "sketch" message, which contains a sketch extension: a sketch (of the same transactions sketched initially) of higher capacity without the part sent initially.
====reconcildiff====
-The reconcildiff message is used to announce transactions which are found to be missing during set reconciliation on the sender's side.
+The reconcildiff message is used by the reconciliation initiator to announce transactions which are found to be missing during set reconciliation on the sender's side.
{|class="wikitable"
! Data type !! Name !! Description
@@ -201,57 +210,46 @@ The reconcildiff message is used to announce transactions which are found to be
| uint32[] || ask_shortids || The short IDs that the sender did not have.
|}
-Upon receipt a "reconcildiff" message with ''success=1'', a node sends a "invtx" message for the transactions requested by 32-bit IDs (first vector) containing their 128-bit truncated IDs (with parent transactions occuring before their dependencies), and can request announced transactions (second vector) it does not have via a "gettx" message.
-Otherwise if ''success=0'', receiver should request bisection via ''reqbisec'' (if failure happened for the first time).
-If failure happened for the second time, receiver should announce the transactions from the reconciliation set via an "invtx" message, excluding the transactions announced from the sender.
+Upon receipt a "reconcildiff" message with ''success=1'' (reconciliation success), a node sends an "inv" message for the transactions requested by 32-bit IDs (first vector) containing their wtxids (with parent transactions occuring before their dependencies).
+If ''success=0'' (reconciliation failure), receiver should announce all transactions from the reconciliation set via an "inv" message.
+In both cases, transactions the sender of the message thinks the receiver is missing are announced via an "inv" message.
+The regular "inv" deduplication should apply.
The <b>snapshot</b> of the corresponding reconciliation set is cleared by the sender and the receiver of the message.
-The sender should also send their own "invtx" message along with the reconcildiff message to announce transactions which are missing on the receiver's side.
-
-====invtx====
-The invtx message is used to announce transactions (both along with reconcildiff message and as a response to the reconcildiff message). It is the truncated ID analogue of "inv" (which cannot be used because it has 256-bit elements).
-
-{|class="wikitable"
-! Data type !! Name !! Description
-|-
-| uint128[] || inv_truncids || The truncated IDs of transactions the sender believes the receiver does not have.
-|}
-
-Upon receipt a "invtx" message, a node requests announced transactions it does not have.
-The <b>snapshot</b> of the corresponding reconciliation set is cleared by the sender of the message.
-
-====gettx====
-The gettx message is used to request transactions by 128-bit truncated IDs. It is the truncated ID analogue of "getdata".
-
-{|class="wikitable"
-! Data type !! Name !! Description
-|-
-| uint128[] || ask_truncids || The truncated IDs of transactions the sender wants the full transaction data for.
-|}
-
-Upon receipt a "gettx" message, a node sends "tx" messages for the requested transactions.
+The sender should also send their own "inv" message along with the reconcildiff message to announce transactions which are missing on the receiver's side.
==Local state==
This BIP suggests a stateful protocol and it requires storing several variables at every node to operate properly.
+====Reconciliation salt====
+When negotiating reconciliation support, peers send each other their contribution to the reconciliation salt (see how we construct short IDs above). These salts (or just the resulting salt) should be stored on both sides of the connection.
+
====Reconciliation sets====
-Every node stores a set of 128-bit truncated IDs for every peer which supports transaction reconciliation, representing the transactions which would have been sent according to the regular flooding protocol.
+Every node stores a set of wtxids for every peer which supports transaction reconciliation, representing the transactions which would have been sent according to the regular flooding protocol.
Incoming transactions are added to sets when those transactions are received (if they satisfy the policies such as minimum fee set by a peer).
A reconciliation set is moved to the corresponding set snapshot after the transmission of the initial sketch.
====Reconciliation set snapshot====
-After the transmitting of the initial sketch (either sending or receiving of reconcildiff message), every node should store the snapshot of the current reconciliation set, and clear the set.
-This is important to make bisection more stable during the reconciliation round (bisection should be applied to the snapshot).
-The snapshot is also used to efficiently lookup the transactions requested by short ID.
+After transmitting the initial sketch (either sending or receiving of the reconcildiff message), every node should store the snapshot of the current reconciliation set, and clear the set.
+This is important to make sketch extension more stable (the extension should be computed over the set snapshot). Otherwise, the extension would contain transactions received after sending out the initial sketch.
The snapshot is cleared after the end of the reconciliation round (sending or receiving of the reconcildiff message).
-====q-coefficient====
-The q value should be stored to make efficient difference estimation. It is shared across peers and changed after every reconciliation.
-q-coefficient represents the discrepancy in sets: the closer the sets are, the lower optimal ''q'' is.
-In future implementations, q could vary across different peers or become static.
+====Sketch capacity estimation and q-coefficient====
+
+Earlier we suggested that upon receiving a reconciliation request, a node should estimate the sketch capacity it should send: ''capacity=f(set_size, local_set_size, q)''.
+
+We suggest the following function: ''capacity=|set_size - local_set_size| + q * min(set_size, local_set_size) + c''.
+
+Intuitively, ''q'' represents the discrepancy in sets: the closer the sets are, the lower optimal ''q'' is.
+Per the Erlay paper, ''q'' should be derived as the optimal ''q'' value for the previous reconciliation with a given peer, once the actual set sizes and set difference are known.
+For example, if in the previous round ''set_size=30'' and ''local_set_size=20'', and the *actual* difference was ''12'', then a node should compute ''q'' as follows:
+''q=(12 - |30-20|) / min(30, 20)=0.1''
+
+The derivation of ''q'' can be changed according to the version of the protocol. For example, a static value could be chosen for simplicity. However, we suggest that ''q'' remains a parameter sent in every reconciliation request to enable future compatibility with more sophisticated (non-static) choices of this parameter.
+As for the ''c'' parameter, it is suggested to use ''c=1'' to avoid sending empty sketches and reduce the overhead caused by under-estimations.
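A short sketch of the suggested capacity estimate and the per-round update of ''q'' (rounding the capacity up to an integer and clamping ''q'' at zero are illustrative choices, not mandated by the text):

<source lang="python">
import math

def estimate_capacity(set_size: int, local_set_size: int, q: float, c: int = 1) -> int:
    # capacity = |set_size - local_set_size| + q * min(set_size, local_set_size) + c
    raw = abs(set_size - local_set_size) + q * min(set_size, local_set_size) + c
    return math.ceil(raw)

def update_q(set_size: int, local_set_size: int, actual_diff: int) -> float:
    # Optimal q for the next round, derived from the previous round's outcome.
    denom = min(set_size, local_set_size)
    if denom == 0:
        return 0.0
    return max(0.0, (actual_diff - abs(set_size - local_set_size)) / denom)

# Example from the text: sizes 30 and 20 with an actual difference of 12
# give q = (12 - 10) / 20 = 0.1.
assert abs(update_q(30, 20, 12) - 0.1) < 1e-12
</source>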
==Backward compatibility==
@@ -261,32 +259,36 @@ Clients which do not implement this protocol remain fully compatible after this
==Rationale==
-====Why using PinSketch for set reconciliation?====
+====Why use PinSketch for set reconciliation?====
PinSketch is more bandwidth efficient than IBLT, especially for the small differences in sets we expect to operate over.
PinSketch is as bandwidth efficient as CPISync, but PinSketch has quadratic decoding complexity, while CPISync have cubic decoding complexity. This makes PinSketch significantly faster.
-====Why using 32-bit short transaction IDs?====
+====Why use 32-bit short transaction IDs?====
To use Minisketch in practice, transaction IDs should be shortened (ideally, not more than 64 bits per element).
-Small number of bits per transaction also allows to save extra bandwidth and make operations over sketches faster.
+A small number of bits per transaction also allows saving extra bandwidth and makes operations over sketches faster.
According to our estimates, 32 bits provides low collision rate in a non-adversarial model (which is enabled by using independent salts per-link).
-====Why using 128-bit short IDs?====
+====Why use sketch extensions instead of bisection?====
+
+Bisection is an alternative to sketch extensions, per which a second sketch with the same initial capacity is computed over half of the txID space.
+Due to the linearity of sketches, transmitting just this one allows a reconciliation initiator to compute a sketch of the same capacity for the other half. Two sketches allow the initiator to reconstruct twice as many differences as the initial sketch allowed.
+
+In practice this allows the initiator to amortize the bandwidth overhead of initial reconciliation failure, similarly to extension sketches, making the overhead negligible.
+
+The main benefit of sketch extensions is a much simpler implementation. Implementing bisection is hard (see [https://github.com/naumenkogs/bitcoin/commit/b5c92a41e4cc0599504cf838d20212f1a403e573 implementation]) because, in the end, we have to operate with two sketches and handle scenarios where one sketch decodes and the other fails.
-To avoid problems caused by the delays in the network, our protocol requires extra round of announcing unsalted transaction IDs. [https://arxiv.org/pdf/1905.10518.pdf Erlay] protocol on top of this work also requires announcing unsalted transaction IDs for flooding.
-Both of these measures allow to deduplicate transaction announcements across the peers.
-However, using full 256-bit IDs to uniquely identify transactions seems to be an overkill.
-128 is the highest power of 2 which provides good enough collision-resistance in an adversarial model, and trivially saves a significant portion of the bandwidth related to these announcements.
+It becomes even more difficult if in the future we decide to allow more than one extension/bisection. Bisection in this case would have to be recursive (and spawn 4/8/16/... sketches), while for extensions we always end up with one extended sketch.
-====Why using bisection instead of extending the sketch?====
+Sketch extensions are also more flexible: extending a sketch of capacity 10 by 4 more means just computing a sketch of capacity 14 and sending the extension, while for bisection increasing the capacity to something other than 10*2/10*4/10*8/... is complicated implementation-wise.
-Unlike extended sketches, bisection does not require operating over sketches of higher order.
-This allows to avoid the high computational cost caused by quadratic decoding complexity.
+The only advantage of bisection is that it doesn't require computing sketches of higher capacities (exponential cost). We believe that, since
+the protocol is currently designed to operate under conditions where sketches usually have a capacity of at most 20, this efficiency is not crucial.
==Implementation==
-TODO
+https://github.com/bitcoin/bitcoin/pull/21515
==Acknowledgments==
diff --git a/bip-0330/bisection.png b/bip-0330/bisection.png
deleted file mode 100644
index 70f37e8..0000000
--- a/bip-0330/bisection.png
+++ /dev/null
Binary files differ
diff --git a/bip-0330/recon_scheme_merged.png b/bip-0330/recon_scheme_merged.png
index 11d1559..546b417 100644
--- a/bip-0330/recon_scheme_merged.png
+++ b/bip-0330/recon_scheme_merged.png
Binary files differ
diff --git a/bip-0340.mediawiki b/bip-0340.mediawiki
index a67afe3..c941916 100644
--- a/bip-0340.mediawiki
+++ b/bip-0340.mediawiki
@@ -6,7 +6,7 @@
Tim Ruffing <crypto@timruffing.de>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0340
- Status: Draft
+ Status: Final
Type: Standards Track
License: BSD-2-Clause
Created: 2020-01-19
@@ -86,7 +86,7 @@ Despite halving the size of the set of valid public keys, implicit Y coordinates
For example, without tagged hashing a BIP340 signature could also be valid for a signature scheme where the only difference is that the arguments to the hash function are reordered. Worse, if the BIP340 nonce derivation function was copied or independently created, then the nonce could be accidentally reused in the other scheme leaking the secret key.
-This proposal suggests to include the tag by prefixing the hashed data with ''SHA256(tag) || SHA256(tag)''. Because this is a 64-byte long context-specific constant and the ''SHA256'' block size is also 64 bytes, optimized implementations are possible (identical to SHA256 itself, but with a modified initial state). Using SHA256 of the tag name itself is reasonably simple and efficient for implementations that don't choose to use the optimization.
+This proposal suggests to include the tag by prefixing the hashed data with ''SHA256(tag) || SHA256(tag)''. Because this is a 64-byte long context-specific constant and the ''SHA256'' block size is also 64 bytes, optimized implementations are possible (identical to SHA256 itself, but with a modified initial state). Using SHA256 of the tag name itself is reasonably simple and efficient for implementations that don't choose to use the optimization. In general, tags can be arbitrary byte arrays, but are suggested to be textual descriptions in UTF-8 encoding.
'''Final scheme''' As a result, our final scheme ends up using public key ''pk'' which is the X coordinate of a point ''P'' on the curve whose Y coordinate is even and signatures ''(r,s)'' where ''r'' is the X coordinate of a point ''R'' whose Y coordinate is even. The signature satisfies ''s⋅G = R + tagged_hash(r || pk || m)⋅P''.
@@ -116,7 +116,7 @@ The following conventions are used, with constants as defined for [https://www.s
*** Let ''y = c<sup>(p+1)/4</sup> mod p''.
*** Fail if ''c &ne; y<sup>2</sup> mod p''.
*** Return the unique point ''P'' such that ''x(P) = x'' and ''y(P) = y'' if ''y mod 2 = 0'' or ''y(P) = p-y'' otherwise.
-** The function ''hash<sub>tag</sub>(x)'' where ''tag'' is a UTF-8 encoded tag name and ''x'' is a byte array returns the 32-byte hash ''SHA256(SHA256(tag) || SHA256(tag) || x)''.
+** The function ''hash<sub>name</sub>(x)'' where ''x'' is a byte array returns the 32-byte hash ''SHA256(SHA256(tag) || SHA256(tag) || x)'', where ''tag'' is the UTF-8 encoding of ''name''.
==== Public Key Generation ====
@@ -138,7 +138,7 @@ As an alternative to generating keys randomly, it is also possible and safe to r
Input:
* The secret key ''sk'': a 32-byte array
-* The message ''m'': a 32-byte array
+* The message ''m'': a byte array
* Auxiliary random data ''a'': a 32-byte array
The algorithm ''Sign(sk, m)'' is defined as:
@@ -174,7 +174,7 @@ It should be noted that various alternative signing algorithms can be used to pr
Input:
* The public key ''pk'': a 32-byte array
-* The message ''m'': a 32-byte array
+* The message ''m'': a byte array
* A signature ''sig'': a 64-byte array
The algorithm ''Verify(pk, m, sig)'' is defined as:
@@ -197,7 +197,7 @@ Note that the correctness of verification relies on the fact that ''lift_x'' alw
Input:
* The number ''u'' of signatures
* The public keys ''pk<sub>1..u</sub>'': ''u'' 32-byte arrays
-* The messages ''m<sub>1..u</sub>'': ''u'' 32-byte arrays
+* The messages ''m<sub>1..u</sub>'': ''u'' byte arrays
* The signatures ''sig<sub>1..u</sub>'': ''u'' 64-byte arrays
The algorithm ''BatchVerify(pk<sub>1..u</sub>, m<sub>1..u</sub>, sig<sub>1..u</sub>)'' is defined as:
@@ -213,6 +213,50 @@ The algorithm ''BatchVerify(pk<sub>1..u</sub>, m<sub>1..u</sub>, sig<sub>1..u</s
If all individual signatures are valid (i.e., ''Verify'' would return success for them), ''BatchVerify'' will always return success. If at least one signature is invalid, ''BatchVerify'' will return success with at most a negligible probability.
+=== Usage Considerations ===
+
+==== Messages of Arbitrary Size ====
+
+The signature scheme specified in this BIP accepts byte strings of arbitrary size as input messages.<ref>In theory, the message size is restricted due to the fact that SHA256 accepts byte strings only up to size of 2^61-1 bytes.</ref>
+It is understood that implementations may reject messages which are too large in their environment or application context,
+e.g., messages which exceed predefined buffers or would otherwise cause resource exhaustion.
+
+Earlier revisions of this BIP required messages to be exactly 32 bytes.
+This restriction puts a burden on callers
+who typically need to perform pre-hashing of the actual input message by feeding it through SHA256 (or another collision-resistant cryptographic hash function)
+to create a 32-byte digest which can be passed to signing or verification
+(as for example done in [[bip-0341.mediawiki|BIP341]].)
+
+Since pre-hashing may not always be desirable,
+e.g., when actual messages are shorter than 32 bytes,<ref>Another reason to omit pre-hashing is to protect against certain types of cryptanalytic advances against the hash function used for pre-hashing: If pre-hashing is used, an attacker that can find collisions in the pre-hashing function can necessarily forge signatures under chosen-message attacks. If pre-hashing is not used, an attacker that can find collisions in SHA256 (as used inside the signature scheme) may not be able to forge signatures. However, this seeming advantage is mostly irrelevant in the context of Bitcoin, which already relies on collision resistance of SHA256 in other places, e.g., for transaction hashes.</ref>
+the restriction to 32-byte messages has been lifted.
+We note that pre-hashing is recommended for performance reasons in applications that deal with large messages.
+If large messages are not pre-hashed,
+the algorithms of the signature scheme will perform more hashing internally.
+In particular, the signing algorithm needs two sequential hashing passes over the message,
+which means that the full message must necessarily be kept in memory during signing,
+and large messages entail a runtime penalty.<ref>Typically, messages of 56 bytes or longer enjoy a performance benefit from pre-hashing, assuming the speed of SHA256 inside the signing algorithm matches that of the pre-hashing done by the calling application.</ref>
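For illustration, a sketch of the pre-hashing option using the reference <code>schnorr_sign(msg, seckey, aux_rand)</code> from this repository (the local import path and the placeholder key and aux_rand values are assumptions):

<source lang="python">
import hashlib
from reference import schnorr_sign   # bip-0340/reference.py, assumed importable

seckey = (1).to_bytes(32, "big")      # placeholder secret key for illustration
aux_rand = bytes(32)                  # placeholder auxiliary randomness
large_msg = b"\x99" * 1_000_000

# Option A: sign the message directly; the signer hashes the full message twice.
sig_direct = schnorr_sign(large_msg, seckey, aux_rand)

# Option B: pre-hash once and sign the 32-byte digest (as BIP341 does).
digest = hashlib.sha256(large_msg).digest()
sig_prehashed = schnorr_sign(digest, seckey, aux_rand)
</source>

Note that the two options produce signatures over different messages, so signer and verifier must agree on which convention is in use.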
+
+==== Domain Separation ====
+
+It is good cryptographic practice to use a key pair only for a single purpose.
+Nevertheless, there may be situations in which it may be desirable to use the same key pair in multiple contexts,
+i.e., to sign different types of messages within the same application
+or even messages in entirely different applications
+(e.g., a secret key may be used to sign Bitcoin transactions as well as plain text messages).
+
+As a consequence, applications should ensure that a signed application message intended for one context is never deemed valid in a different context
+(e.g., a signed plain text message should never be misinterpreted as a signed Bitcoin transaction, because this could cause unintended loss of funds).
+This is called "domain separation" and it is typically realized by partitioning the message space.
+Even if key pairs are intended to be used only within a single context,
+domain separation is a good idea because it makes it easy to add more contexts later.
+
+As a best practice, we recommend that applications use exactly one of the following methods to pre-process application messages before passing them to the signature scheme:
+* Either, pre-hash the application message using ''hash<sub>name</sub>'', where ''name'' identifies the context uniquely (e.g., "foo-app/signed-bar"),
+* or prefix the actual message with a 33-byte string that identifies the context uniquely (e.g., the UTF-8 encoding of "foo-app/signed-bar", padded with null bytes to 33 bytes).
+
+As the two pre-processing methods yield different message sizes (32 bytes vs. at least 33 bytes), there is no risk of collision between them.
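A sketch of the two recommended pre-processing options, using the example context string from the text (the helper name and message content are illustrative):

<source lang="python">
import hashlib

def hash_name(name: str, msg: bytes) -> bytes:
    # hash_name(x) = SHA256(SHA256(tag) || SHA256(tag) || x), tag = UTF-8 of name
    tag_hash = hashlib.sha256(name.encode("utf-8")).digest()
    return hashlib.sha256(tag_hash + tag_hash + msg).digest()

CONTEXT = "foo-app/signed-bar"
app_msg = b"example application message"

# Option 1: pre-hash with a context-specific tagged hash (always 32 bytes).
m1 = hash_name(CONTEXT, app_msg)

# Option 2: prefix the message with the context string, null-padded to 33 bytes
# (at least 33 bytes long, so it can never collide with a 32-byte pre-hash).
m2 = CONTEXT.encode("utf-8").ljust(33, b"\x00") + app_msg
</source>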
+
== Applications ==
There are several interesting applications beyond simple signatures.
@@ -243,6 +287,13 @@ Blind Schnorr signatures could for example be used in [https://github.com/Elemen
For development and testing purposes, we provide a [[bip-0340/test-vectors.csv|collection of test vectors in CSV format]] and a naive, highly inefficient, and non-constant time [[bip-0340/reference.py|pure Python 3.7 reference implementation of the signing and verification algorithm]].
The reference implementation is for demonstration purposes only and not to be used in production environments.
+== Changelog ==
+
+To help implementors understand updates to this BIP, we keep a list of substantial changes.
+
+* 2022-08: Fix function signature of lift_x in reference code
+* 2023-04: Allow messages of arbitrary size
+
== Footnotes ==
<references />
diff --git a/bip-0340/reference.py b/bip-0340/reference.py
index 5b38c0a..b327e0a 100644
--- a/bip-0340/reference.py
+++ b/bip-0340/reference.py
@@ -68,8 +68,7 @@ def bytes_from_point(P: Point) -> bytes:
def xor_bytes(b0: bytes, b1: bytes) -> bytes:
return bytes(x ^ y for (x, y) in zip(b0, b1))
-def lift_x(b: bytes) -> Optional[Point]:
- x = int_from_bytes(b)
+def lift_x(x: int) -> Optional[Point]:
if x >= p:
return None
y_sq = (pow(x, 3, p) + 7) % p
@@ -97,8 +96,6 @@ def pubkey_gen(seckey: bytes) -> bytes:
return bytes_from_point(P)
def schnorr_sign(msg: bytes, seckey: bytes, aux_rand: bytes) -> bytes:
- if len(msg) != 32:
- raise ValueError('The message must be a 32-byte array.')
d0 = int_from_bytes(seckey)
if not (1 <= d0 <= n - 1):
raise ValueError('The secret key must be an integer in the range 1..n-1.')
@@ -122,13 +119,11 @@ def schnorr_sign(msg: bytes, seckey: bytes, aux_rand: bytes) -> bytes:
return sig
def schnorr_verify(msg: bytes, pubkey: bytes, sig: bytes) -> bool:
- if len(msg) != 32:
- raise ValueError('The message must be a 32-byte array.')
if len(pubkey) != 32:
raise ValueError('The public key must be a 32-byte array.')
if len(sig) != 64:
raise ValueError('The signature must be a 64-byte array.')
- P = lift_x(pubkey)
+ P = lift_x(int_from_bytes(pubkey))
r = int_from_bytes(sig[0:32])
s = int_from_bytes(sig[32:64])
if (P is None) or (r >= p) or (s >= n):
diff --git a/bip-0340/test-vectors.csv b/bip-0340/test-vectors.csv
index a1a63e1..6723391 100644
--- a/bip-0340/test-vectors.csv
+++ b/bip-0340/test-vectors.csv
@@ -14,3 +14,7 @@ index,secret key,public key,aux_rand,message,signature,verification result,comme
12,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F69E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,sig[0:32] is equal to field size
13,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E177769FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141,FALSE,sig[32:64] is equal to curve order
14,,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,public key is not a valid X coordinate because it exceeds the field size
+15,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,,71535DB165ECD9FBBC046E5FFAEA61186BB6AD436732FCCC25291A55895464CF6069CE26BF03466228F19A3A62DB8A649F2D560FAC652827D1AF0574E427AB63,TRUE,message of size 0 (added 2022-12)
+16,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,11,08A20A0AFEF64124649232E0693C583AB1B9934AE63B4C3511F3AE1134C6A303EA3173BFEA6683BD101FA5AA5DBC1996FE7CACFC5A577D33EC14564CEC2BACBF,TRUE,message of size 1 (added 2022-12)
+17,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,0102030405060708090A0B0C0D0E0F1011,5130F39A4059B43BC7CAC09A19ECE52B5D8699D1A71E3C52DA9AFDB6B50AC370C4A482B77BF960F8681540E25B6771ECE1E5A37FD80E5A51897C5566A97EA5A5,TRUE,message of size 17 (added 2022-12)
+18,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999,403B12B0D8555A344175EA7EC746566303321E5DBFA8BE6F091635163ECA79A8585ED3E3170807E7C03B720FC54C7B23897FCBA0E9D0B4A06894CFD249F22367,TRUE,message of size 100 (added 2022-12)
diff --git a/bip-0340/test-vectors.py b/bip-0340/test-vectors.py
index d1bf6b2..317f2ec 100644
--- a/bip-0340/test-vectors.py
+++ b/bip-0340/test-vectors.py
@@ -249,6 +249,20 @@ def vector14():
return (None, pubkey, None, msg, sig, "FALSE", "public key is not a valid X coordinate because it exceeds the field size")
+def varlen_vector(msg_int):
+ seckey = bytes_from_int(int(16 * "0340", 16))
+ pubkey = pubkey_gen(seckey)
+ aux_rand = bytes_from_int(0)
+ msg = msg_int.to_bytes((msg_int.bit_length() + 7) // 8, "big")
+ sig = schnorr_sign(msg, seckey, aux_rand)
+ comment = "message of size %d (added 2022-12)"
+ return (seckey, pubkey, aux_rand, msg, sig, "TRUE", comment % len(msg))
+
+vector15 = lambda : varlen_vector(0)
+vector16 = lambda : varlen_vector(0x11)
+vector17 = lambda : varlen_vector(0x0102030405060708090A0B0C0D0E0F1011)
+vector18 = lambda : varlen_vector(int(100 * "99", 16))
+
vectors = [
vector0(),
vector1(),
@@ -264,7 +278,11 @@ vectors = [
vector11(),
vector12(),
vector13(),
- vector14()
+ vector14(),
+ vector15(),
+ vector16(),
+ vector17(),
+ vector18(),
]
# Converts the byte strings of a test vector into hex strings
diff --git a/bip-0341.mediawiki b/bip-0341.mediawiki
index fa9bb15..392ad67 100644
--- a/bip-0341.mediawiki
+++ b/bip-0341.mediawiki
@@ -7,7 +7,7 @@
Anthony Towns <aj@erisian.com.au>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0341
- Status: Draft
+ Status: Final
Type: Standards Track
Created: 2020-01-19
License: BSD-3-Clause
@@ -105,7 +105,7 @@ If the parameters take acceptable values, the message is the concatenation of th
** ''nLockTime'' (4): the ''nLockTime'' of the transaction.
** If the ''hash_type & 0x80'' does not equal <code>SIGHASH_ANYONECANPAY</code>:
*** ''sha_prevouts'' (32): the SHA256 of the serialization of all input outpoints.
-*** ''sha_amounts'' (32): the SHA256 of the serialization of all spent output amounts.
+*** ''sha_amounts'' (32): the SHA256 of the serialization of all input amounts.
*** ''sha_scriptpubkeys'' (32): the SHA256 of all spent outputs' ''scriptPubKeys'', serialized as script inside <code>CTxOut</code>.
*** ''sha_sequences'' (32): the SHA256 of the serialization of all input ''nSequence''.
** If ''hash_type & 3'' does not equal <code>SIGHASH_NONE</code> or <code>SIGHASH_SINGLE</code>:
@@ -182,16 +182,20 @@ def taproot_tweak_pubkey(pubkey, h):
t = int_from_bytes(tagged_hash("TapTweak", pubkey + h))
if t >= SECP256K1_ORDER:
raise ValueError
- Q = point_add(lift_x(pubkey), point_mul(G, t))
+ P = lift_x(int_from_bytes(pubkey))
+ if P is None:
+ raise ValueError
+ Q = point_add(P, point_mul(G, t))
return 0 if has_even_y(Q) else 1, bytes_from_int(x(Q))
def taproot_tweak_seckey(seckey0, h):
- P = point_mul(G, int_from_bytes(seckey0))
+ seckey0 = int_from_bytes(seckey0)
+ P = point_mul(G, seckey0)
seckey = seckey0 if has_even_y(P) else SECP256K1_ORDER - seckey0
t = int_from_bytes(tagged_hash("TapTweak", bytes_from_int(x(P)) + h))
if t >= SECP256K1_ORDER:
raise ValueError
- return (seckey + t) % SECP256K1_ORDER
+ return bytes_from_int((seckey + t) % SECP256K1_ORDER)
</source>
The following function, <code>taproot_output_script</code>, returns a byte array with the scriptPubKey (see [[bip-0141.mediawiki|BIP141]]).
@@ -244,10 +248,13 @@ TapTweak = tagged_hash("TapTweak", p + ABCDE)
'''Spending using the key path''' A Taproot output can be spent with the secret key corresponding to the <code>internal_pubkey</code>. To do so, a witness stack consists of a single element: a [[bip-0340.mediawiki|BIP340]] signature on the signature hash as defined above, with the secret key tweaked by the same <code>h</code> as in the above snippet. See the code below:
<source lang="python">
-def taproot_sign_key(script_tree, internal_seckey, hash_type):
- _, h = taproot_tree_helper(script_tree)
+def taproot_sign_key(script_tree, internal_seckey, hash_type, bip340_aux_rand):
+ if script_tree is None:
+ h = bytes()
+ else:
+ _, h = taproot_tree_helper(script_tree)
output_seckey = taproot_tweak_seckey(internal_seckey, h)
- sig = schnorr_sign(sighash(hash_type), output_seckey)
+ sig = schnorr_sign(sighash(hash_type), output_seckey, bip340_aux_rand)
if hash_type != 0:
sig += bytes([hash_type])
return [sig]
@@ -350,6 +357,6 @@ Depending on the implementation non-upgraded wallets may be able to send to Segw
== Acknowledgements ==
-This document is the result of discussions around script and signature improvements with many people, and had direct contributions from Greg Maxwell and others. It further builds on top of earlier published proposals such as Taproot by Greg Maxwell, and Merkle branch constructions by Russell O'Connor, Johnson Lau, and Mark Friedenbach.
+This document is the result of discussions around script and signature improvements with many people, and had direct contributions from Greg Maxwell and others. It further builds on top of earlier published proposals such as Taproot by Greg Maxwell, and Merkle branch constructions by Russell O'Connor, Johnson Lau, and Mark Friedenbach.
The authors wish to thank Arik Sosman for suggesting to sort Merkle node children before hashes, removing the need to transfer the position in the tree, as well as all those who provided valuable feedback and reviews, including the participants of the [https://github.com/ajtowns/taproot-review structured reviews].
diff --git a/bip-0342.mediawiki b/bip-0342.mediawiki
index bbefcaa..64d07cc 100644
--- a/bip-0342.mediawiki
+++ b/bip-0342.mediawiki
@@ -7,7 +7,7 @@
Anthony Towns <aj@erisian.com.au>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0342
- Status: Draft
+ Status: Final
Type: Standards Track
Created: 2020-01-19
License: BSD-3-Clause
diff --git a/bip-0343.mediawiki b/bip-0343.mediawiki
index 3d2f392..a47edc0 100644
--- a/bip-0343.mediawiki
+++ b/bip-0343.mediawiki
@@ -6,7 +6,7 @@
Michael Folkson <michaelfolkson@gmail.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0343
- Status: Proposed
+ Status: Final
Type: Standards Track
Created: 2021-04-25
License: BSD-3-Clause
diff --git a/bip-0350.mediawiki b/bip-0350.mediawiki
index 9873d80..439b2a2 100644
--- a/bip-0350.mediawiki
+++ b/bip-0350.mediawiki
@@ -5,7 +5,7 @@
Author: Pieter Wuille <pieter@wuille.net>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0350
- Status: Draft
+ Status: Final
Type: Standards Track
Created: 2020-12-16
License: BSD-2-Clause
diff --git a/bip-0351.mediawiki b/bip-0351.mediawiki
new file mode 100644
index 0000000..0a31ca8
--- /dev/null
+++ b/bip-0351.mediawiki
@@ -0,0 +1,263 @@
+<pre>
+ BIP: 351
+ Layer: Applications
+ Title: Private Payments
+ Author: Alfred Hodler <alfred_hodler@protonmail.com>
+ Clark Moody <clark@clarkmoody.com>
+ Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0351
+ Status: Draft
+ Type: Informational
+ Created: 2022-07-10
+ License: MIT
+</pre>
+
+==Abstract==
+
+This BIP makes it possible for two parties to transact using addresses that only they can calculate. This is done using exclusively on-chain methods and in a manner that minimizes blockchain footprint. Receiving parties can share their payment codes publicly without a loss of privacy, as every sender will calculate a unique set of addresses for each payment code.
+
+==Motivation==
+
+A recipient that wishes to receive funds privately has several options. Each has tradeoffs in terms of chain analysis potential, recoverability, and wallet complexity.
+
+'''Sharing a static address''' works well enough for one-time payments between two parties as long as the address is shared through a private channel. It does not work well for recurring payments because address reuse leads to a loss of privacy. Using this method for donations exacerbates the problem since the address will serve as a focal point for data collection and analysis. Wallets must not reissue the same address to multiple recipients.
+
+'''Sharing a BIP32 extended public key''' works for recurring payments between two parties only. The same key cannot be shared to any other party without leaking the chain of payments. Furthermore, an extended public key does not say anything about address types and makes it possible for a sender to send to a script that a recipient cannot spend from. Alternate [https://github.com/satoshilabs/slips/blob/master/slip-0132.md version bytes] have been proposed to specify address types, but wallet adoption is limited.
+
+'''Sharing a BIP380 descriptor containing an extended public key''' solves the address type issue from sharing a raw BIP32 extended key. The drawback is that descriptor support is not widespread, especially in mobile wallets.
+
+'''Using a payment server''' works in the case of recipients that have the resources to set up and maintain a payment server that will generate a fresh address for each payment. These are usually businesses, and the method is usually out of reach for the average user. The centralized server is vulnerable to remote and physical takedown.
+
+'''Sharing a BIP47 payment code''' addresses most of the above shortcomings. However, it introduces the following problems:
+
+* The BIP uses a notification mechanism that relies on publicly known per-recipient notification addresses. If Alice wants to send funds to Bob, she has to use the same notification address that everyone else uses to notify Bob. If Alice is not careful with coin selection, i.e. ensuring that her notification UTXO is not linked to her, she will publicly expose herself as someone who is trying to send funds to Bob and their relationship becomes permanently visible on the blockchain.
+
+* The BIP does not say anything about address types. Receiving wallets therefore have to watch all address types that can be created from a single public key. Even then, a sender could send to a script that a recipient cannot spend from.
+
+==Method==
+
+When Alice wants to start paying Bob in private, she imports his payment code into a compatible wallet. Her wallet extracts Bob's public key from the payment code and sends a notification transaction. If Bob finds a notification transaction addressed to himself, he imports Alice's public key contained therein and stores it. Bob then performs ECDH using Alice's public key and his own private key in order to calculate a common set of addresses to watch. Alice calculates the same set of addresses on her end and uses them to send coins to Bob. If Alice engages in coin control, neither the initial notification transaction nor the subsequent payment transactions can be attributed to either party. Even if Alice uses coins that are already associated with her, chain analysis will identify her as a sender but Bob's privacy will remain entirely preserved.
+
+==Specification==
+
+===Definitions===
+
+* Alice: sender
+* Bob: recipient
+* Payment code: static string that Bob generates and shares with others so that he can receive payments
+* ''P'': public key contained in Bob's payment code
+* ''p'': private key associated with Bob's public key ''P''
+* ''N'': extended public key used by Alice to derive child keys for each Bob she wants to transact with
+* ''n'': private key associated with Alice's public key ''N''
+* ''x'': Alice's secret recipient index, unique for each Bob
+* ''N<sub>x</sub>'': child public key derived from ''N'' at index ''x'' (non-hardened)
+* ''n<sub>x</sub>'': private key associated with ''N<sub>x</sub>''
+* ''c'': Alice's transaction count toward Bob
+* ''P<sub>c</sub>'': Bob's public key at index ''c''
+* ''p<sub>c</sub>'': Bob's private key at index ''c''
+* ''A<sub>c</sub>'': Bob's receive address at index ''c''
+* ''H'': SHA256 hash function
+* ''*'': EC multiplication
+* ''+'': EC addition
+* ''|'': string concatenation
+* ''[a..b]'': string slicing (inclusive of ''a'', exclusive of ''b'')
+
+===Public Key Derivation Path===
+
+The derivation path for this BIP follows BIP44. The following BIP32 path levels are defined:
+
+<code>
+m / purpose' / coin_type' / account'
+</code>
+
+<code>purpose</code> is set to 351.
+
+''(p, P)'' and ''(n, N)'' are keys associated with the above path, depending on which side is performing the calculation.
+
+''N<sub>x</sub>'' keys are the direct non-hardened children of ''N''. For instance, the path of ''N<sub>0</sub>'' from ''N'' is ''m / 0''.
+
+===Payment Code Structure and Encoding===
+
+* bytes <code>[0..2]</code>: address type flags (2 bytes)
+* bytes <code>[2..35]</code>: compressed public key P (33 bytes)
+
+Payment codes are encoded in bech32m and the human readable part is "pay" for mainnet and "payt" for testnet (all types), resulting in payment codes that look like "pay1cqqq8d29g0a7m8ghmycqk5yv24mfh3xg8ptzqcn8xz6d2tjl8ccdnfkpjl7p84".
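For illustration only (not part of the BIP text above), a minimal sketch of assembling the 35-byte payload that is then bech32m-encoded with the "pay"/"payt" human-readable part; the helper name is hypothetical:
<source lang="python">
# Illustrative sketch: 2 bytes of big-endian address type flags followed by
# the 33-byte compressed public key P. The bech32m encoding step itself is
# left to a BIP350 encoder and is not shown here.
def payment_code_payload(address_type_flags: int, pubkey: bytes) -> bytes:
    assert 0 <= address_type_flags <= 0xFFFF
    assert len(pubkey) == 33
    return address_type_flags.to_bytes(2, "big") + pubkey  # 35 bytes total
</source>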
+
+===Address Types===
+
+Address type flags determine which address types a payment code accepts. This is represented as a 16-bit value in big-endian byte order. For instance, a hypothetical payment code that handles all address types will have all defined bits set to 1 (<code>0xffff</code>).
+
+Currently defined flags:
+
+{| class="wikitable"
+! Address Type !! Flag !! Flag Value !! Ordinal Value
+|-
+| P2PKH || <code>1 << 0</code> || <code>0x0001</code> || 0
+|-
+| P2WPKH || <code>1 << 1</code> || <code>0x0002</code> || 1
+|-
+| P2TR || <code>1 << 2</code> || <code>0x0004</code> || 2
+|}
+
+The remaining flags are reserved for future address types.
+
+While payment codes use 2-byte bitflag arrays, notifications use ordinal values in the form of a single byte.
+
+All keys are compressed. Using uncompressed keys at any point is illegal.
+
+===Notifications===
+
+Notifications are performed by publishing transactions that contain a 40-byte <code>OP_RETURN</code> output. The value of the <code>OP_RETURN</code> is constructed using the following formula:
+
+''search_key | notification_code | N<sub>x</sub> | address_type''
+
+* ''search_key'' equals "PP" and is a static ASCII-encoded string (2 bytes)
+* ''notification_code'' is ''H(n<sub>x</sub> * P)[0..4]'' (4 bytes)
+* ''N<sub>x</sub>'' is the unique public key a sender is using for a particular recipient (33 bytes)
+* ''address_type'' is the '''ordinal''' value of a single address type that a sender wants to send to (1 byte). This must be selected from the recipient's accepted address types.
+
+When Alice wants to notify Bob that he will receive future payments from her, she performs the following procedure:
+
+# Assigns an unused, unique index ''x'' to Bob (''0'' if Bob is the first party she is notifying).
+# Calculates a 4-byte notification code: ''notification_code = H(n<sub>x</sub> * P)[0..4]''
+# Commits to one of Bob's accepted address types by choosing its ordinal value. Going forward Alice must not send to address types other than the one she committed to in the notification.
+# Constructs a notification payload by concatenating the above values according to the formula.
+# Selects any UTXO in her wallet, preferably not associated with her.
+# Sends a transaction including an <code>OP_RETURN</code> output whose value is set to the constructed payload.
+
+When Bob notices a 40-byte <code>OP_RETURN</code> starting with ''search_key'', he performs the following procedure:
+
+# Breaks down the payload into its four constituent parts.
+# Discards the ''search_key'' (item #0).
+# Selects ''N<sub>x</sub>'' (item #2) and performs ''H(N<sub>x</sub> * p)'' (Bob does not know the value of ''x''). Bob takes the first four bytes of the calculated value.
+# If the four bytes match the notification value (item #1), Bob found a notification addressed to himself and stores ''N<sub>x</sub>'' together with ''address_type''.
+# If this process fails for any reason, Bob assumes a spurious notification or one not addressed to himself and gives up.
+
+Since changing ''x'' yields a completely different sender identity, Alice can always re-notify Bob from a different index when she does not want to be associated with her previous identity. Alice can also re-notify Bob when she wants to start sending to a different address type. Bob must be able to update his watchlist in that case and he can stop watching addresses associated with the old address type.
+
+Out-of-band notifications between Alice and Bob are legal (in fact, they may not be prevented), but Bob then loses the ability to restore his wallet from <code>OP_RETURN</code> outputs embedded in the blockchain. In that case, Bob has the burden of keeping a valid backup of any out-of-band notifications.
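As an editorial sketch (not part of the BIP), the payload construction and Bob's matching test described above can be written as follows, assuming the shared point (''n<sub>x</sub>*P'' for Alice, ''N<sub>x</sub>*p'' for Bob) has already been computed by an EC library and is hashed in its 33-byte compressed form, as ''S'' appears in the test vectors of Appendix A:
<source lang="python">
import hashlib

def notification_payload(shared_point: bytes, N_x: bytes, address_type: int) -> bytes:
    # shared_point: 33-byte compressed n_x*P; N_x: 33-byte compressed sender key.
    notification_code = hashlib.sha256(shared_point).digest()[:4]
    payload = b"PP" + notification_code + N_x + bytes([address_type])
    assert len(payload) == 40
    return payload

def notification_matches(payload: bytes, shared_point: bytes) -> bool:
    # Bob's side: recompute H(N_x * p)[0..4] and compare with bytes 2..6.
    return (len(payload) == 40 and payload[:2] == b"PP"
            and hashlib.sha256(shared_point).digest()[:4] == payload[2:6])
</source>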
+
+===Allowing Notification Collisions===
+
+Since ''notification_code'' is a 4-byte truncation of the full value, Bob has a 1 in ~4.3 billion chance of detecting a spurious notification. This is considered acceptable because the cost of doing so is adding a few more addresses to Bob's watchlist. The benefit of this approach is that it saves 28 bytes per notification.
+
+===Scanning Requirement===
+
+There is a scanning requirement on the recipient side in that the recipient must have access to full blocks in order to be able to search them for OP_RETURN outputs containing notifications. For more information on how light clients can get around this limitation and still use the standard, see Appendix B.
+
+Recipients that do not want to decode raw block data can quickly search for notifications in a block by looking for the following byte array: <code>[106, 40, 80, 80]</code>. The first two bytes represent ''OP_RETURN'' and ''OP_PUSHBYTES_40'', followed by the ASCII value of ''search_key''.
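A sketch of that quick filter (editorial, not part of the BIP):
<source lang="python">
# Candidate notification outputs are exactly 42 bytes: OP_RETURN (0x6a),
# OP_PUSHBYTES_40 (0x28), then the 40-byte payload starting with ASCII "PP".
NOTIFICATION_SCRIPT_PREFIX = bytes([106, 40, 80, 80])

def is_candidate_notification(script_pubkey: bytes) -> bool:
    return len(script_pubkey) == 42 and script_pubkey.startswith(NOTIFICATION_SCRIPT_PREFIX)
</source>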
+
+===Transacting===
+
+Alice initializes a counter ''c'', which is unique to Bob and increments with each transaction. ''c'' is a 64-bit integer and must be fed into the hasher as a big-endian encoded array of 8 bytes.
+
+1. Alice calculates a secret point (constant between Alice and Bob):
+
+''S = n<sub>x</sub> * P''
+
+2. Alice calculates a shared secret:
+
+''s = H(S | c)''
+
+3. Alice calculates Bob's ephemeral public key and its associated address where the funds will be sent:
+
+''P<sub>c</sub> = P + s*G''
+
+4. Alice constructs an address from the key ''P<sub>c</sub>'', using the address type she committed to in the notification transaction.
+
+Bob constructs his watchlist by mirroring this process on his end, except that his method of calculating ''S'' is:
+
+''S = N<sub>x</sub> * p''
+
+When Bob wants to spend from such addresses, he calculates his private keys in the following manner:
+
+''p<sub>c</sub> = p + s''
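A minimal editorial sketch of the derivation above (not part of the BIP). It assumes ''S'' is hashed in its 33-byte compressed form, as it appears in the test vectors of Appendix A; deriving ''P<sub>c</sub>'' additionally requires EC point addition, which is left to an EC library:
<source lang="python">
import hashlib

N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141  # secp256k1 order

def shared_secret(S_compressed: bytes, c: int) -> int:
    # s = H(S | c), with c encoded as 8 big-endian bytes.
    return int.from_bytes(hashlib.sha256(S_compressed + c.to_bytes(8, "big")).digest(), "big")

def bob_spending_key(p: int, S_compressed: bytes, c: int) -> int:
    # p_c = p + s (mod n); Alice instead computes P_c = P + s*G with point arithmetic.
    return (p + shared_secret(S_compressed, c)) % N
</source>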
+
+==Backward Compatibility==
+
+Private Payments is a new standard which is not compatible with any previous standard based on static payment codes, such as BIP47.
+
+While the standard does not support versioning, it reserves unused bits in the address type bitflag array which can be allocated to new address types once they are deemed ubiquitous. Older payment codes (i.e. those generated when fewer address types were available) are readable by software supporting new address types. The reverse is also supported since older software will ignore newer address type flags that are not understood.
+
+==Appendix A: Test Vectors==
+
+===Alice's Wallet===
+
+'''BIP32 seed:''' 0xfe
+
+'''Master xprv:''' xprv9s21ZrQH143K2qVytoy3eZSSuc1gfzFrkV4bgoHzYTkgge4UoNP62eV8jkHYNqddaaefpnjwkz71P5m4EW6RuQBJeP9pdfa9WBnjP6XUivG
+
+'''n:''' xprv9zNFGn56Wm1s89ycTCg4hB615ehu6ZvNL4mxUEAL28pNhBAb6SZgLdsgmQd1ECgAiCjy6XxTTRyBdPAhH1oMfLhv2bSwfiCYhL9s9ahEehf
+
+'''N:''' xpub6DMbgHbzM8aALe45ZED54K2jdgYPW2eDhHhZGcZwaUMMZyVjdysvtSCAcfPYiqB5Zw41EyLWPxCXko6iEckwRdF5CD2ZKdTxUKigPXsnpaE
+
+'''x:''' 0
+
+'''n<sub>x</sub>:''' be9518016ec15762877de7d2ce7367a2087cf5682e72bbffa89535d73bb42f40
+
+'''N<sub>x</sub>:''' 02e3217349724307eed5514b53b1f53f0802672a9913d9bbb76afecc86be23f464
+
+
+===Bob's Wallet===
+'''BIP32 seed:''' 0xff
+
+'''Master xprv:''' xprv9s21ZrQH143K47bRNtc26e8Gb3wkUiJ4fH3ewYgJeiGABp7vQtTKsLBzHM2fsfiK7Er6uMrWbdDwwrdcVn5TDC1T1npTFFkdEVoMgTwfVuR
+
+'''p:''' 0x26c610e7d0ed4395be3f0664073d66b0a3442b49e1ec13faf2dd9b7d3c335441
+
+'''P:''' 0x0302be8bff520f35fae3439f245c52afb9085a7bf62d099c1f5e9e1b15a7e2121a
+
+'''Accepted scripts:''' 0x03 (legacy + segwit) (0x01 | 0x02)
+
+'''Payment code:''' pay1qqpsxq4730l4yre4lt3588eyt3f2lwggtfalvtgfns04a8smzkn7yys6xv2gs8
+
+
+===Alice notifying Bob===
+'''S:''' 0x02c0892d6ba30b5b1eafebd47172e46d358721f294698f9f59b4d96b781da09a62
+
+'''Notification code:''' 0x49cb55bb
+
+'''Address type commitment:''' 1 (segwit)
+
+'''Notification output script:''' OP_RETURN OP_PUSHBYTES_40 505049cb55bb02e3217349724307eed5514b53b1f53f0802672a9913d9bbb76afecc86be23f46401
+
+
+===Alice sending to Bob===
+'''c:''' 0
+
+'''s:''' 0x5dbe5efee4a5b9df73708241858f2bf7ec65f141dbd229ea8e2f9f51804a18f2
+
+'''s*G:''' 0x039362033c1bc3f05e081d4d7f76d5ffebde349b0f6a4d2e8ffc5c065c17233247
+
+'''P<sub>c</sub>:''' 0x03e669bd1705691a080840b07d76713d040934a37f2e8dde2fe02f5d3286a49219
+
+'''A<sub>c</sub>:''' bc1qw7ld5h9tj2ruwxqvetznjfq9g5jyp0gjhrs30w
+
+
+===Bob spending===
+'''c:''' 0
+
+'''p<sub>c</sub>:''' 0x84846fe6b592fd7531af88a58ccc92a88faa1c8bbdbe3de5810d3acebc7d6d33
+
+==Appendix B: Potential OP_RETURN Services==
+
+Compact Block Filters, as formulated in BIP-0158, do not cover <code>OP_RETURN</code> data payloads. In support of light wallets, an external service could publish transaction proofs for all transactions that include the tagged notification payload. Light wallets would download all such transactions, filter for matches against their payment code, then verify the transaction proofs against the block headers obtained over the P2P network.
+
+==Appendix C: Potential Notification Transaction Services==
+
+No specific instruction is given as to the details of the notification transaction beyond simply including the single <code>OP_RETURN</code> payload. Since no restriction exists for other inputs or outputs of this transaction, there is an opportunity for an external service to include this payload in a transaction completely unrelated to Alice's wallet. Such a service could charge a fee out-of-band to help cover fees.
+
+Another opportunity exists for an existing business to attach notification payloads to transactions sent during the normal course of operations. Large withdrawal transactions from mining pools or exchanges could include a marginal notification payload without affecting overall fees.
+
+==Reference Implementation==
+
+Reference implementation is available at https://github.com/private-payments/rust-private-payments
+
+==Reference==
+* [[bip-0032.mediawiki|BIP32 - Hierarchical Deterministic Wallets]]
+* [[bip-0043.mediawiki|BIP43 - Purpose Field for Deterministic Wallets]]
+* [[bip-0044.mediawiki|BIP44 - Multi-Account Hierarchy for Deterministic Wallets]]
+* [[bip-0047.mediawiki|BIP47 - Reusable Payment Codes for Hierarchical Deterministic Wallets]]
+* [[bip-0157.mediawiki|BIP157 - Client Side Block Filtering]]
+* [[bip-0158.mediawiki|BIP158 - Compact Block Filters for Light Clients]]
+* [https://gist.github.com/RubenSomsen/21c477c90c942acf45f8e8f5c1ad4fae BIP47 Prague Discussion] (acknowledgements: @rubensomsen, @afilini, @kixunil)
+
diff --git a/bip-0370.mediawiki b/bip-0370.mediawiki
index 8519473..cb59801 100644
--- a/bip-0370.mediawiki
+++ b/bip-0370.mediawiki
@@ -62,7 +62,7 @@ The new global types for PSBT Version 2 are as follows:
| <tt>PSBT_GLOBAL_TX_VERSION = 0x02</tt>
| None
| No key data
-| <tt><32-bit little endian uint version></tt>
+| <tt><32-bit little endian int version></tt>
| The 32-bit little endian signed integer representing the version number of the transaction being created. Note that this is not the same as the PSBT version number specified by the PSBT_GLOBAL_VERSION field.
| 2
| 0
@@ -166,16 +166,8 @@ The new per-input types for PSBT Version 2 are defined as follows:
| <tt>PSBT_IN_REQUIRED_HEIGHT_LOCKTIME = 0x12</tt>
| None
| No key data
-<<<<<<< HEAD
-| <tt><32-bit uiht></tt>
+| <tt><32-bit uint locktime></tt>
| 32 bit unsigned little endian integer greater than 0 and less than 500000000 representing the minimum block height that this input requires to be set as the transaction's lock time.
-||||||| b505101a
-| <tt><32-bit uiht></tt>
-| 32 bit unsigned little endian integer less than 500000000 representing the minimum block height that this input requires to be set as the transaction's lock time.
-=======
-| <tt><32-bit uiht locktime></tt>
-| 32 bit unsigned little endian integer less than 500000000 representing the minimum block height that this input requires to be set as the transaction's lock time.
->>>>>>> origin-pull/1345/head
|
| 0
| 2
@@ -243,9 +235,9 @@ PSBTv2 introduces new roles and modifies some existing roles.
===Creator===
In PSBTv2, the Creator initializes the PSBT with 0 inputs and 0 outputs.
-The PSBT version number is set to 2. The transaction version number must be set to at least 2. <ref>'''Why does the transaction version number need to be at least 2?''' The transaction version number is part of the validation rules for some features such as OP_CHECKSEQUENCEVERIFY. Since it is backwards compatible, and there are other ways to disable those features (e.g. through sequence numbers), it is easier to require transactions be able to support these features than to try to negotiate the transaction version number.</ref>
+The PSBT version number is set to 2.
The Creator should also set PSBT_GLOBAL_FALLBACK_LOCKTIME.
-If the Creator is not also a Constructor and will be giving the PSBT to others to add inputs and outputs, the PSBT_GLOBAL_TX_MODIFIABLE field must be present and and the Inputs Modifiable and Outputs Modifiable flags set appropriately.
+If the Creator is not also a Constructor and will be giving the PSBT to others to add inputs and outputs, the PSBT_GLOBAL_TX_MODIFIABLE field must be present and the Inputs Modifiable and Outputs Modifiable flags set appropriately; moreover, the transaction version number must be set to at least 2. <ref>'''Why does the transaction version number need to be at least 2?''' The transaction version number is part of the validation rules for some features such as OP_CHECKSEQUENCEVERIFY. Since it is backwards compatible, and there are other ways to disable those features (e.g. through sequence numbers), it is easier to require transactions be able to support these features than to try to negotiate the transaction version number.</ref>
If the Creator is a Constructor and no inputs and outputs will be added by other entities, PSBT_GLOBAL_TX_MODIFIABLE may be omitted.
===Constructor===
@@ -296,7 +288,7 @@ The Extractor should produce a fully valid, network serialized transaction if al
==Backwards Compatibility==
-PSBTv2 shares the same gemeric format as PSBTv0 as defined in BIP 174. Parsers for PSBTv0 should
+PSBTv2 shares the same generic format as PSBTv0 as defined in BIP 174. Parsers for PSBTv0 should
be able to deserialize PSBTv2 with only changes to support the new fields.
However PSBTv2 is incompatible with PSBTv0, and vice versa due to the use of the PSBT_GLOBAL_VERSION.
@@ -325,11 +317,11 @@ The following are invalid PSBTs:
** Base64 String: <pre>cHNidP8BAHECAAAAAQsK2SFBnByHGXNdctxzn56p4GONH+TB7vD5lECEgV/IAAAAAAD+////AgAIry8AAAAAFgAUxDD2TEdW2jENvRoIVXLvKZkmJyyLvesLAAAAABYAFKB9rIq2ypQtN57Xlfg1unHJzGiFAAAAAAEEAQIAAQBSAgAAAAHBqiVuIUuWoYIvk95Cv/O18/+NBRkwbjUV11FaXoBbEgAAAAAA/////wEYxpo7AAAAABYAFLCjrxRCCEEmk8p9FmhStS2wrvBuAAAAAAEBHxjGmjsAAAAAFgAUsKOvFEIIQSaTyn0WaFK1LbCu8G4BCGsCRzBEAiAFJ1pIVzTgrh87lxI3WG8OctyFgz0njA5HTNIxEsD6XgIgawSMg868PEHQuTzH2nYYXO29Aw0AWwgBi+K5i7rL33sBIQN2DcygXzmX3GWykwYPfynxUUyMUnBI4SgCsEHU/DQKJwAiAgLWAfhIRqZ1X3dr4A49nej7EKzJNfuDxF+wFi1MrVq3khj2nYc+VAAAgAEAAIAAAACAAAAAACoAAAAAIgIDbv4sJVYhmGVTup1lw93GQWXKFDbgWqNaTG6wJFHPeW0Y9p2HPlQAAIABAACAAAAAgAEAAABiAAAAAA==</pre>
* Case: PSBTv0 but with PSBT_GLOBAL_OUTPUT_COUNT.
-** Bytes in Hex: <pre>i70736274ff01007102000000010b0ad921419c1c8719735d72dc739f9ea9e0638d1fe4c1eef0f9944084815fc80000000000feffffff020008af2f00000000160014c430f64c4756da310dbd1a085572ef299926272c8bbdeb0b00000000160014a07dac8ab6ca942d379ed795f835ba71c9cc68850000000001050102000100520200000001c1aa256e214b96a1822f93de42bff3b5f3ff8d0519306e3515d7515a5e805b120000000000ffffffff0118c69a3b00000000160014b0a3af144208412693ca7d166852b52db0aef06e0000000001011f18c69a3b00000000160014b0a3af144208412693ca7d166852b52db0aef06e01086b02473044022005275a485734e0ae1f3b971237586f0e72dc85833d278c0e474cd23112c0fa5e02206b048c83cebc3c41d0b93cc7da76185cedbd030d005b08018be2b98bbacbdf7b012103760dcca05f3997dc65b293060f7f29f1514c8c527048e12802b041d4fc340a2700220202d601f84846a6755f776be00e3d9de8fb10acc935fb83c45fb0162d4cad5ab79218f69d873e540000800100008000000080000000002a000000002202036efe2c255621986553ba9d65c3ddc64165ca1436e05aa35a4c6eb02451cf796d18f69d873e540000800100008000000080010000006200000000</pre>
+** Bytes in Hex: <pre>70736274ff01007102000000010b0ad921419c1c8719735d72dc739f9ea9e0638d1fe4c1eef0f9944084815fc80000000000feffffff020008af2f00000000160014c430f64c4756da310dbd1a085572ef299926272c8bbdeb0b00000000160014a07dac8ab6ca942d379ed795f835ba71c9cc68850000000001050102000100520200000001c1aa256e214b96a1822f93de42bff3b5f3ff8d0519306e3515d7515a5e805b120000000000ffffffff0118c69a3b00000000160014b0a3af144208412693ca7d166852b52db0aef06e0000000001011f18c69a3b00000000160014b0a3af144208412693ca7d166852b52db0aef06e01086b02473044022005275a485734e0ae1f3b971237586f0e72dc85833d278c0e474cd23112c0fa5e02206b048c83cebc3c41d0b93cc7da76185cedbd030d005b08018be2b98bbacbdf7b012103760dcca05f3997dc65b293060f7f29f1514c8c527048e12802b041d4fc340a2700220202d601f84846a6755f776be00e3d9de8fb10acc935fb83c45fb0162d4cad5ab79218f69d873e540000800100008000000080000000002a000000002202036efe2c255621986553ba9d65c3ddc64165ca1436e05aa35a4c6eb02451cf796d18f69d873e540000800100008000000080010000006200000000</pre>
** Base64 String: <pre>cHNidP8BAHECAAAAAQsK2SFBnByHGXNdctxzn56p4GONH+TB7vD5lECEgV/IAAAAAAD+////AgAIry8AAAAAFgAUxDD2TEdW2jENvRoIVXLvKZkmJyyLvesLAAAAABYAFKB9rIq2ypQtN57Xlfg1unHJzGiFAAAAAAEFAQIAAQBSAgAAAAHBqiVuIUuWoYIvk95Cv/O18/+NBRkwbjUV11FaXoBbEgAAAAAA/////wEYxpo7AAAAABYAFLCjrxRCCEEmk8p9FmhStS2wrvBuAAAAAAEBHxjGmjsAAAAAFgAUsKOvFEIIQSaTyn0WaFK1LbCu8G4BCGsCRzBEAiAFJ1pIVzTgrh87lxI3WG8OctyFgz0njA5HTNIxEsD6XgIgawSMg868PEHQuTzH2nYYXO29Aw0AWwgBi+K5i7rL33sBIQN2DcygXzmX3GWykwYPfynxUUyMUnBI4SgCsEHU/DQKJwAiAgLWAfhIRqZ1X3dr4A49nej7EKzJNfuDxF+wFi1MrVq3khj2nYc+VAAAgAEAAIAAAACAAAAAACoAAAAAIgIDbv4sJVYhmGVTup1lw93GQWXKFDbgWqNaTG6wJFHPeW0Y9p2HPlQAAIABAACAAAAAgAEAAABiAAAAAA==</pre>
-* Case: PSBTv0 but with PSBT_GLOABAL_TX_MODIFIABLE.
-** Bytes in Hex: <pre><70736274ff01007102000000010b0ad921419c1c8719735d72dc739f9ea9e0638d1fe4c1eef0f9944084815fc80000000000feffffff020008af2f00000000160014c430f64c4756da310dbd1a085572ef299926272c8bbdeb0b00000000160014a07dac8ab6ca942d379ed795f835ba71c9cc68850000000001060100000100520200000001c1aa256e214b96a1822f93de42bff3b5f3ff8d0519306e3515d7515a5e805b120000000000ffffffff0118c69a3b00000000160014b0a3af144208412693ca7d166852b52db0aef06e0000000001011f18c69a3b00000000160014b0a3af144208412693ca7d166852b52db0aef06e01086b02473044022005275a485734e0ae1f3b971237586f0e72dc85833d278c0e474cd23112c0fa5e02206b048c83cebc3c41d0b93cc7da76185cedbd030d005b08018be2b98bbacbdf7b012103760dcca05f3997dc65b293060f7f29f1514c8c527048e12802b041d4fc340a2700220202d601f84846a6755f776be00e3d9de8fb10acc935fb83c45fb0162d4cad5ab79218f69d873e540000800100008000000080000000002a000000002202036efe2c255621986553ba9d65c3ddc64165ca1436e05aa35a4c6eb02451cf796d18f69d873e540000800100008000000080010000006200000000/pre>
+* Case: PSBTv0 but with PSBT_GLOBAL_TX_MODIFIABLE.
+** Bytes in Hex: <pre>70736274ff01007102000000010b0ad921419c1c8719735d72dc739f9ea9e0638d1fe4c1eef0f9944084815fc80000000000feffffff020008af2f00000000160014c430f64c4756da310dbd1a085572ef299926272c8bbdeb0b00000000160014a07dac8ab6ca942d379ed795f835ba71c9cc68850000000001060100000100520200000001c1aa256e214b96a1822f93de42bff3b5f3ff8d0519306e3515d7515a5e805b120000000000ffffffff0118c69a3b00000000160014b0a3af144208412693ca7d166852b52db0aef06e0000000001011f18c69a3b00000000160014b0a3af144208412693ca7d166852b52db0aef06e01086b02473044022005275a485734e0ae1f3b971237586f0e72dc85833d278c0e474cd23112c0fa5e02206b048c83cebc3c41d0b93cc7da76185cedbd030d005b08018be2b98bbacbdf7b012103760dcca05f3997dc65b293060f7f29f1514c8c527048e12802b041d4fc340a2700220202d601f84846a6755f776be00e3d9de8fb10acc935fb83c45fb0162d4cad5ab79218f69d873e540000800100008000000080000000002a000000002202036efe2c255621986553ba9d65c3ddc64165ca1436e05aa35a4c6eb02451cf796d18f69d873e540000800100008000000080010000006200000000</pre>
** Base64 String: <pre>cHNidP8BAHECAAAAAQsK2SFBnByHGXNdctxzn56p4GONH+TB7vD5lECEgV/IAAAAAAD+////AgAIry8AAAAAFgAUxDD2TEdW2jENvRoIVXLvKZkmJyyLvesLAAAAABYAFKB9rIq2ypQtN57Xlfg1unHJzGiFAAAAAAEGAQAAAQBSAgAAAAHBqiVuIUuWoYIvk95Cv/O18/+NBRkwbjUV11FaXoBbEgAAAAAA/////wEYxpo7AAAAABYAFLCjrxRCCEEmk8p9FmhStS2wrvBuAAAAAAEBHxjGmjsAAAAAFgAUsKOvFEIIQSaTyn0WaFK1LbCu8G4BCGsCRzBEAiAFJ1pIVzTgrh87lxI3WG8OctyFgz0njA5HTNIxEsD6XgIgawSMg868PEHQuTzH2nYYXO29Aw0AWwgBi+K5i7rL33sBIQN2DcygXzmX3GWykwYPfynxUUyMUnBI4SgCsEHU/DQKJwAiAgLWAfhIRqZ1X3dr4A49nej7EKzJNfuDxF+wFi1MrVq3khj2nYc+VAAAgAEAAIAAAACAAAAAACoAAAAAIgIDbv4sJVYhmGVTup1lw93GQWXKFDbgWqNaTG6wJFHPeW0Y9p2HPlQAAIABAACAAAAAgAEAAABiAAAAAA==</pre>
* Case: PSBTv0 but with PSBT_IN_PREVIOUS_TXID.
@@ -443,7 +435,7 @@ The following are valid PSBTs
** Base64 String: <pre>cHNidP8BAgQCAAAAAQQBAQEFAQIBBgEHAfsEAgAAAAABAFICAAAAAcGqJW4hS5ahgi+T3kK/87Xz/40FGTBuNRXXUVpegFsSAAAAAAD/////ARjGmjsAAAAAFgAUsKOvFEIIQSaTyn0WaFK1LbCu8G4AAAAAAQEfGMaaOwAAAAAWABSwo68UQghBJpPKfRZoUrUtsK7wbgEOIAsK2SFBnByHGXNdctxzn56p4GONH+TB7vD5lECEgV/IAQ8EAAAAAAAiAgLWAfhIRqZ1X3dr4A49nej7EKzJNfuDxF+wFi1MrVq3khj2nYc+VAAAgAEAAIAAAACAAAAAACoAAAABAwgACK8vAAAAAAEEFgAUxDD2TEdW2jENvRoIVXLvKZkmJywAIgIC42+/9T3VNAcM+P05ZhRoDzV6m4Xbc0C/HPp0XSrXs0AY9p2HPlQAAIABAACAAAAAgAEAAABkAAAAAQMIi73rCwAAAAABBBYAFE3Rk6yWSlasG54cyoRU/i9HT4UTAA==</pre>
* Case: 1 input, 2 output updated PSBTv2, with all possible PSBT_GLOBAL_TX_MODIFIABLE flags set
-** Bytes in Hex: <pre70736274ff010204020000000104010101050102010601ff01fb0402000000000100520200000001c1aa256e214b96a1822f93de42bff3b5f3ff8d0519306e3515d7515a5e805b120000000000ffffffff0118c69a3b00000000160014b0a3af144208412693ca7d166852b52db0aef06e0000000001011f18c69a3b00000000160014b0a3af144208412693ca7d166852b52db0aef06e010e200b0ad921419c1c8719735d72dc739f9ea9e0638d1fe4c1eef0f9944084815fc8010f040000000000220202d601f84846a6755f776be00e3d9de8fb10acc935fb83c45fb0162d4cad5ab79218f69d873e540000800100008000000080000000002a0000000103080008af2f000000000104160014c430f64c4756da310dbd1a085572ef299926272c00220202e36fbff53dd534070cf8fd396614680f357a9b85db7340bf1cfa745d2ad7b34018f69d873e54000080010000800000008001000000640000000103088bbdeb0b0000000001041600144dd193ac964a56ac1b9e1cca8454fe2f474f851300></pre>
+** Bytes in Hex: <pre>70736274ff010204020000000104010101050102010601ff01fb0402000000000100520200000001c1aa256e214b96a1822f93de42bff3b5f3ff8d0519306e3515d7515a5e805b120000000000ffffffff0118c69a3b00000000160014b0a3af144208412693ca7d166852b52db0aef06e0000000001011f18c69a3b00000000160014b0a3af144208412693ca7d166852b52db0aef06e010e200b0ad921419c1c8719735d72dc739f9ea9e0638d1fe4c1eef0f9944084815fc8010f040000000000220202d601f84846a6755f776be00e3d9de8fb10acc935fb83c45fb0162d4cad5ab79218f69d873e540000800100008000000080000000002a0000000103080008af2f000000000104160014c430f64c4756da310dbd1a085572ef299926272c00220202e36fbff53dd534070cf8fd396614680f357a9b85db7340bf1cfa745d2ad7b34018f69d873e54000080010000800000008001000000640000000103088bbdeb0b0000000001041600144dd193ac964a56ac1b9e1cca8454fe2f474f851300</pre>
** Base64 String: <pre>cHNidP8BAgQCAAAAAQQBAQEFAQIBBgH/AfsEAgAAAAABAFICAAAAAcGqJW4hS5ahgi+T3kK/87Xz/40FGTBuNRXXUVpegFsSAAAAAAD/////ARjGmjsAAAAAFgAUsKOvFEIIQSaTyn0WaFK1LbCu8G4AAAAAAQEfGMaaOwAAAAAWABSwo68UQghBJpPKfRZoUrUtsK7wbgEOIAsK2SFBnByHGXNdctxzn56p4GONH+TB7vD5lECEgV/IAQ8EAAAAAAAiAgLWAfhIRqZ1X3dr4A49nej7EKzJNfuDxF+wFi1MrVq3khj2nYc+VAAAgAEAAIAAAACAAAAAACoAAAABAwgACK8vAAAAAAEEFgAUxDD2TEdW2jENvRoIVXLvKZkmJywAIgIC42+/9T3VNAcM+P05ZhRoDzV6m4Xbc0C/HPp0XSrXs0AY9p2HPlQAAIABAACAAAAAgAEAAABkAAAAAQMIi73rCwAAAAABBBYAFE3Rk6yWSlasG54cyoRU/i9HT4UTAA==</pre>
* Case: 1 input, 2 output updated PSBTv2, with all PSBTv2 fields
diff --git a/bip-0371.mediawiki b/bip-0371.mediawiki
index 452584a..599aa54 100644
--- a/bip-0371.mediawiki
+++ b/bip-0371.mediawiki
@@ -78,7 +78,7 @@ The new per-input types are defined as follows:
| Taproot Key BIP 32 Derivation Path
| <tt>PSBT_IN_TAP_BIP32_DERIVATION = 0x16</tt>
| <tt><32 byte xonlypubkey></tt>
-| A 32 byte X-only public key involved in this input. It may be the internal key, or a key present in a leaf script.
+| A 32 byte X-only public key involved in this input. It may be the output key, the internal key, or a key present in a leaf script.
| <tt><compact size uint number of hashes> <32 byte leaf hash>* <4 byte fingerprint> <32-bit little endian uint path element>*</tt>
| A compact size unsigned integer representing the number of leaf hashes, followed by a list of leaf hashes, followed by the 4 byte master key fingerprint concatenated with the derivation path of the public key. The derivation path is represented as 32-bit little endian unsigned integer indexes concatenated with each other. Public keys are those needed to spend this output. The leaf hashes are of the leaves which involve this public key. The internal key does not have leaf hashes, so can be indicated with a <tt>hashes len</tt> of 0. Finalizers should remove this field after <tt>PSBT_IN_FINAL_SCRIPTWITNESS</tt> is constructed.
|
@@ -142,7 +142,7 @@ The new per-output types are defined as follows:
| Taproot Key BIP 32 Derivation Path
| <tt>PSBT_OUT_TAP_BIP32_DERIVATION = 0x07</tt>
| <tt><32 byte xonlypubkey></tt>
-| A 32 byte X-only public key involved in this output. It may be the internal key, or a key present in a leaf script.
+| A 32 byte X-only public key involved in this output. It may be the output key, the internal key, or a key present in a leaf script.
| <tt><compact size uint number of hashes> <32 byte leaf hash>* <4 byte fingerprint> <32-bit little endian uint path element>*</tt>
| A compact size unsigned integer representing the number of leaf hashes, followed by a list of leaf hashes, followed by the 4 byte master key fingerprint concatenated with the derivation path of the public key. The derivation path is represented as 32-bit little endian unsigned integer indexes concatenated with each other. Public keys are those needed to spend this output. The leaf hashes are of the leaves which involve this public key. The internal key does not have leaf hashes, so can be indicated with a <tt>hashes len</tt> of 0. Finalizers should remove this field after <tt>PSBT_IN_FINAL_SCRIPTWITNESS</tt> is constructed.
|
diff --git a/bip-0372.mediawiki b/bip-0372.mediawiki
new file mode 100644
index 0000000..bf98b7c
--- /dev/null
+++ b/bip-0372.mediawiki
@@ -0,0 +1,191 @@
+<pre>
+ BIP: 372
+ Layer: Applications
+ Title: Pay-to-contract tweak fields for PSBT
+ Author: Maxim Orlovsky <orlovsky@lnp-bp.org>
+ Discussions-To: <bitcoin-dev@lists.linuxfoundation.org>
+ Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0372
+ Status: Draft
+ Type: Standards Track
+ Created: 2022-01-16
+ License: BSD-2-Clause
+ Requires: BIP-174
+</pre>
+
+==Introduction==
+
+===Abstract===
+
+This document proposes additional fields for BIP 174 PSBTv0 and BIP 370 PSBTv2
+that allow for pay-to-contract key tweaking data to be included in a PSBT
+of any version. These fields carry the extra-transaction information required
+for a signer to produce valid signatures spending previous outputs.
+
+===Copyright===
+
+This BIP is licensed under the 2-clause BSD license.
+
+===Background===
+
+Key tweaking is a procedure for creating a cryptographic commitment to some
+message using elliptic curve properties. The procedure uses the discrete log
+problem (DLP) to commit to an extra-transaction message. This is done by adding
+to a public key (for which the output owner knows the corresponding private key)
+a hash of the message multiplied by the generator point G of the elliptic curve.
+This produces a tweaked public key containing the commitment. Later, in order
+to spend an output containing a P2C commitment, the same tweak must be
+added to the corresponding private key.
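As an editorial sketch (not part of this BIP), the private-key side of such a tweak is plain modular addition; the exact hash-to-tweak construction differs between the P2C protocols cited below, so the plain SHA256 here is only a placeholder:
<source lang="python">
import hashlib

N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141  # secp256k1 order

def p2c_tweak(original_pubkey: bytes, message: bytes) -> int:
    # Placeholder tweak derivation; real protocols (e.g. LNPBP-1) define their
    # own, typically tagged, hash of the committed message.
    return int.from_bytes(hashlib.sha256(original_pubkey + message).digest(), "big") % N

def p2c_tweak_seckey(seckey: int, original_pubkey: bytes, message: bytes) -> int:
    # The tweaked public key is P + t*G; spending the output requires p + t (mod n).
    return (seckey + p2c_tweak(original_pubkey, message)) % N
</source>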
+
+This type of commitment was originally proposed as a part of the pay-to-contract
+concept by Ilja Gerhardt and Timo Hanke in [1] and later used by Eternity Wall
+[2] for the same purpose. Since then, multiple different protocols for P2C
+have been developed, including OpenTimestamps [3], Elements sidechain P2C tweaks
+[4] and LNPBP-1 [5], which is used for constructing Peter Todd's single-use-seals [6]
+in client-side-validation protocols like RGB.
+
+===Motivation===
+
+P2C outputs can be detected onchain and spent only if the output owner not
+only knows the corresponding original private key, but is also aware of the
+P2C tweak applied to the public key. In order to produce a valid signature, the
+same tweak value must be added (modulo the group order) to the original private
+key by a signer device. This represents a challenge for external signers, which
+may not have any information about such a commitment. This proposal addresses
+this issue by adding relevant fields to the PSBT input information.
+
+The proposal abstracts the details of specific P2C protocols and provides a universal
+method for spending previous outputs containing P2C tweaks, applied to the public
+key contained within any standard form of the <tt>scriptPubkey</tt>, including
+bare scripts and P2PK, P2PKH, P2SH, witness v0 P2WPKH, P2WSH, nested witness v0
+P2WPKH-P2SH, P2WSH-P2SH and witness v1 P2TR outputs.
+
+
+==Design==
+
+P2C-tweaked public keys are already exposed in the
+<tt>PSBT_IN_REDEEM_SCRIPT</tt>, <tt>PSBT_IN_WITNESS_SCRIPT</tt>,
+<tt>PSBT_IN_TAP_INTERNAL_KEY</tt> and <tt>PSBT_IN_TAP_LEAF_SCRIPT</tt> fields;
+the only information the signer needs in order to recognize which keys it should
+sign with is which of the original keys each of them was generated from. This is
+provided by introducing a new `PSBT_IN_P2C_TWEAK` field which has the original
+key as its field key and the tweak as its field value. The signer recognizes the
+keys which are available to it, applies the tweaks to them, sees in which scripts
+the tweaked keys are used, and uses this information to apply the same tweaks to
+the corresponding private keys and produce valid signatures.
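A sketch of how a signer might consume the proposed field (editorial, not part of this BIP; <tt>p2c_tweaks</tt> is a hypothetical mapping of original public key to 32-byte tweak, parsed from the <tt>PSBT_IN_P2C_TWEAK</tt> entries of one input):
<source lang="python">
N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141  # secp256k1 order

def tweaked_signing_keys(p2c_tweaks: dict, my_keys: dict) -> dict:
    """p2c_tweaks: original compressed pubkey -> 32-byte tweak (from the PSBT input).
    my_keys: original compressed pubkey -> private key integer known to this signer.
    Returns a mapping of original pubkey -> tweaked private key for owned keys."""
    out = {}
    for orig_pubkey, tweak in p2c_tweaks.items():
        if orig_pubkey in my_keys:
            out[orig_pubkey] = (my_keys[orig_pubkey] + int.from_bytes(tweak, "big")) % N
    return out
</source>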
+
+
+==Specification==
+
+The new per-input type is defined as follows:
+
+{|
+! Name
+! <tt><keytype></tt>
+! <tt><keydata></tt>
+! <tt><keydata></tt> Description
+! <tt><valuedata></tt>
+! <tt><valuedata></tt> Description
+! Versions Requiring Inclusion
+! Versions Requiring Exclusion
+! Versions Allowing Inclusion
+|-
+| P2C Key Tweak
+| <tt>PSBT_IN_P2C_TWEAK = 0x19</tt>
+| <tt><pubkey></tt>
+| 33 bytes of compact public key serialization specifying the key to which the
+P2C tweak may be applied (i.e. this MUST be the value of the public key before the
+tweak is applied). BIP-340 keys are serialized by appending a `02`
+byte.<ref>'''Why compressed public keys are not distinguished from BIP-340
+public keys''' We follow the logic of BIP32 key derivation, which does not
+make that distinction. The type of the key is defined by the input type,
+and adding an additional PSBT field type would just create the need for handling
+errors when the input type does not match the provided key type.</ref>
+| <tt><tweak></tt>
+| The 32 byte value which MUST be added to a private key to produce a correct
+ECDSA and/or Schnorr signature ("key tweak"). Signers SHOULD remove this field
+after <tt>PSBT_IN_PARTIAL_SIG</tt> is constructed.
+|
+|
+| 0, 2
+| BIP-P2C
+|}
+
+
+==Security considerations==
+
+The scope of this proposal is deliberately kept narrow; it addresses
+only the spending of transaction outputs containing P2C tweaks and does not
+address the construction of new P2C commitments or transactions containing them
+in their outputs.<ref>'''Why only spending of P2C tweaked outputs is covered'''
+P2C tweaks commit to external data, some of which may represent certain value
+(as in some sidechains, or in single-use-seal applications like RGB). Creation
+of such outputs must allow hardware devices to understand the structure of such
+extra-transaction data, which may come in different formats and constantly
+evolve. Thus, this should be addressed by separate standards (or be
+vendor-specific). The current proposal only touches the question of spending an
+output which contains a previously created P2C commitment, which does not create
+a new commitment and does not carry that kind of risk of extra-blockchain
+value loss.</ref>
+
+
+==Rationale==
+
+<references/>
+
+
+==Compatibility==
+
+The proposal is compatible with the existing consensus rules and does not
+require any of their modifications.
+
+The proposed P2C PSBT field provides sufficient information for creating
+valid signatures when spending the following output types containing tweaked
+public keys:
+- bare scripts,
+- P2PK,
+- P2PKH,
+- P2SH,
+- witness v0 P2WPKH and P2WSH,
+- nested witness v0 P2WPKH-P2SH and P2WSH-P2SH.
+
+Post-0 witness versions, including taproot outputs and future witness versions,
+may not be supported or covered by this BIP and may require the addition of new
+fields to the PSBT inputs.
+
+
+==Reference implementation==
+
+WIP
+
+
+==Acknowledgements==
+
+The author is grateful to Andrew Poelstra, who provided an initial set of ideas
+and information from his previous work on the topic, on which the design of this
+standard is based.
+
+
+==Test vectors==
+
+TBD
+
+
+==References==
+
+[1] Ilja Gerhardt, Timo Hanke. Homomorphic Payment Addresses and the
+ Pay-to-Contract Protocol. arXiv:1212.3257 [cs.CR]
+ <https://arxiv.org/pdf/1212.3257.pdf>
+[2] Eternity Wall's "sign-to-contract" article.
+ <https://blog.eternitywall.com/2018/04/13/sign-to-contract/>
+[3] Peter Todd. OpenTimestamps: Scalable, Trust-Minimized, Distributed
+ Timestamping with Bitcoin.
+ <https://petertodd.org/2016/opentimestamps-announcement>
+[4] Adam Back, Matt Corallo, Luke Dashjr, et al. Enabling Blockchain
+ Innovations with Pegged Sidechains (commit 5620e43). Appendix A.
+ <https://blockstream.com/sidechains.pdf>.
+[5] Maxim Orlovsky, Rene Pickhardt, Federico Tenga, et al. Key
+ tweaking: collision-resistant elliptic curve-based commitments.
+ LNPBP-1 Standard.
+ <https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0001.md>
+[6] Peter Todd. Single-use-seals. LNPBP-8 Standard.
+ <https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0008.md>
diff --git a/bip-0380.mediawiki b/bip-0380.mediawiki
index f870734..16a2cf4 100644
--- a/bip-0380.mediawiki
+++ b/bip-0380.mediawiki
@@ -49,6 +49,7 @@ Lastly, the use of common terminology and existing standards allow for Output Sc
Descriptors consist of several types of expressions.
The top level expression is a <tt>SCRIPT</tt>.
This expression may be followed by <tt>#CHECKSUM</tt>, where <tt>CHECKSUM</tt> is an 8 character alphanumeric descriptor checksum.
+Although the checksum is optional for parsing, applications may choose to reject descriptors that do not contain a checksum.
===Script Expressions===
diff --git a/scripts/buildtable.pl b/scripts/buildtable.pl
index 1edd8c0..292f1ee 100755
--- a/scripts/buildtable.pl
+++ b/scripts/buildtable.pl
@@ -89,7 +89,7 @@ my %DefinedLicenses = (
);
my %GrandfatheredPD = map { $_ => undef } qw(9 36 37 38 42 49 50 60 65 67 69 74 80 81 83 90 99 105 107 109 111 112 113 114 122 124 125 126 130 131 132 133 140 141 142 143 144 146 147 150 151 152);
my %TolerateMissingLicense = map { $_ => undef } qw(1 10 11 12 13 14 15 16 21 31 33 34 35 39 43 44 45 47 61 64 68 70 71 72 73 101 102 106 120 121);
-my %TolerateTitleTooLong = map { $_ => undef } qw(39 44 45 47 49 60 67 68 69 73 74 75 80 81 99 105 106 109 113 122 126 131 143 145 147 173);
+my %TolerateTitleTooLong = map { $_ => undef } qw(39 44 45 47 49 60 67 68 69 73 74 75 80 81 99 105 106 109 113 122 126 131 143 145 147 173 327);
my %emails;
@@ -125,9 +125,9 @@ while (++$bipnum <= $topbip) {
} elsif ($field eq 'Title') {
$title = $val;
my $title_len = length($title);
- die "$fn has too-long TItle ($title_len > 44 char max)" if $title_len > 44 and not exists $TolerateTitleTooLong{$bipnum};
+ die "$fn has too-long Title ($title_len > 44 char max)" if $title_len > 44 and not exists $TolerateTitleTooLong{$bipnum};
} elsif ($field eq 'Author') {
- $val =~ m/^(\S[^<@>]*\S) \<([^@>]*\@[\w.]+\.\w+)\>$/ or die "Malformed Author line in $fn";
+ $val =~ m/^(\S[^<@>]*\S) \<([^@>]*\@[\w.-]+\.\w+)\>$/ or die "Malformed Author line in $fn";
my ($authorname, $authoremail) = ($1, $2);
$authoremail =~ s/(?<=\D)$bipnum(?=\D)/<BIPNUM>/g;
$emails{$authorname}->{$authoremail} = undef;
diff --git a/scripts/diffcheck.sh b/scripts/diffcheck.sh
new file mode 100755
index 0000000..4e4c459
--- /dev/null
+++ b/scripts/diffcheck.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+diff README.mediawiki /tmp/table.mediawiki | grep '^[<>] |' >/tmp/after.diff || true
+if git checkout HEAD^ && scripts/buildtable.pl >/tmp/table.mediawiki 2>/dev/null; then
+ diff README.mediawiki /tmp/table.mediawiki | grep '^[<>] |' >/tmp/before.diff || true
+ newdiff=$(diff -s /tmp/before.diff /tmp/after.diff -u | grep '^+')
+ if [ -n "$newdiff" ]; then
+ echo "$newdiff"
+ exit 1
+ fi
+else
+ echo 'Cannot build previous commit table for comparison'
+fi