author     Paul Georgiou <pavlos1998@gmail.com>    2015-07-10 19:21:21 +0300
committer  Paul Georgiou <pavlos1998@gmail.com>    2015-07-10 19:21:21 +0300
commit     e3c42973b2f6fb1ceb277b2681e8ebccd6b588df (patch)
tree       0b2a02e4e38c03a406e51c477db135fc8231aea1 /contrib
parent     66e546577315750ea36eeb26d96842cb768b4315 (diff)
Update Linearize tool to support Windows paths
Diffstat (limited to 'contrib')
-rw-r--r--  contrib/linearize/README.md          |  6
-rwxr-xr-x  contrib/linearize/linearize-data.py  | 12
2 files changed, 10 insertions(+), 8 deletions(-)
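The core of the change is small: linearize-data.py previously built its input and output file names with a hard-coded "/" separator in a format string, which produces broken paths when the configured directories are Windows paths. Switching to os.path.join lets Python pick the separator for the platform the script runs on. A minimal sketch of the idea (the directory name below is hypothetical, not taken from the diff):

    import os.path

    # Old style: assumes "/" as the separator, so a Windows-style blocks
    # directory yields a mixed, often invalid path.
    old_style = "%s/blk%05d.dat" % ("C:\\bitcoin\\blocks", 7)

    # New style: os.path.join uses the host platform's separator
    # (backslash on Windows, forward slash elsewhere).
    new_style = os.path.join("C:\\bitcoin\\blocks", "blk%05d.dat" % 7)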
diff --git a/contrib/linearize/README.md b/contrib/linearize/README.md
index 157586e4d4..06f278f3b3 100644
--- a/contrib/linearize/README.md
+++ b/contrib/linearize/README.md
@@ -3,7 +3,7 @@ Construct a linear, no-fork, best version of the blockchain.
## Step 1: Download hash list
- $ ./linearize-hashes.py linearize.cfg > hashlist.txt
+ $ ./linearize-hashes.py linearize.cfg > hashlist.txt
Required configuration file settings for linearize-hashes:
* RPC: rpcuser, rpcpassword
@@ -14,7 +14,7 @@ Optional config file setting for linearize-hashes:
## Step 2: Copy local block data
- $ ./linearize-data.py linearize.cfg
+ $ ./linearize-data.py linearize.cfg
Required configuration file settings:
* "input": bitcoind blocks/ directory containing blkNNNNN.dat
@@ -26,7 +26,7 @@ output.
Optional config file setting for linearize-data:
* "netmagic": network magic number
-* "max_out_sz": maximum output file size (default 1000*1000*1000)
+* "max_out_sz": maximum output file size (default `1000*1000*1000`)
* "split_timestamp": Split files when a new month is first seen, in addition to
reaching a maximum file size.
* "file_timestamp": Set each file's last-modified time to that of the
diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py
index 7947c6bf72..0f6fde2a6e 100755
--- a/contrib/linearize/linearize-data.py
+++ b/contrib/linearize/linearize-data.py
@@ -12,6 +12,7 @@ import json
import struct
import re
import os
+import os.path
import base64
import httplib
import sys
@@ -115,19 +116,20 @@ class BlockDataCopier:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
- # Extents and cache for out-of-order blocks
+ # Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
- if not self.fileOutput and ((self.outsz + self.inLen) > self.maxOutSz):
+ blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
+ if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
os.utime(outFname, (int(time.time()), highTS))
self.outF = None
self.outFname = None
- self.outFn = outFn + 1
+ self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
@@ -147,7 +149,7 @@ class BlockDataCopier:
if self.fileOutput:
outFname = self.settings['output_file']
else:
- outFname = "%s/blk%05d.dat" % (self.settings['output'], outFn)
+ outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + outFname)
self.outF = open(outFname, "wb")
@@ -165,7 +167,7 @@ class BlockDataCopier:
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
- return "%s/blk%05d.dat" % (self.settings['input'], fn)
+ return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
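The view cuts off here, but the out-of-order machinery referenced above is worth a brief sketch: blockExtents records where each not-yet-wanted block sits in the input files, and fetchBlock later re-reads it from disk. Roughly, and assuming an extent carries the input file number, byte offset and length (the names below are illustrative, not necessarily the script's own):

    from collections import namedtuple

    # Hypothetical extent record: which blk*.dat file the block sits in,
    # where it starts, and how many bytes it spans.
    BlockExtent = namedtuple("BlockExtent", ["fn", "offset", "size"])

    def fetch_block(copier, extent):
        # Re-open the original input file and read the block back out,
        # reusing the copier's inFileName() helper shown in the diff.
        with open(copier.inFileName(extent.fn), "rb") as f:
            f.seek(extent.offset)
            return f.read(extent.size)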