-rw-r--r--   src/blockencodings.h                  6
-rw-r--r--   src/coins.h                           2
-rw-r--r--   src/compressor.h                      4
-rw-r--r--   src/hash.h                            2
-rw-r--r--   src/miner.cpp                         4
-rw-r--r--   src/miner.h                           2
-rw-r--r--   src/qt/transactionfilterproxy.cpp    28
-rw-r--r--   src/rpc/util.cpp                      1
-rw-r--r--   src/rpc/util.h                        1
-rw-r--r--   src/script/bitcoinconsensus.cpp       2
-rw-r--r--   src/serialize.h                      52
-rw-r--r--   src/streams.h                         8
-rw-r--r--   src/test/serialize_tests.cpp          2
-rw-r--r--   src/txdb.cpp                          2
-rw-r--r--   src/undo.h                            6
-rw-r--r--   src/util.cpp                          1
-rwxr-xr-x   test/functional/test_runner.py       68
17 files changed, 83 insertions, 108 deletions
diff --git a/src/blockencodings.h b/src/blockencodings.h
index ba8c1d6a2a..f80821aa65 100644
--- a/src/blockencodings.h
+++ b/src/blockencodings.h
@@ -90,11 +90,11 @@ public:
while (txn.size() < txn_size) {
txn.resize(std::min((uint64_t)(1000 + txn.size()), txn_size));
for (; i < txn.size(); i++)
- READWRITE(REF(TransactionCompressor(txn[i])));
+ READWRITE(TransactionCompressor(txn[i]));
}
} else {
for (size_t i = 0; i < txn.size(); i++)
- READWRITE(REF(TransactionCompressor(txn[i])));
+ READWRITE(TransactionCompressor(txn[i]));
}
}
};
@@ -115,7 +115,7 @@ struct PrefilledTransaction {
if (idx > std::numeric_limits<uint16_t>::max())
throw std::ios_base::failure("index overflowed 16-bits");
index = idx;
- READWRITE(REF(TransactionCompressor(tx)));
+ READWRITE(TransactionCompressor(tx));
}
};
diff --git a/src/coins.h b/src/coins.h
index c6850947e2..a73f016a31 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -69,7 +69,7 @@ public:
::Unserialize(s, VARINT(code));
nHeight = code >> 1;
fCoinBase = code & 1;
- ::Unserialize(s, REF(CTxOutCompressor(out)));
+ ::Unserialize(s, CTxOutCompressor(out));
}
bool IsSpent() const {
diff --git a/src/compressor.h b/src/compressor.h
index ee26f4c533..6fcecd27e9 100644
--- a/src/compressor.h
+++ b/src/compressor.h
@@ -73,7 +73,7 @@ public:
s >> VARINT(nSize);
if (nSize < nSpecialScripts) {
std::vector<unsigned char> vch(GetSpecialSize(nSize), 0x00);
- s >> REF(CFlatData(vch));
+ s >> CFlatData(vch);
Decompress(nSize, vch);
return;
}
@@ -84,7 +84,7 @@ public:
s.ignore(nSize);
} else {
script.resize(nSize);
- s >> REF(CFlatData(script));
+ s >> CFlatData(script);
}
}
};
diff --git a/src/hash.h b/src/hash.h
index 35995a2d15..75353e0c0f 100644
--- a/src/hash.h
+++ b/src/hash.h
@@ -173,7 +173,7 @@ public:
}
template<typename T>
- CHashVerifier<Source>& operator>>(T& obj)
+ CHashVerifier<Source>& operator>>(T&& obj)
{
// Unserialize from this stream
::Unserialize(*this, obj);
diff --git a/src/miner.cpp b/src/miner.cpp
index fcb376c6cb..4b86446774 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -282,7 +282,7 @@ bool BlockAssembler::SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_tran
return mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it);
}
-void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, CTxMemPool::txiter entry, std::vector<CTxMemPool::txiter>& sortedEntries)
+void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries)
{
// Sort package by ancestor count
// If a transaction A depends on transaction B, then A's ancestor count
@@ -418,7 +418,7 @@ void BlockAssembler::addPackageTxs(int &nPackagesSelected, int &nDescendantsUpda
// Package can be added. Sort the entries in a valid order.
std::vector<CTxMemPool::txiter> sortedEntries;
- SortForBlock(ancestors, iter, sortedEntries);
+ SortForBlock(ancestors, sortedEntries);
for (size_t i=0; i<sortedEntries.size(); ++i) {
AddToBlock(sortedEntries[i]);
diff --git a/src/miner.h b/src/miner.h
index 9c086332d4..33a22ba75f 100644
--- a/src/miner.h
+++ b/src/miner.h
@@ -185,7 +185,7 @@ private:
* or if the transaction's cached data in mapTx is incorrect. */
bool SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set &mapModifiedTx, CTxMemPool::setEntries &failedTx);
/** Sort the package in an order that is valid to appear in a block */
- void SortForBlock(const CTxMemPool::setEntries& package, CTxMemPool::txiter entry, std::vector<CTxMemPool::txiter>& sortedEntries);
+ void SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries);
/** Add descendants of given transactions to mapModifiedTx with ancestor
* state updated assuming given transactions are inBlock. Returns number
* of updated descendants. */
diff --git a/src/qt/transactionfilterproxy.cpp b/src/qt/transactionfilterproxy.cpp
index a702461f7a..6301af7553 100644
--- a/src/qt/transactionfilterproxy.cpp
+++ b/src/qt/transactionfilterproxy.cpp
@@ -31,31 +31,35 @@ bool TransactionFilterProxy::filterAcceptsRow(int sourceRow, const QModelIndex &
{
QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);
- int type = index.data(TransactionTableModel::TypeRole).toInt();
- QDateTime datetime = index.data(TransactionTableModel::DateRole).toDateTime();
- bool involvesWatchAddress = index.data(TransactionTableModel::WatchonlyRole).toBool();
- QString address = index.data(TransactionTableModel::AddressRole).toString();
- QString label = index.data(TransactionTableModel::LabelRole).toString();
- QString txid = index.data(TransactionTableModel::TxHashRole).toString();
- qint64 amount = llabs(index.data(TransactionTableModel::AmountRole).toLongLong());
int status = index.data(TransactionTableModel::StatusRole).toInt();
-
- if(!showInactive && status == TransactionStatus::Conflicted)
+ if (!showInactive && status == TransactionStatus::Conflicted)
return false;
- if(!(TYPE(type) & typeFilter))
+
+ int type = index.data(TransactionTableModel::TypeRole).toInt();
+ if (!(TYPE(type) & typeFilter))
return false;
+
+ bool involvesWatchAddress = index.data(TransactionTableModel::WatchonlyRole).toBool();
if (involvesWatchAddress && watchOnlyFilter == WatchOnlyFilter_No)
return false;
if (!involvesWatchAddress && watchOnlyFilter == WatchOnlyFilter_Yes)
return false;
- if(datetime < dateFrom || datetime > dateTo)
+
+ QDateTime datetime = index.data(TransactionTableModel::DateRole).toDateTime();
+ if (datetime < dateFrom || datetime > dateTo)
return false;
+
+ QString address = index.data(TransactionTableModel::AddressRole).toString();
+ QString label = index.data(TransactionTableModel::LabelRole).toString();
+ QString txid = index.data(TransactionTableModel::TxHashRole).toString();
if (!address.contains(m_search_string, Qt::CaseInsensitive) &&
! label.contains(m_search_string, Qt::CaseInsensitive) &&
! txid.contains(m_search_string, Qt::CaseInsensitive)) {
return false;
}
- if(amount < minAmount)
+
+ qint64 amount = llabs(index.data(TransactionTableModel::AmountRole).toLongLong());
+ if (amount < minAmount)
return false;
return true;
diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp
index 593962e710..e72b1c4840 100644
--- a/src/rpc/util.cpp
+++ b/src/rpc/util.cpp
@@ -4,7 +4,6 @@
#include <key_io.h>
#include <keystore.h>
-#include <pubkey.h>
#include <rpc/protocol.h>
#include <rpc/util.h>
#include <tinyformat.h>
diff --git a/src/rpc/util.h b/src/rpc/util.h
index 5380d45a83..c6a79d5cf9 100644
--- a/src/rpc/util.h
+++ b/src/rpc/util.h
@@ -8,7 +8,6 @@
#include <pubkey.h>
#include <script/standard.h>
#include <univalue.h>
-#include <utilstrencodings.h>
#include <boost/variant/static_visitor.hpp>
diff --git a/src/script/bitcoinconsensus.cpp b/src/script/bitcoinconsensus.cpp
index 7d3587e2c2..8cc44b675f 100644
--- a/src/script/bitcoinconsensus.cpp
+++ b/src/script/bitcoinconsensus.cpp
@@ -40,7 +40,7 @@ public:
}
template<typename T>
- TxInputStream& operator>>(T& obj)
+ TxInputStream& operator>>(T&& obj)
{
::Unserialize(*this, obj);
return *this;
diff --git a/src/serialize.h b/src/serialize.h
index dcc8d8691e..c454ba16b7 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -148,8 +148,7 @@ enum
SER_GETHASH = (1 << 2),
};
-#define READWRITE(obj) (::SerReadWrite(s, (obj), ser_action))
-#define READWRITEMANY(...) (::SerReadWriteMany(s, ser_action, __VA_ARGS__))
+#define READWRITE(...) (::SerReadWriteMany(s, ser_action, __VA_ARGS__))
/**
* Implement three methods for serializable objects. These are actually wrappers over
@@ -351,10 +350,10 @@ I ReadVarInt(Stream& is)
}
}
-#define FLATDATA(obj) REF(CFlatData((char*)&(obj), (char*)&(obj) + sizeof(obj)))
-#define VARINT(obj) REF(WrapVarInt(REF(obj)))
-#define COMPACTSIZE(obj) REF(CCompactSize(REF(obj)))
-#define LIMITED_STRING(obj,n) REF(LimitedString< n >(REF(obj)))
+#define FLATDATA(obj) CFlatData((char*)&(obj), (char*)&(obj) + sizeof(obj))
+#define VARINT(obj) WrapVarInt(REF(obj))
+#define COMPACTSIZE(obj) CCompactSize(REF(obj))
+#define LIMITED_STRING(obj,n) LimitedString< n >(REF(obj))
/**
* Wrapper for serializing arrays and POD.
@@ -539,7 +538,7 @@ inline void Serialize(Stream& os, const T& a)
}
template<typename Stream, typename T>
-inline void Unserialize(Stream& is, T& a)
+inline void Unserialize(Stream& is, T&& a)
{
a.Unserialize(is);
}
@@ -825,19 +824,6 @@ struct CSerActionUnserialize
constexpr bool ForRead() const { return true; }
};
-template<typename Stream, typename T>
-inline void SerReadWrite(Stream& s, const T& obj, CSerActionSerialize ser_action)
-{
- ::Serialize(s, obj);
-}
-
-template<typename Stream, typename T>
-inline void SerReadWrite(Stream& s, T& obj, CSerActionUnserialize ser_action)
-{
- ::Unserialize(s, obj);
-}
-
-
@@ -897,17 +883,11 @@ void SerializeMany(Stream& s)
{
}
-template<typename Stream, typename Arg>
-void SerializeMany(Stream& s, Arg&& arg)
-{
- ::Serialize(s, std::forward<Arg>(arg));
-}
-
template<typename Stream, typename Arg, typename... Args>
-void SerializeMany(Stream& s, Arg&& arg, Args&&... args)
+void SerializeMany(Stream& s, const Arg& arg, const Args&... args)
{
- ::Serialize(s, std::forward<Arg>(arg));
- ::SerializeMany(s, std::forward<Args>(args)...);
+ ::Serialize(s, arg);
+ ::SerializeMany(s, args...);
}
template<typename Stream>
@@ -915,27 +895,21 @@ inline void UnserializeMany(Stream& s)
{
}
-template<typename Stream, typename Arg>
-inline void UnserializeMany(Stream& s, Arg& arg)
-{
- ::Unserialize(s, arg);
-}
-
template<typename Stream, typename Arg, typename... Args>
-inline void UnserializeMany(Stream& s, Arg& arg, Args&... args)
+inline void UnserializeMany(Stream& s, Arg&& arg, Args&&... args)
{
::Unserialize(s, arg);
::UnserializeMany(s, args...);
}
template<typename Stream, typename... Args>
-inline void SerReadWriteMany(Stream& s, CSerActionSerialize ser_action, Args&&... args)
+inline void SerReadWriteMany(Stream& s, CSerActionSerialize ser_action, const Args&... args)
{
- ::SerializeMany(s, std::forward<Args>(args)...);
+ ::SerializeMany(s, args...);
}
template<typename Stream, typename... Args>
-inline void SerReadWriteMany(Stream& s, CSerActionUnserialize ser_action, Args&... args)
+inline void SerReadWriteMany(Stream& s, CSerActionUnserialize ser_action, Args&&... args)
{
::UnserializeMany(s, args...);
}
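
With the serialize.h change above, the separate single-argument READWRITE and variadic READWRITEMANY macros collapse into one variadic READWRITE. A minimal sketch of how a serializable class would use it (ExampleObject is a hypothetical type, not from this diff; the real in-tree usage change is in src/test/serialize_tests.cpp further down):

    // Illustrative sketch only; ExampleObject is hypothetical.
    class ExampleObject
    {
    public:
        int32_t nValue{0};
        std::vector<unsigned char> vData;

        ADD_SERIALIZE_METHODS;

        template <typename Stream, typename Operation>
        inline void SerializationOp(Stream& s, Operation ser_action) {
            // One variadic READWRITE replaces the old READWRITE/READWRITEMANY pair.
            READWRITE(nValue, vData);
        }
    };
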
diff --git a/src/streams.h b/src/streams.h
index 9f86c4a163..6ba4f103da 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -42,7 +42,7 @@ public:
}
template<typename T>
- OverrideStream<Stream>& operator>>(T& obj)
+ OverrideStream<Stream>& operator>>(T&& obj)
{
// Unserialize from this stream
::Unserialize(*this, obj);
@@ -399,7 +399,7 @@ public:
}
template<typename T>
- CDataStream& operator>>(T& obj)
+ CDataStream& operator>>(T&& obj)
{
// Unserialize from this stream
::Unserialize(*this, obj);
@@ -543,7 +543,7 @@ public:
}
template<typename T>
- CAutoFile& operator>>(T& obj)
+ CAutoFile& operator>>(T&& obj)
{
// Unserialize from this stream
if (!file)
@@ -686,7 +686,7 @@ public:
}
template<typename T>
- CBufferedFile& operator>>(T& obj) {
+ CBufferedFile& operator>>(T&& obj) {
// Unserialize from this stream
::Unserialize(*this, obj);
return (*this);
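
The streams.h and hash.h hunks change the extraction operators from T& to the forwarding reference T&&, which is what allows the REF() wrappers to be dropped throughout this diff: a temporary wrapper object can now bind directly to the stream operator. A minimal sketch of the enabled pattern, assuming the stream already holds a compressed CTxOut (it mirrors the coins.h and txdb.cpp hunks above):

    CTxOut out;
    CDataStream s(SER_DISK, CLIENT_VERSION);
    // s is assumed to already contain a serialized, compressed CTxOut.
    s >> CTxOutCompressor(out); // the temporary wrapper binds to T&&; the data lands in 'out'

Previously such a temporary could only bind to a const reference, so REF() was needed to cast away constness before the non-const lvalue overloads would accept it.
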
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index 4595519435..42fd59380a 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -53,7 +53,7 @@ public:
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITEMANY(intval, boolval, stringval, FLATDATA(charstrval), txval);
+ READWRITE(intval, boolval, stringval, FLATDATA(charstrval), txval);
}
};
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 293d43c7b3..7a1d920117 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -348,7 +348,7 @@ public:
vout.assign(vAvail.size(), CTxOut());
for (unsigned int i = 0; i < vAvail.size(); i++) {
if (vAvail[i])
- ::Unserialize(s, REF(CTxOutCompressor(vout[i])));
+ ::Unserialize(s, CTxOutCompressor(vout[i]));
}
// coinbase height
::Unserialize(s, VARINT(nHeight));
diff --git a/src/undo.h b/src/undo.h
index 1f10c6652c..7aae034de2 100644
--- a/src/undo.h
+++ b/src/undo.h
@@ -54,7 +54,7 @@ public:
int nVersionDummy;
::Unserialize(s, VARINT(nVersionDummy));
}
- ::Unserialize(s, REF(CTxOutCompressor(REF(txout->out))));
+ ::Unserialize(s, CTxOutCompressor(REF(txout->out)));
}
explicit TxInUndoDeserializer(Coin* coin) : txout(coin) {}
@@ -76,7 +76,7 @@ public:
uint64_t count = vprevout.size();
::Serialize(s, COMPACTSIZE(REF(count)));
for (const auto& prevout : vprevout) {
- ::Serialize(s, REF(TxInUndoSerializer(&prevout)));
+ ::Serialize(s, TxInUndoSerializer(&prevout));
}
}
@@ -90,7 +90,7 @@ public:
}
vprevout.resize(count);
for (auto& prevout : vprevout) {
- ::Unserialize(s, REF(TxInUndoDeserializer(&prevout)));
+ ::Unserialize(s, TxInUndoDeserializer(&prevout));
}
}
};
diff --git a/src/util.cpp b/src/util.cpp
index 62cdce3012..94f829ad32 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -4,7 +4,6 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <util.h>
-#include <fs.h>
#include <chainparamsbase.h>
#include <random.h>
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 082191098e..09f7f50de0 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -52,6 +52,9 @@ if os.name == 'posix':
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
+# 20 minutes represented in seconds
+TRAVIS_TIMEOUT_DURATION = 20 * 60
+
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
@@ -233,29 +236,27 @@ def main():
sys.exit(0)
# Build list of tests
+ test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
- tests = [re.sub("\.py$", "", t) + ".py" for t in tests]
- test_list = []
- for t in tests:
- if t in ALL_SCRIPTS:
- test_list.append(t)
+ tests = [re.sub("\.py$", "", test) + ".py" for test in tests]
+ for test in tests:
+ if test in ALL_SCRIPTS:
+ test_list.append(test)
else:
- print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
+ print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
+ elif args.extended:
+ # Include extended tests
+ test_list += ALL_SCRIPTS
else:
- # No individual tests have been specified.
- # Run all base tests, and optionally run extended tests.
- test_list = BASE_SCRIPTS
- if args.extended:
- # place the EXTENDED_SCRIPTS first since the three longest ones
- # are there and the list is shorter
- test_list = EXTENDED_SCRIPTS + test_list
+ # Run base tests only
+ test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
- tests_excl = [re.sub("\.py$", "", t) + ".py" for t in args.exclude.split(',')]
- for exclude_test in tests_excl:
+ exclude_tests = [re.sub("\.py$", "", test) + ".py" for test in args.exclude.split(',')]
+ for exclude_test in exclude_tests:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
@@ -320,7 +321,7 @@ def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_cove
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
- time0 = time.time()
+ start_time = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
@@ -346,7 +347,7 @@ def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_cove
combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
- print_results(test_results, max_len_name, (int(time.time() - time0)))
+ print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
coverage.report_rpc_coverage()
@@ -403,15 +404,15 @@ class TestHandler:
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
- t = self.test_list.pop(0)
+ test = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
- test_argv = t.split()
+ test_argv = test.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
- self.jobs.append((t,
+ self.jobs.append((test,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
@@ -425,15 +426,14 @@ class TestHandler:
while True:
# Return first proc that finishes
time.sleep(.5)
- for j in self.jobs:
- (name, time0, proc, testdir, log_out, log_err) = j
- if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
- # In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
- # providing useful output.
+ for job in self.jobs:
+ (name, start_time, proc, testdir, log_out, log_err) = job
+ if os.getenv('TRAVIS') == 'true' and int(time.time() - start_time) > TRAVIS_TIMEOUT_DURATION:
+ # In travis, timeout individual tests (to stop tests hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
- [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
+ [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
@@ -442,9 +442,9 @@ class TestHandler:
else:
status = "Failed"
self.num_running -= 1
- self.jobs.remove(j)
+ self.jobs.remove(job)
- return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr
+ return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
print('.', end='', flush=True)
class TestResult():
@@ -490,7 +490,7 @@ def check_script_list(src_dir):
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
- python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
+ python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
@@ -526,7 +526,7 @@ class RPCCoverage():
if uncovered:
print("Uncovered RPC commands:")
- print("".join((" - %s\n" % i) for i in sorted(uncovered)))
+ print("".join((" - %s\n" % command) for command in sorted(uncovered)))
else:
print("All RPC commands covered.")
@@ -550,8 +550,8 @@ class RPCCoverage():
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
- with open(coverage_ref_filename, 'r') as f:
- all_cmds.update([i.strip() for i in f.readlines()])
+ with open(coverage_ref_filename, 'r') as coverage_ref_file:
+ all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
@@ -559,8 +559,8 @@ class RPCCoverage():
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
- with open(filename, 'r') as f:
- covered_cmds.update([i.strip() for i in f.readlines()])
+ with open(filename, 'r') as coverage_file:
+ covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds