Diffstat (limited to 'lib/jsoncpp/jsoncpp/test/rununittests.py')
-rw-r--r--  lib/jsoncpp/jsoncpp/test/rununittests.py | 146
1 file changed, 73 insertions(+), 73 deletions(-)
diff --git a/lib/jsoncpp/jsoncpp/test/rununittests.py b/lib/jsoncpp/jsoncpp/test/rununittests.py
index ccc54e45a3..366184cfb5 100644
--- a/lib/jsoncpp/jsoncpp/test/rununittests.py
+++ b/lib/jsoncpp/jsoncpp/test/rununittests.py
@@ -1,73 +1,73 @@
-import sys
-import os
-import os.path
-import subprocess
-from glob import glob
-import optparse
-
-VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes'
-
-class TestProxy(object):
-    def __init__( self, test_exe_path, use_valgrind=False ):
-        self.test_exe_path = os.path.normpath( os.path.abspath( test_exe_path ) )
-        self.use_valgrind = use_valgrind
-
-    def run( self, options ):
-        if self.use_valgrind:
-            cmd = VALGRIND_CMD.split()
-        else:
-            cmd = []
-        cmd.extend( [self.test_exe_path, '--test-auto'] + options )
-        process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
-        stdout = process.communicate()[0]
-        if process.returncode:
-            return False, stdout
-        return True, stdout
-
-def runAllTests( exe_path, use_valgrind=False ):
-    test_proxy = TestProxy( exe_path, use_valgrind=use_valgrind )
-    status, test_names = test_proxy.run( ['--list-tests'] )
-    if not status:
-        print >> sys.stderr, "Failed to obtain unit tests list:\n" + test_names
-        return 1
-    test_names = [name.strip() for name in test_names.strip().split('\n')]
-    failures = []
-    for name in test_names:
-        print 'TESTING %s:' % name,
-        succeed, result = test_proxy.run( ['--test', name] )
-        if succeed:
-            print 'OK'
-        else:
-            failures.append( (name, result) )
-            print 'FAILED'
-    failed_count = len(failures)
-    pass_count = len(test_names) - failed_count
-    if failed_count:
-        print
-        for name, result in failures:
-            print result
-        print '%d/%d tests passed (%d failure(s))' % (
-            pass_count, len(test_names), failed_count)
-        return 1
-    else:
-        print 'All %d tests passed' % len(test_names)
-        return 0
-
-def main():
-    from optparse import OptionParser
-    parser = OptionParser( usage="%prog [options] <path to test_lib_json.exe>" )
-    parser.add_option("--valgrind",
-                      action="store_true", dest="valgrind", default=False,
-                      help="run all the tests using valgrind to detect memory leaks")
-    parser.enable_interspersed_args()
-    options, args = parser.parse_args()
-
-    if len(args) != 1:
-        parser.error( 'Must provides at least path to test_lib_json executable.' )
-        sys.exit( 1 )
-
-    exit_code = runAllTests( args[0], use_valgrind=options.valgrind )
-    sys.exit( exit_code )
-
-if __name__ == '__main__':
-    main()
+import sys
+import os
+import os.path
+import subprocess
+from glob import glob
+import optparse
+
+VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes'
+
+class TestProxy(object):
+    def __init__( self, test_exe_path, use_valgrind=False ):
+        self.test_exe_path = os.path.normpath( os.path.abspath( test_exe_path ) )
+        self.use_valgrind = use_valgrind
+
+    def run( self, options ):
+        if self.use_valgrind:
+            cmd = VALGRIND_CMD.split()
+        else:
+            cmd = []
+        cmd.extend( [self.test_exe_path, '--test-auto'] + options )
+        process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
+        stdout = process.communicate()[0]
+        if process.returncode:
+            return False, stdout
+        return True, stdout
+
+def runAllTests( exe_path, use_valgrind=False ):
+    test_proxy = TestProxy( exe_path, use_valgrind=use_valgrind )
+    status, test_names = test_proxy.run( ['--list-tests'] )
+    if not status:
+        print >> sys.stderr, "Failed to obtain unit tests list:\n" + test_names
+        return 1
+    test_names = [name.strip() for name in test_names.strip().split('\n')]
+    failures = []
+    for name in test_names:
+        print 'TESTING %s:' % name,
+        succeed, result = test_proxy.run( ['--test', name] )
+        if succeed:
+            print 'OK'
+        else:
+            failures.append( (name, result) )
+            print 'FAILED'
+    failed_count = len(failures)
+    pass_count = len(test_names) - failed_count
+    if failed_count:
+        print
+        for name, result in failures:
+            print result
+        print '%d/%d tests passed (%d failure(s))' % (
+            pass_count, len(test_names), failed_count)
+        return 1
+    else:
+        print 'All %d tests passed' % len(test_names)
+        return 0
+
+def main():
+    from optparse import OptionParser
+    parser = OptionParser( usage="%prog [options] <path to test_lib_json.exe>" )
+    parser.add_option("--valgrind",
+                      action="store_true", dest="valgrind", default=False,
+                      help="run all the tests using valgrind to detect memory leaks")
+    parser.enable_interspersed_args()
+    options, args = parser.parse_args()
+
+    if len(args) != 1:
+        parser.error( 'Must provides at least path to test_lib_json executable.' )
+        sys.exit( 1 )
+
+    exit_code = runAllTests( args[0], use_valgrind=options.valgrind )
+    sys.exit( exit_code )
+
+if __name__ == '__main__':
+    main()
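
For context, the script in this diff is driven from the command line with the path to a built test_lib_json binary, optionally under valgrind. Below is a minimal usage sketch, not part of the diff itself: it assumes the file is importable as rununittests, that a test_lib_json binary exists at the (hypothetical) path shown, and that it runs under Python 2, which the print syntax above requires.

# Hypothetical usage sketch (Python 2); roughly equivalent to:
#   python rununittests.py --valgrind path/to/test_lib_json
import sys
import rununittests

# runAllTests() returns 0 when every listed test passes and 1 otherwise,
# so its result can be passed straight to sys.exit().
exit_code = rununittests.runAllTests('path/to/test_lib_json', use_valgrind=True)
sys.exit(exit_code)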