Pygame » pygame 1.9.1release » test » test_utils » test_runner.py
################################################################################

if __name__ == '__main__':
    
    import sys
    import os
    pkg_dir = os.path.split(os.path.split(os.path.abspath(__file__))[0])[0]
    parent_dir, pkg_name = os.path.split(pkg_dir)
    is_pygame_pkg = (pkg_name == 'tests' and
                     os.path.split(parent_dir)[1] == 'pygame')
    if not is_pygame_pkg:
        sys.path.insert(0, parent_dir)
else:
    is_pygame_pkg = __name__.startswith('pygame.tests.')
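# is_pygame_pkg is True when this module lives under an installed
# pygame/tests/test_utils/ layout and False for the in-tree test/test_utils/
# layout; the imports below pick the matching package root.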

if is_pygame_pkg:
    from pygame.tests import test_utils
    from pygame.tests.test_utils \
         import unittest, unittest_patch, import_submodule
    from pygame.tests.test_utils.unittest_patch import StringIOContents
else:
    from test import test_utils
    from test.test_utils \
         import unittest, unittest_patch, import_submodule
    from test.test_utils.unittest_patch import StringIOContents

import sys
import os
import re
try:
    import StringIO
except ImportError:
    import io as StringIO
import time
import optparse
from inspect import getdoc,getmembers,isclass
from pprint import pformat

# from safe_eval import safe_eval as eval

################################################################################

def prepare_test_env():
    test_subdir = os.path.split(os.path.split(os.path.abspath(__file__))[0])[0]
    main_dir = os.path.split(test_subdir)[0]
    sys.path.insert(0, test_subdir)
    fake_test_subdir = os.path.join(test_subdir, 'run_tests__tests')
    return main_dir, test_subdir, fake_test_subdir

main_dir, test_subdir, fake_test_subdir = prepare_test_env()
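# For the in-tree layout (paths are illustrative), with this file at
# <root>/test/test_utils/test_runner.py:
#   test_subdir      -> <root>/test
#   main_dir         -> <root>
#   fake_test_subdir -> <root>/test/run_tests__tests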

################################################################################
# Set the command line options
#
# options are shared with run_tests.py so make sure not to conflict
# in time more will be added here

TAG_PAT = r'-?[a-zA-Z0-9_]+'
TAG_RE = re.compile(TAG_PAT)
EXCLUDE_RE = re.compile(r"(%s,?\s*)+$" % (TAG_PAT,))
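# Examples of what EXCLUDE_RE accepts (tag names are hypothetical):
#   "interactive,display"  -> matches: two comma-separated tags
#   "-timing, network"     -> matches: a leading '-' and spaces are allowed
#   "bad tag!"             -> no match: rejected by exclude_callback below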

def exclude_callback(option, opt, value, parser):
    if EXCLUDE_RE.match(value) is None:
        raise optparse.OptionValueError("%s argument has invalid value" %
                                        (opt,))
    parser.values.exclude = TAG_RE.findall(value)

opt_parser = optparse.OptionParser()

opt_parser.add_option (
     "-i",  "--incomplete", action = 'store_true',
     help   = "fail incomplete tests" )

opt_parser.add_option (
     "-n",  "--nosubprocess", action = "store_true",
     help   = "run everything in a single process "
              " (default: use subprocesses)" )

opt_parser.add_option (
     "-T",  "--timings", type = 'int', default = 1, metavar = 'T',
     help   = "get timings for individual tests.\n" 
              "Run test T times, giving average time")

opt_parser.add_option (
     "-e",  "--exclude",
     action = 'callback',
     type   = 'string',
     help   = "exclude tests containing any of TAGS",
     callback = exclude_callback)

opt_parser.add_option (
     "-w",  "--show_output", action = 'store_true',
     help   = "show silenced stderr/stdout on errors" )

opt_parser.add_option (
     "-r",  "--randomize", action = 'store_true',
     help   = "randomize order of tests" )

opt_parser.add_option (
     "-S",  "--seed", type = 'int',
     help   = "seed randomizer" )

################################################################################
# If an xxxx_test.py takes longer than TIME_OUT seconds it will be killed
# This is only the default, can be over-ridden on command line

TIME_OUT = 30

# DEFAULTS

################################################################################
# Human readable output
#

COMPLETE_FAILURE_TEMPLATE = """
======================================================================
ERROR: all_tests_for (%(module)s.AllTestCases)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "test/%(module)s.py", line 1, in all_tests_for
subprocess completely failed with return code of %(return_code)s
cmd:          %(cmd)s
test_env:     %(test_env)s
working_dir:  %(working_dir)s
return (top 5 lines):
%(raw_return)s

"""  # Leave that last empty line else build page regex won't match
     # Text also needs to be vertically compressed
    

RAN_TESTS_DIV = (70 * "-") + "\nRan"

DOTS = re.compile("^([FE.]*)$", re.MULTILINE)
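# Sketch of the TextTestRunner output these patterns pick apart (the status
# line and counts below are made up):
#
#   ..F.E.
#   ======================================================================
#   ... failure / error details ...
#   ----------------------------------------------------------------------
#   Ran 6 tests in 0.012s
#
# DOTS captures the leading "..F.E." status line; RAN_TESTS_DIV marks where
# the "Ran N tests in Ts" summary begins.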

def combine_results(all_results, t):
    """

    Return pieced together results in a form fit for human consumption. Don't
    rely on results if  piecing together subprocessed  results (single process
    mode is fine). Was originally meant for that  purpose but was found to be
    unreliable.  See the dump option for reliable results.

    """

    all_dots = ''
    failures = []

    for module, results in sorted(all_results.items()):
        output, return_code, raw_return = map (
            results.get, ('output','return_code', 'raw_return')
        )

        if not output or (return_code and RAN_TESTS_DIV not in output):
            # would this affect the original dict? TODO
            results['raw_return'] = ''.join(raw_return.splitlines(1)[:5])
            failures.append( COMPLETE_FAILURE_TEMPLATE % results )
            all_dots += 'E'
            continue

        dots = DOTS.search(output).group(1)
        all_dots += dots

        if 'E' in dots or 'F' in dots:
            failures.append( output[len(dots)+1:].split(RAN_TESTS_DIV)[0] )
    
    total_fails, total_errors = map(all_dots.count, 'FE')
    total_tests = len(all_dots)

    combined = [all_dots]
    if failures: combined += [''.join(failures).lstrip('\n')[:-1]]
    combined += ["%s %s tests in %.3fs\n" % (RAN_TESTS_DIV, total_tests, t)]

    if not failures: combined += ['OK\n']
    else: combined += [
        'FAILED (%s)\n' % ', '.join (
            (total_fails  and ["failures=%s" % total_fails] or []) +
            (total_errors and ["errors=%s"  % total_errors] or [])
        )]

    return total_tests, '\n'.join(combined)
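# Minimal sketch of the shapes combine_results() works with (module names and
# values are hypothetical):
#
#   all_results = {
#       'some_module_test': {'output':      '..F\n... runner output ...',
#                            'return_code': 0,
#                            'raw_return':  '... full subprocess output ...'},
#   }
#   total, report = combine_results(all_results, 1.5)
#   # total  -> number of dots counted across modules, e.g. 3
#   # report -> combined dots, failure details and a "Ran 3 tests in 1.500s"
#   #           summary followed by OK or FAILED (...)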

################################################################################

TEST_RESULTS_START = "<--!! TEST RESULTS START HERE !!-->"
TEST_RESULTS_RE = re.compile('%s\n(.*)' % TEST_RESULTS_START, re.DOTALL | re.M)

def get_test_results(raw_return):
    test_results = TEST_RESULTS_RE.search(raw_return)
    if test_results:
        try:
            return eval(test_results.group(1))
        except:
            print ("BUGGY TEST RESULTS EVAL:\n %s" % test_results.group(1))
            raise
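# Sketch of the round trip (values are hypothetical): a subprocess run of
# run_test() prints the marker followed by a pformat()-ed results dict, e.g.
#
#   <--!! TEST RESULTS START HERE !!-->
#   {'some_module_test': {'num_tests': 3, 'failures': [], 'errors': [], ...}}
#
# and the parent passes the captured output to get_test_results(), which
# eval()s everything after the marker back into that dict.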

################################################################################
# ERRORS
# TODO

def make_complete_failure_error(result):
    return (
        "ERROR: all_tests_for (%s.AllTestCases)" % result['module'],
        "Complete Failure (ret code: %s)" % result['return_code'],
        result['test_file'], 
        '1',
    )
    
# For combined results, plural
def test_failures(results):
    errors = {}
    total =  sum([v.get('num_tests', 0) for v in results.values()])
    for module, result in results.items():
        num_errors = (
            len(result.get('failures', [])) + len(result.get('errors', []))
        )
        if num_errors == 0 and result.get('return_code'):
            result.update(RESULTS_TEMPLATE)
            result['errors'].append(make_complete_failure_error(result))
            num_errors += 1
            total += 1
        if num_errors: errors.update({module:result})

    return total, errors

# def combined_errs(results):
#     for result in results.values():
#         combined_errs = result['errors'] + result['failures']
#         for err in combined_errs:
#             yield err

################################################################################
# For complete failures (+ namespace saving)

def from_namespace(ns, template):
    if isinstance(template, dict):
        return dict([(i, ns.get(i, template[i])) for i in template])
    return dict([(i, ns[i]) for i in template])

RESULTS_TEMPLATE = {
    'output'     :  '',
    'num_tests'  :   0,
    'failures'   :  [],
    'errors'     :  [],
    'tests'      :  {},
}
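# from_namespace() with a dict template pulls those keys out of a namespace,
# falling back to the template's defaults (values here are hypothetical):
#
#   ns = {'output': 'all ok', 'num_tests': 3, 'unrelated': 1}
#   from_namespace(ns, RESULTS_TEMPLATE)
#   # -> {'output': 'all ok', 'num_tests': 3,
#   #     'failures': [], 'errors': [], 'tests': {}}
#
# run_test() below uses it with locals() to snapshot its result variables.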

################################################################################

def run_test(module, **kwds):
    """Run a unit test module

    Recognized keyword arguments:
    incomplete, nosubprocess

    """
    
    option_incomplete = kwds.get('incomplete', False)
    option_nosubprocess = kwds.get('nosubprocess', False)

    suite = unittest.TestSuite()
    test_utils.fail_incomplete_tests = option_incomplete

    m = import_submodule(module)
    if m.unittest is not unittest:
        raise ImportError(
            "%s is not using correct unittest\n\n" % module +
            "should be: %s\n is using: %s" % (unittest.__file__,
                                              m.unittest.__file__)
        )
    
    print ('loading %s' % module)

    test = unittest.defaultTestLoader.loadTestsFromName(module)
    suite.addTest(test)

    output = StringIO.StringIO()
    runner = unittest.TextTestRunner(stream=output)

    results = runner.run(suite)
    output  = StringIOContents(output)

    num_tests = results.testsRun
    failures  = results.failures
    errors    = results.errors
    tests     = results.tests

    results   = {module:from_namespace(locals(), RESULTS_TEMPLATE)}

    if not option_nosubprocess:
        print (TEST_RESULTS_START)
        print (pformat(results))
    else:
        return results
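# In-process usage sketch (the module name is hypothetical):
#
#   results = run_test('some_module_test', nosubprocess=True)
#   # -> {'some_module_test': {'output': ..., 'num_tests': ...,
#   #                          'failures': ..., 'errors': ..., 'tests': ...}}
#
# Without nosubprocess=True the results are printed after TEST_RESULTS_START
# for a parent process to collect instead of being returned.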

################################################################################

if __name__ == '__main__':
    options, args = opt_parser.parse_args()
    unittest_patch.patch(incomplete=options.incomplete,
                         randomize=options.randomize,
                         seed=options.seed,
                         exclude=options.exclude,
                         timings=options.timings,
                         show_output=options.show_output)
    if not args:
        
        if is_pygame_pkg:
            run_from = 'pygame.tests.go'
        else:
            run_from = os.path.join(main_dir, 'run_tests.py')
        sys.exit('No test module provided; consider using %s instead' % run_from)
    run_test(args[0],
             incomplete=options.incomplete,
             nosubprocess=options.nosubprocess)
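# Command-line usage sketch (module name is hypothetical; full runs normally
# go through run_tests.py or pygame.tests.go, which drive this module once
# per test file):
#
#   python test_runner.py some_module_test -n     # run in this process
#   python test_runner.py some_module_test -e tag1,tag2 -T 3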

################################################################################
