# pygame 1.9.1release: test/test_utils/run_tests.py

#################################### IMPORTS ##################################

if __name__ == '__main__':
    import sys
    sys.exit("This module is for import only")

test_pkg_name = '.'.join(__name__.split('.')[0:-2])
is_pygame_pkg = test_pkg_name == 'pygame.tests'
if is_pygame_pkg:
    from pygame.tests import test_utils
    from pygame.tests.test_utils \
         import unittest, unittest_patch, import_submodule
    from pygame.tests.test_utils.test_runner \
         import prepare_test_env, run_test, combine_results, test_failures, \
                get_test_results, from_namespace, TEST_RESULTS_START, \
                opt_parser
else:
    from test import test_utils
    from test.test_utils \
         import unittest, unittest_patch, import_submodule
    from test.test_utils.test_runner \
         import prepare_test_env, run_test, combine_results, test_failures, \
                get_test_results, from_namespace, TEST_RESULTS_START, \
                opt_parser
import pygame
import pygame.threads

import sys
import os
import re
import time
import optparse
import random
from pprint import pformat

was_run = False

def run(*args, **kwds):
    """Run the Pygame unit test suite and return (total tests run, fails dict)

    Positional arguments (optional):
    The names of tests to include. If omitted then all tests are run. Test
    names need not include the trailing '_test'.

    Keyword arguments:
    incomplete - fail incomplete tests (default False)
    nosubprocess - run all test suites in the current process
                   (default False, use separate subprocesses)
    dump - dump failures/errors as dict ready to eval (default False)
    file - if provided, the name of a file into which to dump failures/errors
    timings - if provided, the number of times to run each individual test to
              get an average run time (default is run each test once)
    exclude - A list of TAG names to exclude from the run. The items may be
              comma or space separated.
    show_output - show silenced stderr/stdout on errors (default False)
    all - dump all results, not just errors (default False)
    randomize - randomize order of tests (default False)
    seed - if provided, an integer seed for randomizing the test order
    multi_thread - if provided, the number of THREADS in which to run
                   subprocessed tests
    time_out - in subprocess mode, the time limit in seconds before a
               test is killed (default 120)
    fake - if provided, the name of the fake tests package in the
           run_tests__tests subpackage to run instead of the normal
           Pygame tests
    python - the path to a python executable to run subprocessed tests
             (default sys.executable)
    interactive - allow tests tagged 'interactive'.

    Return value:
    A tuple of total number of tests run, dictionary of error information. The
    dictionary is empty if no errors were recorded.

    By default individual test modules are run in separate subprocesses. This
    recreates normal Pygame usage where pygame.init() and pygame.quit() are
    called only once per program execution, and avoids unfortunate
    interactions between test modules. Also, a time limit is placed on test
    execution, so frozen tests are killed when their time allotment expires.
    Use the single process option if threading is not working properly or if
    tests are taking too long. It is not guaranteed that all tests will pass
    in single process mode.

    Tests are run in a randomized order if the randomize argument is True or a
    seed argument is provided. If no seed integer is provided then the system
    time is used.

    Individual test modules may have a corresponding *_tags.py module,
    defining a __tags__ attribute, a list of tag strings used to selectively
    omit modules from a run. By default only the 'interactive', 'ignore', and
    'subprocess_ignore' tags are ignored. 'interactive' is for modules that
    take user input, like cdrom_test.py. 'ignore' and 'subprocess_ignore' are
    for disabling modules in foreground and subprocess modes respectively.
    These are for disabling tests on optional modules or for experimental
    modules with known problems. These modules can be run from the console as
    a Python program.

    This function can only be called once per Python session. It is not
    reentrant.

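    Example (an illustrative sketch; 'surface' and 'rect' resolve to the
    surface_test and rect_test modules as described above):

        total, fails = run('surface', 'rect')
        total, fails = run(nosubprocess=True, seed=42)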
    """

    global was_run

    if was_run:
        raise RuntimeError("run() was already called this session")
    was_run = True

    options = kwds.copy()
    option_nosubprocess = options.get('nosubprocess', False)
    option_dump = options.pop('dump', False)
    option_file = options.pop('file', None)
    option_all = options.pop('all', False)
    option_randomize = options.get('randomize', False)
    option_seed = options.get('seed', None)
    option_multi_thread = options.pop('multi_thread', 1)
    option_time_out = options.pop('time_out', 120)
    option_fake = options.pop('fake', None)
    option_python = options.pop('python', sys.executable)
    option_exclude = options.pop('exclude', ())
    option_interactive = options.pop('interactive', False)

    if not option_interactive and 'interactive' not in option_exclude:
        option_exclude += ('interactive',)
    if not option_nosubprocess and 'subprocess_ignore' not in option_exclude:
        option_exclude += ('subprocess_ignore',)
    elif 'ignore' not in option_exclude:
        option_exclude += ('ignore',)
    if sys.version_info < (3, 0, 0):
        option_exclude += ('python2_ignore',)
    else:
        option_exclude += ('python3_ignore',)
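
    # At this point option_exclude holds 'interactive' (unless interactive
    # tests were requested), either 'subprocess_ignore' (subprocess mode) or
    # 'ignore' (in-process mode), and one of python2_ignore/python3_ignore.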

    main_dir, test_subdir, fake_test_subdir = prepare_test_env()
    test_runner_py = os.path.join(test_subdir, "test_utils", "test_runner.py")
    cur_working_dir = os.path.abspath(os.getcwd())

    ###########################################################################
    # Compile a list of test modules. If fake, then compile list of fake
    # xxxx_test.py from run_tests__tests

    TEST_MODULE_RE = re.compile(r'^(.+_test)\.py$')

    test_mods_pkg_name = test_pkg_name
    
    if option_fake is not None:
        test_mods_pkg_name = '.'.join([test_mods_pkg_name,
                                       'run_tests__tests',
                                       option_fake])
        test_subdir = os.path.join(fake_test_subdir, option_fake)
        working_dir = test_subdir
    else:
        working_dir = main_dir


    # Pass the parent environment to the subprocesses. On some machines,
    # tests fail spuriously in subprocess mode unless certain environment
    # variables are present (the same issue was seen with Python 2.6).

    test_env = os.environ

    fmt1 = '%s.%%s' % test_mods_pkg_name
    fmt2 = '%s.%%s_test' % test_mods_pkg_name
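    # e.g. with test_mods_pkg_name == 'pygame.tests', an argument 'surface'
    # expands via fmt2 to 'pygame.tests.surface_test', while 'surface_test'
    # is only prefixed via fmt1.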
    if args:
        test_modules = [
            (fmt1 % m) if m.endswith('_test') else (fmt2 % m) for m in args
        ]
    else:
        test_modules = []
        for f in sorted(os.listdir(test_subdir)):
            for match in TEST_MODULE_RE.findall(f):
                test_modules.append(fmt1 % (match,))

    ###########################################################################
    # Remove modules to be excluded.

    tmp = test_modules
    test_modules = []
    for name in tmp:
        tag_module_name = "%s_tags" % (name[0:-5],)
        try:
            tag_module = import_submodule(tag_module_name)
        except ImportError:
            test_modules.append(name)
        else:
            try:
                tags = tag_module.__tags__
            except AttributeError:
                print ("%s has no tags: ignoring" % (tag_module_name,))
                test_modules.append(name)
            else:
                for tag in tags:
                    if tag in option_exclude:
                        print ("skipping %s (tag '%s')" % (name, tag))
                        break
                else:
                    test_modules.append(name)
    del tmp, tag_module_name, name
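    # For reference, a tags module is a plain Python file beside its test
    # module (e.g. a hypothetical example_test_tags.py) containing only:
    #
    #     __tags__ = ['interactive']
    #
    # Any tag also present in option_exclude skips the whole module.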

    ###########################################################################
    # Meta results

    results = {}
    meta_results = {'__meta__' : {}}
    meta = meta_results['__meta__']

    ###########################################################################
    # Randomization

    if option_randomize or option_seed is not None:
        if option_seed is None:
            option_seed = time.time()
        meta['random_seed'] = option_seed
        print ("\nRANDOM SEED USED: %s\n" % option_seed)
        random.seed(option_seed)
        random.shuffle(test_modules)

    ###########################################################################
    # Single process mode

    if option_nosubprocess:
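        # In-process mode: unittest_patch applies the remaining keyword
        # options (e.g. incomplete, show_output) to unittest, then each
        # module's suite runs in this process.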
        unittest_patch.patch(**options)

        options['exclude'] = option_exclude
        t = time.time()
        for module in test_modules:
            results.update(run_test(module, **options))
        t = time.time() - t

    ###########################################################################
    # Subprocess mode
    #

    if not option_nosubprocess:
        if is_pygame_pkg:
            from pygame.tests.test_utils.async_sub import proc_in_time_or_kill
        else:
            from test.test_utils.async_sub import proc_in_time_or_kill

        pass_on_args = ['--exclude', ','.join(option_exclude)]
        for option in ['timings', 'seed']:
            value = options.pop(option, None)
            if value is not None:
                pass_on_args.append('--%s' % option)
                pass_on_args.append(str(value))
        for option, value in options.items():
            if value:
                pass_on_args.append('--%s' % option)

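        # sub_test runs one test module in a child process via test_runner.py
        # and returns (module name, invocation details, (return code,
        # captured output)).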
        def sub_test(module):
            print ('loading %s' % module)

            cmd = [option_python, test_runner_py, module] + pass_on_args

            return (module,
                    (cmd, test_env, working_dir),
                    proc_in_time_or_kill(cmd, option_time_out, env=test_env,
                                         wd=working_dir))

        if option_multi_thread > 1:
            def tmap(f, args):
                return pygame.threads.tmap(
                    f, args, stop_on_error=False,
                    num_workers=option_multi_thread
                )
        else:
            tmap = map

        t = time.time()

        for module, cmd, (return_code, raw_return) in tmap(sub_test,
                                                           test_modules):
            test_file = '%s.py' % os.path.join(test_subdir, module)
            cmd, test_env, working_dir = cmd

            test_results = get_test_results(raw_return)
            if test_results:
                results.update(test_results)
            else:
                results[module] = {}

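            # from_namespace copies the named locals into this module's
            # results entry, preserving the exact command, environment, and
            # raw output for failure reports.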
            add_to_results = [
                'return_code', 'raw_return', 'cmd', 'test_file',
                'test_env', 'working_dir', 'module',
            ]

            results[module].update(from_namespace(locals(), add_to_results))

        t = time.time() - t

    ###########################################################################
    # Output Results
    #

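    # combine_results renders the human-readable summary; its total is
    # labelled 'untrusty', presumably because subprocess output can be
    # incomplete. test_failures recounts the total and collects failures.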
    untrusty_total, combined = combine_results(results, t)
    total, fails = test_failures(results)

    meta['total_tests'] = total
    meta['combined'] = combined
    results.update(meta_results)

    if option_nosubprocess:
        assert total == untrusty_total

    if not option_dump:
        print (combined)
    else:
        results = results if option_all else fails
        print (TEST_RESULTS_START)
        print (pformat(results))

    if option_file is not None:
        results_file = open(option_file, 'w')
        try:
            results_file.write(pformat(results))
        finally:
            results_file.close()

    return total, fails

###############################################################################
