Imported Upstream version 4.8.10

This commit is contained in:
Mario Fetka
2021-10-03 11:06:28 +02:00
parent 10dfc9587b
commit 03a8170b15
2361 changed files with 1883897 additions and 338759 deletions

View File

@@ -0,0 +1,7 @@
#
# Copyright (C) 2019 FreeIPA Contributors see COPYING for license
#
"""
Sub-package containing unit tests for IPA internal test plugins
"""

View File

@@ -0,0 +1,99 @@
#
# Copyright (C) 2019 FreeIPA Contributors see COPYING for license
#
import pytest
@pytest.fixture
def ipa_testdir(testdir):
    """Return a testdir whose conftest loads the plugin under test.

    The generated conftest.py enables
    ``ipatests.pytest_ipa.deprecated_frameworks`` so the inner pytest
    runs exercise that plugin.
    """
    testdir.makeconftest(
        """
        pytest_plugins = ["ipatests.pytest_ipa.deprecated_frameworks"]
        """
    )
    return testdir
@pytest.fixture
def xunit_testdir(ipa_testdir):
    """
    Create xunit style test module within testdir.

    The generated module defines all eight xunit-style hooks
    (module/function/class/method setup and teardown) plus one passing
    test, so the deprecation plugin is expected to emit one warning per
    hook.
    """
    # Fixed typo in docstring: "xnit" -> "xunit".
    ipa_testdir.makepyfile("""
        def setup_module():
            pass

        def teardown_module():
            pass

        def setup_function():
            pass

        def teardown_function():
            pass

        class TestClass:
            @classmethod
            def setup_class(cls):
                pass

            @classmethod
            def teardown_class(cls):
                pass

            def setup_method(self):
                pass

            def teardown_method(self):
                pass

            def test_m(self):
                pass
        """)
    return ipa_testdir
@pytest.fixture
def unittest_testdir(ipa_testdir):
    """
    Create unittest style test module within testdir.

    The generated module defines module-level setUpModule/tearDownModule
    and a ``unittest.TestCase`` subclass with one passing test.
    """
    # Fix: the original source carried a stray @classmethod decorator on
    # setUp(self); setUp/tearDown are instance methods, only
    # setUpClass/tearDownClass are classmethods.
    ipa_testdir.makepyfile("""
        import unittest

        def setUpModule():
            pass

        def tearDownModule():
            pass

        class TestClass(unittest.TestCase):
            def setUp(self):
                pass

            def tearDown(self):
                pass

            @classmethod
            def setUpClass(cls):
                pass

            @classmethod
            def tearDownClass(cls):
                pass

            def test_m(self):
                pass
        """)
    return ipa_testdir
def test_xunit(xunit_testdir):
    """The plugin must warn once per xunit-style hook (8 in total)."""
    outcome = xunit_testdir.runpytest()
    outcome.assert_outcomes(passed=1)
    expected_lines = [
        "* PytestIPADeprecationWarning: xunit style is deprecated in favour of "
        "fixtures style",
        "* 8 warning*",
    ]
    outcome.stdout.fnmatch_lines(expected_lines)
def test_unittest(unittest_testdir):
    """The plugin must emit a single warning for a unittest-style module."""
    outcome = unittest_testdir.runpytest()
    outcome.assert_outcomes(passed=1)
    expected_lines = [
        "* PytestIPADeprecationWarning: unittest is deprecated in favour of "
        "fixtures style",
        "* 1 warning*",
    ]
    outcome.stdout.fnmatch_lines(expected_lines)

View File

@@ -0,0 +1,176 @@
#
# Copyright (C) 2019 FreeIPA Contributors see COPYING for license
#
import os
import pytest
# Templates for generated test module/function names; "{}" is the index.
MOD_NAME = "test_module_{}"
FUNC_NAME = "test_func_{}"
# Number of test modules the ipatestdir fixture generates.
MODS_NUM = 5
@pytest.fixture
def ipatestdir(testdir, monkeypatch):
    """
    Create MODS_NUM test modules within testdir/ipatests.
    Each module contains 1 test function.
    Patch PYTHONPATH with created package path to override the system's
    ipatests
    """
    pkg_dir = testdir.mkpydir("ipatests")
    for idx in range(MODS_NUM):
        module_file = pkg_dir.join("{}.py".format(MOD_NAME.format(idx)))
        module_file.write("def {}(): pass".format(FUNC_NAME.format(idx)))

    # Prepend the tmpdir so the generated "ipatests" package shadows the
    # installed one; drop empty components from PYTHONPATH.
    path_parts = [str(testdir.tmpdir), os.environ.get("PYTHONPATH", "")]
    monkeypatch.setenv(
        "PYTHONPATH", os.pathsep.join(p for p in path_parts if p))

    def run_ipa_tests(*args):
        # Invoke the real ipa-run-tests console script verbosely.
        return testdir.run(*(["ipa-run-tests", "-v"] + list(args)),
                           timeout=60)

    testdir.run_ipa_tests = run_ipa_tests
    return testdir
def test_ipa_run_tests_basic(ipatestdir):
    """
    Run ipa-run-tests with default arguments
    """
    result = ipatestdir.run_ipa_tests()
    assert result.ret == 0
    result.assert_outcomes(passed=MODS_NUM)
    for num in range(MODS_NUM):
        pattern = "*{mod}.py::{func} PASSED*".format(
            mod=MOD_NAME.format(num), func=FUNC_NAME.format(num))
        result.stdout.fnmatch_lines([pattern])
def test_ipa_run_tests_glob1(ipatestdir):
    """
    Run ipa-run-tests using glob patterns to collect tests
    """
    # Pattern matches every generated module: test_module_0..4.
    result = ipatestdir.run_ipa_tests("test_modul[!E]?[0-5]*")
    assert result.ret == 0
    result.assert_outcomes(passed=MODS_NUM)
    for num in range(MODS_NUM):
        pattern = "*{mod}.py::{func} PASSED*".format(
            mod=MOD_NAME.format(num), func=FUNC_NAME.format(num))
        result.stdout.fnmatch_lines([pattern])
def test_ipa_run_tests_glob2(ipatestdir):
    """
    Run ipa-run-tests using glob patterns to collect tests
    """
    # Brace-style pattern selects only modules 0 and 1.
    result = ipatestdir.run_ipa_tests("test_module_{0,1}*")
    assert result.ret == 0
    result.assert_outcomes(passed=2)
    for num in range(2):
        pattern = "*{mod}.py::{func} PASSED*".format(
            mod=MOD_NAME.format(num), func=FUNC_NAME.format(num))
        result.stdout.fnmatch_lines([pattern])
def test_ipa_run_tests_specific_nodeid(ipatestdir):
    """
    Run ipa-run-tests using nodeid to collect test
    """
    nodeid = "{mod}.py::{func}".format(
        mod=MOD_NAME.format(0), func=FUNC_NAME.format(0))
    result = ipatestdir.run_ipa_tests(nodeid)
    assert result.ret == 0
    result.assert_outcomes(passed=1)
    result.stdout.fnmatch_lines(["*{} PASSED*".format(nodeid)])
@pytest.mark.parametrize(
    "expr",
    [["-k", "not {func}".format(func=FUNC_NAME.format(0))],
     ["-k not {func}".format(func=FUNC_NAME.format(0))]])
def test_ipa_run_tests_expression(ipatestdir, expr):
    """
    Run ipa-run-tests using expression

    The ``-k`` expression deselects test_func_0, both as a separate
    argument and as a single "-k not ..." token.
    """
    result = ipatestdir.run_ipa_tests(*expr)
    assert result.ret == 0
    # Everything except the deselected module 0 passes; derive the count
    # from MODS_NUM instead of hard-coding 4.
    result.assert_outcomes(passed=MODS_NUM - 1)
    for mod_num in range(1, MODS_NUM):
        result.stdout.fnmatch_lines(["*{mod}.py::{func} PASSED*".format(
            mod=MOD_NAME.format(mod_num),
            func=FUNC_NAME.format(mod_num))])
def test_ipa_run_tests_empty_expression(ipatestdir):
    """
    Run ipa-run-tests using an empty expression.
    Expected result: all tests should pass.
    """
    result = ipatestdir.run_ipa_tests('-k', '')
    assert result.ret == 0
    # An empty -k expression filters nothing out; derive the count from
    # MODS_NUM instead of hard-coding 5.
    result.assert_outcomes(passed=MODS_NUM)
    for mod_num in range(0, MODS_NUM):
        result.stdout.fnmatch_lines(["*{mod}.py::{func} PASSED*".format(
            mod=MOD_NAME.format(mod_num),
            func=FUNC_NAME.format(mod_num))])
def test_ipa_run_tests_ignore_basic(ipatestdir):
    """
    Run ipa-run-tests ignoring two test modules

    (Docstring corrected: two --ignore options are passed, for modules
    0 and 1, so only the remaining MODS_NUM - 2 tests run.)
    """
    result = ipatestdir.run_ipa_tests(
        "--ignore", "{mod}.py".format(mod=MOD_NAME.format(0)),
        "--ignore", "{mod}.py".format(mod=MOD_NAME.format(1)),
    )
    assert result.ret == 0
    result.assert_outcomes(passed=MODS_NUM - 2)
    for mod_num in range(2, MODS_NUM):
        result.stdout.fnmatch_lines(["*{mod}.py::{func} PASSED*".format(
            mod=MOD_NAME.format(mod_num),
            func=FUNC_NAME.format(mod_num))])
def test_ipa_run_tests_defaultargs(ipatestdir):
    """
    Checking the ipa-run-tests defaults:
    * cachedir
    * rootdir
    """
    nodeid = "{mod}.py::{func}".format(
        mod=MOD_NAME.format(0), func=FUNC_NAME.format(0))
    result = ipatestdir.run_ipa_tests(nodeid)
    assert result.ret == 0
    result.assert_outcomes(passed=1)
    expected_cachedir = os.path.join(os.getcwd(), ".pytest_cache")
    expected_rootdir = os.path.join(str(ipatestdir.tmpdir), "ipatests")
    result.stdout.re_match_lines([
        "^cachedir: {cachedir}$".format(cachedir=expected_cachedir),
        "^rootdir: {rootdir}([,].*)?$".format(rootdir=expected_rootdir)
    ])
def test_ipa_run_tests_confcutdir(ipatestdir):
    """
    Checking the ipa-run-tests defaults:
    * confcutdir

    A conftest.py with a broken import is placed above the ipatests
    directory; the run still passes, showing that conftest is not
    loaded.
    """
    ipatestdir.makeconftest("import somenotexistedpackage")
    nodeid = "{mod}.py::{func}".format(
        mod=MOD_NAME.format(0), func=FUNC_NAME.format(0))
    result = ipatestdir.run_ipa_tests(nodeid)
    assert result.ret == 0
    result.assert_outcomes(passed=1)
    result.stdout.fnmatch_lines(["*{} PASSED*".format(nodeid)])

View File

@@ -0,0 +1,127 @@
#
# Copyright (C) 2019 FreeIPA Contributors see COPYING for license
#
import glob
import pytest
# Templates for generated test module/function names; "{}" is the index.
MOD_NAME = "test_module_{}"
FUNC_NAME = "test_func_{}"
# pytest exit code 3 (ExitCode.INTERNAL_ERROR) — expected for the
# negative scenarios below.
PYTEST_INTERNAL_ERROR = 3
# Number of test modules the ipatestdir fixture generates.
MODS_NUM = 5
@pytest.fixture
def ipatestdir(testdir):
    """
    Create MODS_NUM test modules within testdir.
    Each module contains 1 test function.
    The generated conftest.py enables the slicing plugin under test.
    """
    testdir.makeconftest(
        """
        pytest_plugins = ["ipatests.pytest_ipa.slicing"]
        """
    )
    for idx in range(MODS_NUM):
        testdir.makepyfile(**{
            MOD_NAME.format(idx):
                """
                def {func}():
                    pass
                """.format(func=FUNC_NAME.format(idx)),
        })
    return testdir
@pytest.mark.parametrize(
    "nslices,nslices_d,groups",
    [(2, 0, [[x for x in range(MODS_NUM) if x % 2 == 0],
             [x for x in range(MODS_NUM) if x % 2 != 0]]),
     (2, 1, [[0], list(range(1, MODS_NUM))]),
     (1, 0, [list(range(MODS_NUM))]),
     (1, 1, [list(range(MODS_NUM))]),
     (MODS_NUM, MODS_NUM, [[x] for x in range(MODS_NUM)]),
     ])
def test_slicing(ipatestdir, nslices, nslices_d, groups):
    """
    Positive tests.
    Run `nslices` slices, of which `nslices_d` are dedicated slices.
    `groups` is the expected grouping of module indexes per slice.
    For example, with 5 modules and 2 slices (none dedicated) the
    expected grouping is [[0, 2, 4], [1, 3]]: slice 1 runs modules
    0, 2, 4 and slice 2 runs modules 1 and 3.
    With 5 modules, 2 slices and module 0 marked dedicated, the
    expected grouping is [[0], [1, 2, 3, 4]].
    If the given slice count is one, the plugin does nothing.
    """
    dedicated_args = [
        "--slice-dedicated={}.py".format(MOD_NAME.format(d))
        for d in range(nslices_d)
    ]
    for slice_idx in range(nslices):
        # Slice numbers are 1-based on the command line.
        args = ["-v",
                "--slices={}".format(nslices),
                "--slice-num={}".format(slice_idx + 1)] + dedicated_args
        result = ipatestdir.runpytest(*args)
        assert result.ret == 0
        result.assert_outcomes(passed=len(groups[slice_idx]))
        for mod_num in groups[slice_idx]:
            pattern = "*{mod}.py::{func} PASSED*".format(
                mod=MOD_NAME.format(mod_num),
                func=FUNC_NAME.format(mod_num))
            result.stdout.fnmatch_lines([pattern])
@pytest.mark.parametrize(
    "nslices,nslices_d,nslice,dmod,err_message",
    [(2, 3, 1, None,
      "Dedicated slice number({}) shouldn't be greater than"
      " the number of slices({})".format(3, 2)),
     (MODS_NUM, 0, MODS_NUM + 1, None,
      "Slice number({}) shouldn't be greater than the number of slices"
      "({})".format(
          MODS_NUM + 1, MODS_NUM)),
     (MODS_NUM + 1, 1, 1, None,
      "Total number of slices({}) shouldn't be greater"
      " than the number of Python test modules({})".format(
          MODS_NUM + 1, MODS_NUM)),
     (MODS_NUM, MODS_NUM, 1, "notexisted_module",
      "The number of dedicated slices({}) should be equal to the "
      "number of dedicated modules({})".format(
          [], ["notexisted_module.py"])),
     (MODS_NUM - 1, MODS_NUM - 1, 1, None,
      "The total number of slices({}) is not sufficient to"
      " run dedicated modules({}) as well as usual ones({})".format(
          MODS_NUM - 1, MODS_NUM - 1, 1)),
     ])
def test_slicing_negative(ipatestdir, nslices, nslices_d, nslice, dmod,
                          err_message):
    """
    Negative scenarios

    Each case must abort collection with a ValueError carrying
    `err_message`; pytest then exits with its internal-error code.
    """
    args = ["-v",
            "--slices={}".format(nslices),
            "--slice-num={}".format(nslice)]
    if dmod is not None:
        # A single, explicitly named (non-existent) dedicated module.
        args.append("--slice-dedicated={}.py".format(dmod))
    else:
        args.extend(
            "--slice-dedicated={}.py".format(MOD_NAME.format(d))
            for d in range(nslices_d))
    result = ipatestdir.runpytest(*args)
    assert result.ret == PYTEST_INTERNAL_ERROR
    # Nothing was collected/run.
    result.assert_outcomes()
    # glob.escape neutralises fnmatch metacharacters ([ ] etc.) that
    # appear in the formatted error messages.
    result.stdout.fnmatch_lines(["*ValueError: {err_message}*".format(
        err_message=glob.escape(err_message))])