Imported Upstream version 4.8.10
0
ipatests/pytest_ipa/__init__.py
Normal file
8
ipatests/pytest_ipa/additional_config.py
Normal file
@@ -0,0 +1,8 @@
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#


def pytest_addoption(parser):
    parser.addoption("--no-pretty-print", action="store_false",
                     dest="pretty_print", help="Don't pretty-print structures")
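The option registered above is consumed elsewhere in the suite; as a minimal sketch (the fixture and test below are illustrative, not part of this commit), the value can be read back through pytest's standard `config.getoption()` API:

```
import pytest
from pprint import pformat


@pytest.fixture
def pretty_print(request):
    # True unless the run was started with --no-pretty-print
    return request.config.getoption("pretty_print")


def test_dump(pretty_print):
    data = {"result": {"uid": ["admin"]}}
    print(pformat(data) if pretty_print else data)
```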
55
ipatests/pytest_ipa/beakerlib.py
Normal file
@@ -0,0 +1,55 @@
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

"""Test integration with BeakerLib

IPA-specific configuration for the BeakerLib plugin (from pytest-beakerlib).
If the plugin is active, sets up IPA logging to also log to Beaker.

"""

import logging

from ipapython.ipa_log_manager import Formatter


def pytest_configure(config):
    plugin = config.pluginmanager.getplugin('BeakerLibPlugin')
    if plugin:
        root_logger = logging.getLogger()
        root_logger.setLevel(logging.DEBUG)

        handler = BeakerLibLogHandler(plugin.run_beakerlib_command)
        handler.setLevel(logging.INFO)
        handler.setFormatter(Formatter('[%(name)s] %(message)s'))
        root_logger.addHandler(handler)


class BeakerLibLogHandler(logging.Handler):
    def __init__(self, beakerlib_command):
        super(BeakerLibLogHandler, self).__init__()
        self.beakerlib_command = beakerlib_command

    def emit(self, record):
        command = {
            'DEBUG': 'rlLogDebug',
            'INFO': 'rlLogInfo',
            'WARNING': 'rlLogWarning',
            'ERROR': 'rlLogError',
            'CRITICAL': 'rlLogFatal',
        }.get(record.levelname, 'rlLog')
        self.beakerlib_command([command, self.format(record)])
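To illustrate how the handler maps Python log records onto BeakerLib commands, here is a small standalone sketch; the `run_command` stub stands in for the plugin's `run_beakerlib_command` and just prints the resulting argument list (it assumes the module is importable as `ipatests.pytest_ipa.beakerlib`):

```
import logging

from ipatests.pytest_ipa.beakerlib import BeakerLibLogHandler


def run_command(args):
    # stand-in for BeakerLibPlugin.run_beakerlib_command: just show the call
    print("would run:", args)


handler = BeakerLibLogHandler(run_command)
handler.setFormatter(logging.Formatter('[%(name)s] %(message)s'))
logging.getLogger().addHandler(handler)
logging.getLogger("ipa.test").warning("disk is almost full")
# -> would run: ['rlLogWarning', '[ipa.test] disk is almost full']
```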
46
ipatests/pytest_ipa/declarative.py
Normal file
@@ -0,0 +1,46 @@
# Authors:
#   Petr Viktorin <pviktori@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""Pytest plugin for Declarative tests"""


def pytest_generate_tests(metafunc):
    """Generates Declarative tests"""
    if 'declarative_test_definition' in metafunc.fixturenames:
        tests = []
        descriptions = []
        for i, test in enumerate(metafunc.cls.tests):
            if callable(test):
                description = '%s: %s' % (
                    str(i).zfill(4),
                    test.__name__,  # test is not a dict. pylint: disable=E1103
                )
            else:
                description = '%s: %s: %s' % (str(i).zfill(4),
                                              test['command'][0],
                                              test.get('desc', ''))
                test = dict(test)
                test['nice'] = description
            tests.append(test)
            descriptions.append(description)
        metafunc.parametrize(
            ['index', 'declarative_test_definition'],
            enumerate(tests),
            ids=descriptions,
        )
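For context, a Declarative test class carries a `tests` attribute that the hook above turns into parametrized cases, one per entry, with IDs built from the index, command name and description. A purely illustrative sketch of the expected shape (the command and expected output are made up):

```
class test_user_hypothetical:
    # each dict becomes one parametrized case, e.g.
    # '0000: user_add: Create "tuser"'
    tests = [
        dict(
            desc='Create "tuser"',
            command=('user_add', ['tuser'],
                     dict(givenname=u'Test', sn=u'User')),
            expected=dict(value=u'tuser', summary=u'Added user "tuser"'),
        ),
    ]
```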
65
ipatests/pytest_ipa/deprecated_frameworks.py
Normal file
@@ -0,0 +1,65 @@
#
# Copyright (C) 2019 FreeIPA Contributors see COPYING for license
#

"""Warns about xunit/unittest/nose tests.

FreeIPA is a rather old project and therefore contains most of the
famous past and present Python test idioms. Supporting all of them at
once is difficult. For now, the runner of IPA's test suite is Pytest.

Pytest supports xunit-style setups, unittest and nose tests, but this
support is limited and may be dropped in future releases. Worse,
mixing various test frameworks results in weird conflicts and is, of
course, not widely tested. In other words, there is a big risk. To
eliminate this risk and to avoid pinning Pytest to the 3.x branch,
IPA's tests were refactored.

This plugin is intended to issue warnings on collecting tests
which employ unittest/nose frameworks or xunit style.

To treat these warnings as errors it's enough to run Pytest with:

-W error:'xunit style is deprecated':pytest.PytestIPADeprecationWarning

"""
from unittest import TestCase

from ipatests.conftest import PytestIPADeprecationWarning

forbidden_module_scopes = [
    'setup_module',
    'setup_function',
    'teardown_module',
    'teardown_function',
]

forbidden_class_scopes = [
    'setup_class',
    'setup_method',
    'teardown_class',
    'teardown_method',
]


def pytest_collection_finish(session):
    for item in session.items:
        cls = getattr(item, 'cls', None)
        if cls is not None and issubclass(cls, TestCase):
            item.warn(PytestIPADeprecationWarning(
                "unittest is deprecated in favour of fixtures style"))
            continue

        def xunit_depr_warn(item, attr, names):
            for n in names:
                obj = getattr(item, attr, None)
                method = getattr(obj, n, None)
                fixtured = hasattr(method, '__pytest_wrapped__')
                if method is not None and not fixtured:
                    item.warn(PytestIPADeprecationWarning(
                        "xunit style is deprecated in favour of "
                        "fixtures style"))

        xunit_depr_warn(item, 'module', forbidden_module_scopes)
        xunit_depr_warn(item, 'cls', forbidden_class_scopes)
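The refactoring the warning asks for is mechanical: an xunit-style `setup_class`/`teardown_class` pair becomes a class-scoped fixture. A short before/after sketch (generic pytest idiom, not code from this commit):

```
import pytest


class TestOld:                       # would trigger the deprecation warning
    @classmethod
    def setup_class(cls):
        cls.resource = object()

    @classmethod
    def teardown_class(cls):
        del cls.resource


class TestNew:                       # fixture style, no warning
    @pytest.fixture(autouse=True, scope="class")
    def resource(self):
        res = object()               # setup
        yield res                    # value available to the tests
        del res                      # teardown
```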
472
ipatests/pytest_ipa/integration/__init__.py
Normal file
@@ -0,0 +1,472 @@
# Authors:
#   Petr Viktorin <pviktori@redhat.com>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""Pytest plugin for IPA Integration tests"""

from __future__ import print_function, absolute_import

from pprint import pformat

import logging
import os
import tempfile
import shutil
import re

import pytest
from pytest_multihost import make_multihost_fixture

from ipapython import ipautil
from ipaplatform.paths import paths
from .config import Config
from .env_config import get_global_config
from . import tasks

logger = logging.getLogger(__name__)

CLASS_LOGFILES = [
    # dirsrv logs
    paths.VAR_LOG_DIRSRV,
    # IPA install logs
    paths.IPASERVER_INSTALL_LOG,
    paths.IPACLIENT_INSTALL_LOG,
    paths.IPAREPLICA_INSTALL_LOG,
    paths.IPAREPLICA_CONNCHECK_LOG,
    paths.IPAREPLICA_CA_INSTALL_LOG,
    paths.IPASERVER_KRA_INSTALL_LOG,
    paths.IPA_CUSTODIA_AUDIT_LOG,
    paths.IPACLIENTSAMBA_INSTALL_LOG,
    paths.IPACLIENTSAMBA_UNINSTALL_LOG,
    paths.IPATRUSTENABLEAGENT_LOG,
    # IPA uninstall logs
    paths.IPASERVER_UNINSTALL_LOG,
    paths.IPACLIENT_UNINSTALL_LOG,
    # IPA upgrade logs
    paths.IPAUPGRADE_LOG,
    # IPA backup and restore logs
    paths.IPARESTORE_LOG,
    paths.IPABACKUP_LOG,
    # kerberos related logs
    paths.KADMIND_LOG,
    paths.KRB5KDC_LOG,
    # httpd logs
    paths.VAR_LOG_HTTPD_DIR,
    # dogtag logs
    paths.VAR_LOG_PKI_DIR,
    # selinux logs
    paths.VAR_LOG_AUDIT,
    # sssd
    paths.VAR_LOG_SSSD_DIR,
    # system
    paths.RESOLV_CONF,
    paths.HOSTS,
]


def make_class_logs(host):
    logs = list(CLASS_LOGFILES)
    env_filename = os.path.join(host.config.test_dir, 'env.sh')
    logs.append(env_filename)
    return logs


def pytest_addoption(parser):
    group = parser.getgroup("IPA integration tests")

    group.addoption(
        '--logfile-dir', dest="logfile_dir", default=None,
        help="Directory to store integration test logs in.")


def _get_logname_from_node(node):
    name = node.nodeid
    name = re.sub(r'\(\)/', '', name)  # remove ()/
    name = re.sub(r'[()]', '', name)   # and standalone brackets
    name = re.sub(r'(/|::)', '-', name)
    return name


def collect_test_logs(node, logs_dict, test_config, suffix=''):
    """Collect logs from a test

    Calls collect_logs and collect_systemd_journal

    :param node: The pytest collection node (request.node)
    :param logs_dict: Mapping of host to list of log filenames to collect
    :param test_config: Pytest configuration
    :param suffix: The custom suffix of the name of the logfiles' directory
    """
    name = '{node}{suffix}'.format(
        node=_get_logname_from_node(node),
        suffix=suffix,
    )
    logfile_dir = test_config.getoption('logfile_dir')
    collect_logs(
        name=name,
        logs_dict=logs_dict,
        logfile_dir=logfile_dir,
        beakerlib_plugin=test_config.pluginmanager.getplugin('BeakerLibPlugin'),
    )

    hosts = logs_dict.keys()  # pylint: disable=dict-keys-not-iterating
    collect_systemd_journal(name, hosts, logfile_dir)


def collect_systemd_journal(name, hosts, logfile_dir=None):
    """Collect systemd journal from remote hosts

    :param name: Name under which logs are collected, e.g. name of the test
    :param hosts: List of hosts from which to collect journal
    :param logfile_dir: Directory to log to
    """
    if logfile_dir is None:
        return

    for host in hosts:
        logger.info("Collecting journal from: %s", host.hostname)

        topdirname = os.path.join(logfile_dir, name, host.hostname)
        if not os.path.exists(topdirname):
            os.makedirs(topdirname)

        # Get journal content
        cmd = host.run_command(
            ['journalctl', '--since', host.config.log_journal_since],
            log_stdout=False, raiseonerr=False)
        if cmd.returncode:
            logger.error('An error occurred while collecting journal')
            continue

        # Write journal to file
        with open(os.path.join(topdirname, "journal"), 'w') as f:
            f.write(cmd.stdout_text)


def collect_logs(name, logs_dict, logfile_dir=None, beakerlib_plugin=None):
    """Collect logs from remote hosts

    :param name: Name under which logs are collected, e.g. name of the test
    :param logs_dict: Mapping of host to list of log filenames to collect
    :param logfile_dir: Directory to log to
    :param beakerlib_plugin:
        BeakerLibProcess or BeakerLibPlugin used to collect logs for BeakerLib

    If neither logfile_dir nor beakerlib_plugin is given, no logs are
    collected.
    """
    if logs_dict and (logfile_dir or beakerlib_plugin):

        if logfile_dir:
            remove_dir = False
        else:
            logfile_dir = tempfile.mkdtemp()
            remove_dir = True

        topdirname = os.path.join(logfile_dir, name)

        for host, logs in logs_dict.items():
            logger.info('Collecting logs from: %s', host.hostname)
            # make a list of unique log filenames
            logs = list(set(logs))
            dirname = os.path.join(topdirname, host.hostname)
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            tarname = os.path.join(dirname, 'logs.tar.xz')
            # get temporary file name
            cmd = host.run_command(['mktemp'])
            tmpname = cmd.stdout_text.strip()
            # Tar up the logs on the remote server
            cmd = host.run_command(
                ['tar', 'cJvf', tmpname, '--ignore-failed-read'] + logs,
                log_stdout=False, raiseonerr=False)
            if cmd.returncode:
                logger.warning('Could not collect all requested logs')
            # fetch tar file
            with open(tarname, 'wb') as f:
                f.write(host.get_file_contents(tmpname))
            # delete from remote
            host.run_command(['rm', '-f', tmpname])
            # Unpack on the local side
            ipautil.run([paths.TAR, 'xJvf', 'logs.tar.xz'], cwd=dirname,
                        raiseonerr=False)
            os.unlink(tarname)

        if beakerlib_plugin:
            # Use BeakerLib's rlFileSubmit on the individual files
            # The resulting submitted filename will be
            # $HOSTNAME-$FILENAME (with '/' replaced by '-')
            beakerlib_plugin.run_beakerlib_command(['pushd', topdirname])
            try:
                for dirpath, _dirnames, filenames in os.walk(topdirname):
                    for filename in filenames:
                        fullname = os.path.relpath(
                            os.path.join(dirpath, filename), topdirname)
                        logger.debug('Submitting file: %s', fullname)
                        beakerlib_plugin.run_beakerlib_command(
                            ['rlFileSubmit', fullname])
            finally:
                beakerlib_plugin.run_beakerlib_command(['popd'])

        if remove_dir:
            if beakerlib_plugin:
                # The BeakerLib process runs asynchronously, let it clean up
                # after it's done with the directory
                beakerlib_plugin.run_beakerlib_command(
                    ['rm', '-rvf', topdirname])
            else:
                shutil.rmtree(topdirname)

class IntegrationLogs:
    """Represent logfile collections

    A collection is a mapping of IPA hosts to a list of logfiles to be
    collected. There are two types of collections: class and method.
    The former contains a list of logfiles which will be collected on
    each test (within a class) completion, while the latter contains
    a list of logfiles which will be collected only on a certain test
    completion (once).
    """
    def __init__(self):
        self._class_logs = {}
        self._method_logs = {}

    def set_logs(self, host, logs):
        self._class_logs[host] = list(logs)

    @property
    def method_logs(self):
        return self._method_logs

    @property
    def class_logs(self):
        return self._class_logs

    def init_method_logs(self):
        """Initialize method logs with the class ones"""
        self._method_logs = {}
        for k in self._class_logs:
            self._method_logs[k] = list(self._class_logs[k])

    def collect_class_log(self, host, filename):
        """Add a class-scope log

        The file with the given filename will be collected from the
        host on each test completion (within a test class).
        """
        logger.info('Adding %s:%s to list of class logs to collect',
                    host.external_hostname, filename)
        self._class_logs.setdefault(host, []).append(filename)
        self._method_logs.setdefault(host, []).append(filename)

    def collect_method_log(self, host, filename):
        """Add a method-scope log

        The file with the given filename will be collected from the
        host on a test completion.
        """
        logger.info('Adding %s:%s to list of method logs to collect',
                    host.external_hostname, filename)
        self._method_logs.setdefault(host, []).append(filename)


@pytest.fixture(scope='class')
def class_integration_logs(request):
    """Internal fixture providing the class-level logs_dict

    For adjusting the collection of logs, please use the
    'integration_logs' fixture.
    """
    integration_logs = IntegrationLogs()
    yield integration_logs
    # Since the main fixture of integration tests ('mh') depends on
    # this one, the class log collection happens *after* the teardown
    # of that fixture. The 'uninstall' is among the finalizers of 'mh'.
    # This means that the logs collected here are the IPA *uninstall*
    # logs.
    class_logs = integration_logs.class_logs
    collect_test_logs(request.node, class_logs, request.config,
                      suffix='-uninstall')


@pytest.fixture
def integration_logs(class_integration_logs, request):
    """Provides access to test integration logs, and collects after each test

    To collect a logfile on a test completion one should add the dependency on
    this fixture and call its 'collect_method_log' method.
    For example, run TestFoo.
    ```
    class TestFoo(IntegrationTest):
        def test_foo(self):
            pass

        def test_bar(self, integration_logs):
            integration_logs.collect_method_log(self.master, '/logfile')
    ```
    '/logfile' will be collected only for the 'test_bar' test.

    To collect a logfile on a test class completion one should add the
    dependency on this fixture and call its 'collect_class_log' method.
    For example, run TestFoo.
    ```
    class TestFoo(IntegrationTest):
        def test_foo(self, integration_logs):
            integration_logs.collect_class_log(self.master, '/logfile')

        def test_bar(self):
            pass
    ```
    '/logfile' will be collected 3 times:
    1) on 'test_foo' completion
    2) on 'test_bar' completion
    3) on 'TestFoo' completion

    Note that the registration of a collection happens at runtime. This means
    that if '/logfile' is registered in 'test_bar', it will not be collected
    on 'test_foo' completion:
    1) on 'test_bar' completion
    2) on 'TestFoo' completion
    """
    class_integration_logs.init_method_logs()
    yield class_integration_logs
    method_logs = class_integration_logs.method_logs
    collect_test_logs(request.node, method_logs, request.config)


@pytest.fixture(scope='class')
def mh(request, class_integration_logs):
    """IPA's multihost fixture object
    """
    cls = request.cls

    domain_description = {
        'type': 'IPA',
        'hosts': {
            'master': 1,
            'replica': cls.num_replicas,
            'client': cls.num_clients,
        },
    }
    domain_description['hosts'].update(
        {role: 1 for role in cls.required_extra_roles})

    domain_descriptions = [domain_description]
    for _i in range(cls.num_ad_domains):
        domain_descriptions.append({
            'type': 'AD',
            'hosts': {'ad': 1}
        })
    for _i in range(cls.num_ad_subdomains):
        domain_descriptions.append({
            'type': 'AD_SUBDOMAIN',
            'hosts': {'ad_subdomain': 1}
        })
    for _i in range(cls.num_ad_treedomains):
        domain_descriptions.append({
            'type': 'AD_TREEDOMAIN',
            'hosts': {'ad_treedomain': 1}
        })

    mh = make_multihost_fixture(
        request,
        domain_descriptions,
        config_class=Config,
        _config=get_global_config(),
    )

    mh.domain = mh.config.domains[0]
    [mh.master] = mh.domain.hosts_by_role('master')
    mh.replicas = mh.domain.hosts_by_role('replica')
    mh.clients = mh.domain.hosts_by_role('client')
    ad_domains = mh.config.ad_domains
    if ad_domains:
        mh.ads = []
        for domain in ad_domains:
            mh.ads.extend(domain.hosts_by_role('ad'))
        mh.ad_subdomains = []
        for domain in ad_domains:
            mh.ad_subdomains.extend(domain.hosts_by_role('ad_subdomain'))
        mh.ad_treedomains = []
        for domain in ad_domains:
            mh.ad_treedomains.extend(domain.hosts_by_role('ad_treedomain'))

    cls.logs_to_collect = class_integration_logs.class_logs

    if logger.isEnabledFor(logging.INFO):
        logger.info(pformat(mh.config.to_dict()))

    for ipa_host in mh.config.get_all_ipa_hosts():
        class_integration_logs.set_logs(ipa_host, make_class_logs(ipa_host))

    for host in mh.config.get_all_hosts():
        logger.info('Preparing host %s', host.hostname)
        tasks.prepare_host(host)

    add_compat_attrs(cls, mh)

    def fin():
        del_compat_attrs(cls)
    mh._pytestmh_request.addfinalizer(fin)

    try:
        yield mh.install()
    finally:
        # The 'mh' fixture depends on the 'class_integration_logs' one;
        # thus, the class log collection happens *after* the teardown of
        # the 'mh' fixture. The 'uninstall' is among the finalizers of
        # 'mh'. This means that the logs collected there are the IPA
        # *uninstall* logs, and the 'install' ones can be removed during
        # the IPA uninstall phase. To address this problem (e.g. an
        # installation error), the install logs are collected into the
        # '{nodeid}-install' directory while the uninstall ones go into
        # '{nodeid}-uninstall'.
        class_logs = class_integration_logs.class_logs
        collect_test_logs(request.node, class_logs, request.config,
                          suffix='-install')


def add_compat_attrs(cls, mh):
    """Add convenience attributes to the test class

    This is deprecated in favor of the mh fixture.
    To be removed when no tests use it anymore.
    """
    cls.domain = mh.domain
    cls.master = mh.master
    cls.replicas = mh.replicas
    cls.clients = mh.clients
    cls.ad_domains = mh.config.ad_domains
    if cls.ad_domains:
        cls.ads = mh.ads
        cls.ad_subdomains = mh.ad_subdomains
        cls.ad_treedomains = mh.ad_treedomains


def del_compat_attrs(cls):
    """Remove convenience attributes from the test class

    This is deprecated in favor of the mh fixture.
    To be removed when no tests use it anymore.
    """
    del cls.master
    del cls.replicas
    del cls.clients
    del cls.domain
    if cls.ad_domains:
        del cls.ads
        del cls.ad_subdomains
        del cls.ad_treedomains
    del cls.ad_domains
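Tests consume the `mh` fixture indirectly through the `IntegrationTest` base class (defined elsewhere in ipatests, assumed here to be `ipatests.test_integration.base.IntegrationTest`); a minimal sketch of how the class-level knobs read by `mh` above are declared and how the compat attributes are used:

```
from ipatests.test_integration.base import IntegrationTest


class TestTopology(IntegrationTest):
    num_replicas = 1      # -> 'replica': 1 in the domain description
    num_clients = 1       # -> 'client': 1
    topology = 'line'     # consumed by IntegrationTest.install()

    def test_master_installed(self):
        # self.master / self.replicas / self.clients are the convenience
        # attributes set by add_compat_attrs() from the mh fixture
        self.master.run_command(
            ['ipa', 'server-show', self.master.hostname])
```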
199
ipatests/pytest_ipa/integration/config.py
Normal file
@@ -0,0 +1,199 @@
# Authors:
#   Petr Viktorin <pviktori@redhat.com>
#   Tomas Babej <tbabej@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""Utilities for configuration of multi-master tests"""

import logging
import random

import pytest
import pytest_multihost.config

from ipapython.dn import DN
from ipalib.constants import MAX_DOMAIN_LEVEL


class Config(pytest_multihost.config.Config):
    extra_init_args = {
        'admin_name',
        'admin_password',
        'dirman_dn',
        'dirman_password',
        'nis_domain',
        'ntp_server',
        'ad_admin_name',
        'ad_admin_password',
        'dns_forwarder',
        'domain_level',
        'log_journal_since',
        'fips_mode',
    }

    def __init__(self, **kwargs):
        kwargs.setdefault('test_dir', '/root/ipatests')
        super(Config, self).__init__(**kwargs)

        admin_password = kwargs.get('admin_password') or 'Secret123'

        self.admin_name = kwargs.get('admin_name') or 'admin'
        self.admin_password = admin_password
        self.dirman_dn = DN(kwargs.get('dirman_dn') or 'cn=Directory Manager')
        self.dirman_password = kwargs.get('dirman_password') or admin_password
        self.nis_domain = kwargs.get('nis_domain') or 'ipatest'
        self.ntp_server = str(kwargs.get('ntp_server') or (
            '%s.pool.ntp.org' % random.randint(0, 3)))
        self.ad_admin_name = kwargs.get('ad_admin_name') or 'Administrator'
        self.ad_admin_password = kwargs.get('ad_admin_password') or 'Secret123'
        self.domain_level = kwargs.get('domain_level', MAX_DOMAIN_LEVEL)
        # 8.8.8.8 is probably the best-known public DNS
        self.dns_forwarder = kwargs.get('dns_forwarder') or '8.8.8.8'
        self.debug = False
        self.log_journal_since = kwargs.get('log_journal_since') or '-1h'
        if self.domain_level is None:
            self.domain_level = MAX_DOMAIN_LEVEL
        self.fips_mode = kwargs.get('fips_mode', False)

    def get_domain_class(self):
        return Domain

    def get_logger(self, name):
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        return logger

    @property
    def ad_domains(self):
        return [d for d in self.domains if d.is_ad_type]

    def get_all_hosts(self):
        for domain in self.domains:
            for host in domain.hosts:
                yield host

    def get_all_ipa_hosts(self):
        for ipa_domain in (d for d in self.domains if d.is_ipa_type):
            for ipa_host in ipa_domain.hosts:
                yield ipa_host

    def to_dict(self):
        extra_args = self.extra_init_args - {'dirman_dn'}
        result = super(Config, self).to_dict(extra_args)
        result['dirman_dn'] = str(self.dirman_dn)
        return result

    @classmethod
    def from_env(cls, env):
        from ipatests.pytest_ipa.integration.env_config import config_from_env
        return config_from_env(env)

    def to_env(self, **kwargs):
        from ipatests.pytest_ipa.integration.env_config import config_to_env
        return config_to_env(self, **kwargs)

    def filter(self, descriptions):
        """Destructively filters hosts and orders domains to fit description

        By default make_multihost_fixture() skips a test case when filter()
        raises a FilterError. Let's turn FilterError into a fatal error
        instead.
        """
        try:
            super(Config, self).filter(descriptions)
        except pytest_multihost.config.FilterError as e:
            pytest.fail(str(e))


class Domain(pytest_multihost.config.Domain):
    """Configuration for an IPA / AD domain"""
    def __init__(self, config, name, domain_type):
        self.type = str(domain_type)

        self.config = config
        self.name = str(name)
        self.hosts = []

        assert self.is_ipa_type or self.is_ad_type
        self.realm = self.name.upper()
        self.basedn = DN(*(('dc', p) for p in name.split('.')))

    @property
    def is_ipa_type(self):
        return self.type == 'IPA'

    @property
    def is_ad_type(self):
        return self.type == 'AD' or self.type.startswith('AD_')

    @property
    def static_roles(self):
        # Specific roles for each domain type are hardcoded
        if self.type == 'IPA':
            return ('master', 'replica', 'client', 'other')
        elif self.type == 'AD':
            return ('ad',)
        elif self.type == 'AD_SUBDOMAIN':
            return ('ad_subdomain',)
        elif self.type == 'AD_TREEDOMAIN':
            return ('ad_treedomain',)
        else:
            raise LookupError(self.type)

    def get_host_class(self, host_dict):
        from ipatests.pytest_ipa.integration.host import Host, WinHost

        if self.is_ipa_type:
            return Host
        elif self.is_ad_type:
            return WinHost
        else:
            raise LookupError(self.type)

    @property
    def master(self):
        return self.host_by_role('master')

    @property
    def masters(self):
        return self.hosts_by_role('master')

    @property
    def replicas(self):
        return self.hosts_by_role('replica')

    @property
    def clients(self):
        return self.hosts_by_role('client')

    @property
    def ads(self):
        return self.hosts_by_role('ad')

    @property
    def other_hosts(self):
        return self.hosts_by_role('other')

    @classmethod
    def from_env(cls, env, config, index, domain_type):
        from ipatests.pytest_ipa.integration.env_config import domain_from_env
        return domain_from_env(env, config, index, domain_type)

    def to_env(self, **kwargs):
        from ipatests.pytest_ipa.integration.env_config import domain_to_env
        return domain_to_env(self, **kwargs)
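A small sketch of the defaulting behaviour in `Config.__init__` (standalone, no hosts involved, assuming pytest_multihost accepts an empty domain list); the keyword arguments mirror `extra_init_args`:

```
from ipatests.pytest_ipa.integration.config import Config

# Only non-defaults need to be passed; everything else falls back as above.
cfg = Config(domains=[], dirman_password='SomeOther123')
assert cfg.admin_name == 'admin'
assert cfg.admin_password == 'Secret123'
assert cfg.dirman_password == 'SomeOther123'
assert str(cfg.dirman_dn) == 'cn=Directory Manager'
assert cfg.test_dir == '/root/ipatests'
```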
578
ipatests/pytest_ipa/integration/create_caless_pki.py
Normal file
@@ -0,0 +1,578 @@
|
||||
# Copyright (c) 2015-2017, Jan Cholasta <jcholast@redhat.com>
|
||||
#
|
||||
# Permission to use, copy, modify, and/or distribute this software for any
|
||||
# purpose with or without fee is hereby granted, provided that the above
|
||||
# copyright notice and this permission notice appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
|
||||
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
|
||||
import collections
|
||||
import datetime
|
||||
import itertools
|
||||
import os
|
||||
import os.path
|
||||
import six
|
||||
|
||||
from cryptography import __version__ as cryptography_version
|
||||
from cryptography import x509
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
from cryptography.hazmat.primitives import hashes, serialization
|
||||
from cryptography.hazmat.primitives.asymmetric import rsa
|
||||
from cryptography.x509.oid import NameOID
|
||||
from pkg_resources import parse_version
|
||||
from pyasn1.type import univ, char, namedtype, tag
|
||||
from pyasn1.codec.der import encoder as der_encoder
|
||||
from pyasn1.codec.native import decoder as native_decoder
|
||||
|
||||
if six.PY3:
|
||||
unicode = str
|
||||
|
||||
DAY = datetime.timedelta(days=1)
|
||||
YEAR = 365 * DAY
|
||||
|
||||
# we get the variables from ca_less test
|
||||
domain = None
|
||||
realm = None
|
||||
server1 = None
|
||||
server2 = None
|
||||
client = None
|
||||
password = None
|
||||
cert_dir = None
|
||||
|
||||
CertInfo = collections.namedtuple('CertInfo', 'nick key cert counter')
|
||||
|
||||
|
||||
class PrincipalName(univ.Sequence):
|
||||
'''See RFC 4120 for details'''
|
||||
componentType = namedtype.NamedTypes(
|
||||
namedtype.NamedType(
|
||||
'name-type',
|
||||
univ.Integer().subtype(
|
||||
explicitTag=tag.Tag(
|
||||
tag.tagClassContext,
|
||||
tag.tagFormatSimple,
|
||||
0,
|
||||
),
|
||||
),
|
||||
),
|
||||
namedtype.NamedType(
|
||||
'name-string',
|
||||
univ.SequenceOf(char.GeneralString()).subtype(
|
||||
explicitTag=tag.Tag(
|
||||
tag.tagClassContext,
|
||||
tag.tagFormatSimple,
|
||||
1,
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
class KRB5PrincipalName(univ.Sequence):
|
||||
'''See RFC 4556 for details'''
|
||||
componentType = namedtype.NamedTypes(
|
||||
namedtype.NamedType(
|
||||
'realm',
|
||||
char.GeneralString().subtype(
|
||||
explicitTag=tag.Tag(
|
||||
tag.tagClassContext,
|
||||
tag.tagFormatSimple,
|
||||
0,
|
||||
),
|
||||
),
|
||||
),
|
||||
namedtype.NamedType(
|
||||
'principalName',
|
||||
PrincipalName().subtype(
|
||||
explicitTag=tag.Tag(
|
||||
tag.tagClassContext,
|
||||
tag.tagFormatSimple,
|
||||
1,
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def profile_ca(builder, ca_nick, ca):
|
||||
now = datetime.datetime.utcnow()
|
||||
|
||||
builder = builder.not_valid_before(now)
|
||||
builder = builder.not_valid_after(now + 10 * YEAR)
|
||||
|
||||
crl_uri = u'file://{}.crl'.format(os.path.join(cert_dir, ca_nick))
|
||||
|
||||
builder = builder.add_extension(
|
||||
x509.KeyUsage(
|
||||
digital_signature=True,
|
||||
content_commitment=True,
|
||||
key_encipherment=False,
|
||||
data_encipherment=False,
|
||||
key_agreement=False,
|
||||
key_cert_sign=True,
|
||||
crl_sign=True,
|
||||
encipher_only=False,
|
||||
decipher_only=False,
|
||||
),
|
||||
critical=True,
|
||||
)
|
||||
builder = builder.add_extension(
|
||||
x509.BasicConstraints(ca=True, path_length=None),
|
||||
critical=True,
|
||||
)
|
||||
builder = builder.add_extension(
|
||||
x509.CRLDistributionPoints([
|
||||
x509.DistributionPoint(
|
||||
full_name=[x509.UniformResourceIdentifier(crl_uri)],
|
||||
relative_name=None,
|
||||
crl_issuer=None,
|
||||
reasons=None,
|
||||
),
|
||||
]),
|
||||
critical=False,
|
||||
)
|
||||
|
||||
public_key = builder._public_key
|
||||
|
||||
builder = builder.add_extension(
|
||||
x509.SubjectKeyIdentifier.from_public_key(public_key),
|
||||
critical=False,
|
||||
)
|
||||
# here we get "ca" only for "ca1/subca" CA
|
||||
if not ca:
|
||||
builder = builder.add_extension(
|
||||
x509.AuthorityKeyIdentifier.from_issuer_public_key(public_key),
|
||||
critical=False,
|
||||
)
|
||||
else:
|
||||
ski_ext = ca.cert.extensions.get_extension_for_class(
|
||||
x509.SubjectKeyIdentifier
|
||||
)
|
||||
auth_keyidentifier = (x509.AuthorityKeyIdentifier
|
||||
.from_issuer_subject_key_identifier)
|
||||
'''
|
||||
cryptography < 2.7 accepts only Extension object.
|
||||
Remove this workaround when all supported platforms update
|
||||
python-cryptography.
|
||||
'''
|
||||
if (parse_version(cryptography_version) >= parse_version('2.7')):
|
||||
extension = auth_keyidentifier(ski_ext.value)
|
||||
else:
|
||||
extension = auth_keyidentifier(ski_ext)
|
||||
|
||||
builder = builder.add_extension(extension, critical=False)
|
||||
return builder
|
||||
|
||||
|
||||
def profile_server(builder, ca_nick, ca,
|
||||
warp=datetime.timedelta(days=0), dns_name=None,
|
||||
badusage=False, wildcard=False):
|
||||
now = datetime.datetime.utcnow() + warp
|
||||
|
||||
builder = builder.not_valid_before(now)
|
||||
builder = builder.not_valid_after(now + YEAR)
|
||||
|
||||
crl_uri = u'file://{}.crl'.format(os.path.join(cert_dir, ca_nick))
|
||||
|
||||
builder = builder.add_extension(
|
||||
x509.CRLDistributionPoints([
|
||||
x509.DistributionPoint(
|
||||
full_name=[x509.UniformResourceIdentifier(crl_uri)],
|
||||
relative_name=None,
|
||||
crl_issuer=None,
|
||||
reasons=None,
|
||||
),
|
||||
]),
|
||||
critical=False,
|
||||
)
|
||||
|
||||
if dns_name is not None:
|
||||
builder = builder.add_extension(
|
||||
x509.SubjectAlternativeName([x509.DNSName(dns_name)]),
|
||||
critical=False,
|
||||
)
|
||||
|
||||
if badusage:
|
||||
builder = builder.add_extension(
|
||||
x509.KeyUsage(
|
||||
digital_signature=False,
|
||||
content_commitment=False,
|
||||
key_encipherment=False,
|
||||
data_encipherment=True,
|
||||
key_agreement=True,
|
||||
key_cert_sign=False,
|
||||
crl_sign=False,
|
||||
encipher_only=False,
|
||||
decipher_only=False
|
||||
),
|
||||
critical=False
|
||||
)
|
||||
|
||||
if wildcard:
|
||||
names = [x509.DNSName(u'*.' + domain)]
|
||||
server_split = server1.split('.', 1)
|
||||
if len(server_split) == 2 and domain != server_split[1]:
|
||||
names.append(x509.DNSName(u'*.' + server_split[1]))
|
||||
builder = builder.add_extension(
|
||||
x509.SubjectAlternativeName(names),
|
||||
critical=False,
|
||||
)
|
||||
|
||||
return builder
|
||||
|
||||
|
||||
def profile_kdc(builder, ca_nick, ca,
|
||||
warp=datetime.timedelta(days=0), dns_name=None,
|
||||
badusage=False):
|
||||
now = datetime.datetime.utcnow() + warp
|
||||
|
||||
builder = builder.not_valid_before(now)
|
||||
builder = builder.not_valid_after(now + YEAR)
|
||||
|
||||
crl_uri = u'file://{}.crl'.format(os.path.join(cert_dir, ca_nick))
|
||||
|
||||
builder = builder.add_extension(
|
||||
x509.ExtendedKeyUsage([x509.ObjectIdentifier('1.3.6.1.5.2.3.5')]),
|
||||
critical=False,
|
||||
)
|
||||
|
||||
name = {
|
||||
'realm': realm,
|
||||
'principalName': {
|
||||
'name-type': 2,
|
||||
'name-string': ['krbtgt', realm],
|
||||
},
|
||||
}
|
||||
name = native_decoder.decode(name, asn1Spec=KRB5PrincipalName())
|
||||
name = der_encoder.encode(name)
|
||||
|
||||
names = [x509.OtherName(x509.ObjectIdentifier('1.3.6.1.5.2.2'), name)]
|
||||
if dns_name is not None:
|
||||
names += [x509.DNSName(dns_name)]
|
||||
|
||||
builder = builder.add_extension(
|
||||
x509.SubjectAlternativeName(names),
|
||||
critical=False,
|
||||
)
|
||||
|
||||
builder = builder.add_extension(
|
||||
x509.CRLDistributionPoints([
|
||||
x509.DistributionPoint(
|
||||
full_name=[x509.UniformResourceIdentifier(crl_uri)],
|
||||
relative_name=None,
|
||||
crl_issuer=None,
|
||||
reasons=None,
|
||||
),
|
||||
]),
|
||||
critical=False,
|
||||
)
|
||||
|
||||
if badusage:
|
||||
builder = builder.add_extension(
|
||||
x509.KeyUsage(
|
||||
digital_signature=False,
|
||||
content_commitment=False,
|
||||
key_encipherment=False,
|
||||
data_encipherment=True,
|
||||
key_agreement=True,
|
||||
key_cert_sign=False,
|
||||
crl_sign=False,
|
||||
encipher_only=False,
|
||||
decipher_only=False
|
||||
),
|
||||
critical=False
|
||||
)
|
||||
|
||||
return builder
|
||||
|
||||
|
||||
def gen_cert(profile, nick_base, subject, ca=None, **kwargs):
|
||||
key = rsa.generate_private_key(
|
||||
public_exponent=65537,
|
||||
key_size=2048,
|
||||
backend=default_backend(),
|
||||
)
|
||||
public_key = key.public_key()
|
||||
|
||||
counter = itertools.count(1)
|
||||
|
||||
if ca is not None:
|
||||
ca_nick, ca_key, ca_cert, ca_counter = ca
|
||||
nick = os.path.join(ca_nick, nick_base)
|
||||
issuer = ca_cert.subject
|
||||
else:
|
||||
nick = ca_nick = nick_base
|
||||
ca_key = key
|
||||
ca_counter = counter
|
||||
issuer = subject
|
||||
|
||||
serial = next(ca_counter)
|
||||
|
||||
builder = x509.CertificateBuilder()
|
||||
builder = builder.serial_number(serial)
|
||||
builder = builder.issuer_name(issuer)
|
||||
builder = builder.subject_name(subject)
|
||||
builder = builder.public_key(public_key)
|
||||
builder = profile(builder, ca_nick, ca, **kwargs)
|
||||
|
||||
cert = builder.sign(
|
||||
private_key=ca_key,
|
||||
algorithm=hashes.SHA256(),
|
||||
backend=default_backend(),
|
||||
)
|
||||
|
||||
key_pem = key.private_bytes(
|
||||
serialization.Encoding.PEM,
|
||||
serialization.PrivateFormat.PKCS8,
|
||||
serialization.BestAvailableEncryption(password.encode()),
|
||||
)
|
||||
cert_pem = cert.public_bytes(serialization.Encoding.PEM)
|
||||
try:
|
||||
os.makedirs(os.path.dirname(os.path.join(cert_dir, nick)))
|
||||
except OSError:
|
||||
pass
|
||||
with open(os.path.join(cert_dir, nick + '.key'), 'wb') as f:
|
||||
f.write(key_pem)
|
||||
with open(os.path.join(cert_dir, nick + '.crt'), 'wb') as f:
|
||||
f.write(cert_pem)
|
||||
|
||||
return CertInfo(nick, key, cert, counter)
|
||||
|
||||
|
||||
def revoke_cert(ca, serial):
|
||||
now = datetime.datetime.utcnow()
|
||||
|
||||
crl_builder = x509.CertificateRevocationListBuilder()
|
||||
crl_builder = crl_builder.issuer_name(ca.cert.subject)
|
||||
crl_builder = crl_builder.last_update(now)
|
||||
crl_builder = crl_builder.next_update(now + DAY)
|
||||
|
||||
crl_filename = os.path.join(cert_dir, ca.nick + '.crl')
|
||||
|
||||
try:
|
||||
f = open(crl_filename, 'rb')
|
||||
except IOError:
|
||||
pass
|
||||
else:
|
||||
with f:
|
||||
crl_pem = f.read()
|
||||
|
||||
crl = x509.load_pem_x509_crl(crl_pem, default_backend())
|
||||
|
||||
for revoked_cert in crl:
|
||||
crl_builder = crl_builder.add_revoked_certificate(revoked_cert)
|
||||
|
||||
builder = x509.RevokedCertificateBuilder()
|
||||
builder = builder.serial_number(serial)
|
||||
builder = builder.revocation_date(now)
|
||||
|
||||
revoked_cert = builder.build(default_backend())
|
||||
|
||||
crl_builder = crl_builder.add_revoked_certificate(revoked_cert)
|
||||
|
||||
crl = crl_builder.sign(
|
||||
private_key=ca.key,
|
||||
algorithm=hashes.SHA256(),
|
||||
backend=default_backend(),
|
||||
)
|
||||
|
||||
crl_pem = crl.public_bytes(serialization.Encoding.PEM)
|
||||
|
||||
with open(crl_filename, 'wb') as f:
|
||||
f.write(crl_pem)
|
||||
|
||||
|
||||
def gen_server_certs(nick_base, hostname, org, ca=None):
|
||||
gen_cert(profile_server, nick_base,
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, hostname)
|
||||
]),
|
||||
ca, dns_name=hostname
|
||||
)
|
||||
gen_cert(profile_server, nick_base + u'-badname',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, u'not-' + hostname)
|
||||
]),
|
||||
ca, dns_name=u'not-' + hostname
|
||||
)
|
||||
gen_cert(profile_server, nick_base + u'-altname',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, u'alt-' + hostname)
|
||||
]),
|
||||
ca, dns_name=hostname
|
||||
)
|
||||
gen_cert(profile_server, nick_base + u'-expired',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME,
|
||||
u'Expired'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, hostname)
|
||||
]),
|
||||
ca, dns_name=hostname, warp=-2 * YEAR
|
||||
)
|
||||
gen_cert(
|
||||
profile_server, nick_base + u'-not-yet-valid',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u'Future'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, hostname),
|
||||
]),
|
||||
ca, dns_name=hostname, warp=1 * DAY,
|
||||
)
|
||||
gen_cert(profile_server, nick_base + u'-badusage',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME,
|
||||
u'Bad Usage'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, hostname)
|
||||
]),
|
||||
ca, dns_name=hostname, badusage=True
|
||||
)
|
||||
revoked = gen_cert(profile_server, nick_base + u'-revoked',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME,
|
||||
u'Revoked'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, hostname)
|
||||
]),
|
||||
ca, dns_name=hostname
|
||||
)
|
||||
revoke_cert(ca, revoked.cert.serial_number)
|
||||
|
||||
|
||||
def gen_kdc_certs(nick_base, hostname, org, ca=None):
|
||||
gen_cert(profile_kdc, nick_base + u'-kdc',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u'KDC'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, hostname)
|
||||
]),
|
||||
ca
|
||||
)
|
||||
gen_cert(profile_kdc, nick_base + u'-kdc-badname',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u'KDC'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, u'not-' + hostname)
|
||||
]),
|
||||
ca
|
||||
)
|
||||
gen_cert(profile_kdc, nick_base + u'-kdc-altname',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u'KDC'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, u'alt-' + hostname)
|
||||
]),
|
||||
ca, dns_name=hostname
|
||||
)
|
||||
gen_cert(profile_kdc, nick_base + u'-kdc-expired',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME,
|
||||
u'Expired KDC'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, hostname)
|
||||
]),
|
||||
ca, warp=-2 * YEAR
|
||||
)
|
||||
gen_cert(profile_kdc, nick_base + u'-kdc-badusage',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME,
|
||||
u'Bad Usage KDC'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, hostname)
|
||||
]),
|
||||
ca, badusage=True
|
||||
)
|
||||
revoked = gen_cert(profile_kdc, nick_base + u'-kdc-revoked',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME,
|
||||
u'Revoked KDC'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, hostname)
|
||||
]),
|
||||
ca
|
||||
)
|
||||
revoke_cert(ca, revoked.cert.serial_number)
|
||||
|
||||
|
||||
def gen_subtree(nick_base, org, ca=None):
|
||||
subca = gen_cert(profile_ca, nick_base,
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, u'CA')
|
||||
]),
|
||||
ca
|
||||
)
|
||||
gen_cert(profile_server, u'wildcard',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, u'*.' + domain)
|
||||
]),
|
||||
subca, wildcard=True
|
||||
)
|
||||
gen_server_certs(u'server', server1, org, subca)
|
||||
gen_server_certs(u'replica', server2, org, subca)
|
||||
gen_server_certs(u'client', client, org, subca)
|
||||
gen_cert(profile_kdc, u'kdcwildcard',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, org),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, u'*.' + domain)
|
||||
]),
|
||||
subca
|
||||
)
|
||||
gen_kdc_certs(u'server', server1, org, subca)
|
||||
gen_kdc_certs(u'replica', server2, org, subca)
|
||||
gen_kdc_certs(u'client', client, org, subca)
|
||||
return subca
|
||||
|
||||
|
||||
def create_pki():
|
||||
|
||||
gen_cert(profile_server, u'server-selfsign',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, u'Self-signed'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, server1)
|
||||
])
|
||||
)
|
||||
gen_cert(profile_server, u'replica-selfsign',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, u'Self-signed'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, server2)
|
||||
])
|
||||
)
|
||||
gen_cert(profile_server, u'noca',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, u'No-CA'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, server1)
|
||||
])
|
||||
)
|
||||
gen_cert(profile_kdc, u'server-kdc-selfsign',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, u'Self-signed'),
|
||||
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u'KDC'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, server1)
|
||||
])
|
||||
)
|
||||
gen_cert(profile_kdc, u'replica-kdc-selfsign',
|
||||
x509.Name([
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, u'Self-signed'),
|
||||
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u'KDC'),
|
||||
x509.NameAttribute(NameOID.COMMON_NAME, server2)
|
||||
])
|
||||
)
|
||||
ca1 = gen_subtree(u'ca1', u'Example Organization')
|
||||
gen_subtree(u'subca', u'Subsidiary Example Organization', ca1)
|
||||
gen_subtree(u'ca2', u'Other Example Organization')
|
||||
ca3 = gen_subtree(u'ca3', u'Unknown Organization')
|
||||
os.unlink(os.path.join(cert_dir, ca3.nick + '.key'))
|
||||
os.unlink(os.path.join(cert_dir, ca3.nick + '.crt'))
|
||||
373
ipatests/pytest_ipa/integration/env_config.py
Normal file
@@ -0,0 +1,373 @@
# Authors:
#   Petr Viktorin <pviktori@redhat.com>
#   Tomas Babej <tbabej@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""Support for configuring multihost testing via environment variables

This is here to support tests configured for Beaker,
such as the ones at https://github.com/freeipa/tests/
"""

import os
import json
import collections

from ipapython import ipautil
from ipatests.pytest_ipa.integration.config import Config, Domain
from ipalib.constants import MAX_DOMAIN_LEVEL

TESTHOST_PREFIX = 'TESTHOST_'


_SettingInfo = collections.namedtuple('Setting', 'name var_name default')
_setting_infos = (
    # Directory in which test-specific files will be stored
    _SettingInfo('test_dir', 'IPATEST_DIR', '/root/ipatests'),

    # File with root's private RSA key for SSH (default: ~/.ssh/id_rsa)
    _SettingInfo('ssh_key_filename', 'IPA_ROOT_SSH_KEY', None),

    # SSH password for root (used if root_ssh_key_filename is not set)
    _SettingInfo('ssh_password', 'IPA_ROOT_SSH_PASSWORD', None),

    _SettingInfo('admin_name', 'ADMINID', 'admin'),
    _SettingInfo('admin_password', 'ADMINPW', 'Secret123'),
    _SettingInfo('dirman_dn', 'ROOTDN', 'cn=Directory Manager'),
    _SettingInfo('dirman_password', 'ROOTDNPWD', None),

    # 8.8.8.8 is probably the best-known public DNS
    _SettingInfo('dns_forwarder', 'DNSFORWARD', '8.8.8.8'),
    _SettingInfo('nis_domain', 'NISDOMAIN', 'ipatest'),
    _SettingInfo('ntp_server', 'NTPSERVER', None),
    _SettingInfo('ad_admin_name', 'ADADMINID', 'Administrator'),
    _SettingInfo('ad_admin_password', 'ADADMINPW', 'Secret123'),

    _SettingInfo('ipv6', 'IPv6SETUP', False),
    _SettingInfo('debug', 'IPADEBUG', False),
    _SettingInfo('domain_level', 'DOMAINLVL', MAX_DOMAIN_LEVEL),

    _SettingInfo('log_journal_since', 'LOG_JOURNAL_SINCE', '-1h'),
    # userspace FIPS mode
    _SettingInfo('fips_mode', 'IPA_FIPS_MODE', False),
)


def get_global_config(env=None):
    """Create a test config from environment variables

    If env is None, uses os.environ; otherwise env is an environment dict.

    If IPATEST_YAML_CONFIG or IPATEST_JSON_CONFIG is set,
    configuration is read from the named file.
    For YAML, the PyYAML (python-yaml) library needs to be installed.

    Otherwise, configuration is read from various curiously
    named environment variables:

    See _setting_infos for test-wide settings

    MASTER_env1: FQDN of the master
    REPLICA_env1: space-separated FQDNs of the replicas
    CLIENT_env1: space-separated FQDNs of the clients
    AD_env1: space-separated FQDNs of the Active Directories
    OTHER_env1: space-separated FQDNs of other hosts
    (same for _env2, _env3, etc)
    BEAKERREPLICA1_IP_env1: IP address of replica 1 in env 1
    (same for MASTER, CLIENT, or any extra defined ROLE)

    For each machine that should be accessible to tests via extra roles,
    the following environment variable is necessary:

    TESTHOST_<role>_env1: FQDN of the machine with the extra role <role>

    You can also optionally specify the IP address of the host:
    BEAKER<role>_IP_env1: IP address of the machine of the extra role

    The framework will try to resolve the hostname to its IP address
    if not passed via this environment variable.

    Also see env_normalize() for alternate variable names
    """
    if env is None:
        env = os.environ
    env = dict(env)

    return config_from_env(env)


def config_from_env(env):
    if 'IPATEST_YAML_CONFIG' in env:
        try:
            import yaml
        except ImportError as e:
            raise ImportError(
                "%s, please install the PyYAML package to fix it" % e)
        with open(env['IPATEST_YAML_CONFIG']) as file:
            confdict = yaml.safe_load(file)
        return Config.from_dict(confdict)

    if 'IPATEST_JSON_CONFIG' in env:
        with open(env['IPATEST_JSON_CONFIG']) as file:
            confdict = json.load(file)
        return Config.from_dict(confdict)

    env_normalize(env)

    kwargs = {s.name: env.get(s.var_name, s.default)
              for s in _setting_infos}
    kwargs['domains'] = []

    # $IPv6SETUP needs to be 'TRUE' to enable ipv6
    if isinstance(kwargs['ipv6'], str):
        kwargs['ipv6'] = (kwargs['ipv6'].upper() == 'TRUE')

    config = Config(**kwargs)

    # Either an IPA master or an AD can define a domain

    domain_index = 1
    while (env.get('MASTER_env%s' % domain_index) or
           env.get('AD_env%s' % domain_index) or
           env.get('AD_SUBDOMAIN_env%s' % domain_index) or
           env.get('AD_TREEDOMAIN_env%s' % domain_index)):

        if env.get('MASTER_env%s' % domain_index):
            # the IPA domain takes precedence over an AD domain in case of
            # conflict
            config.domains.append(domain_from_env(env, config, domain_index,
                                                  domain_type='IPA'))
        else:
            for domain_type in ('AD', 'AD_SUBDOMAIN', 'AD_TREEDOMAIN'):
                if env.get('%s_env%s' % (domain_type, domain_index)):
                    config.domains.append(
                        domain_from_env(env, config, domain_index,
                                        domain_type=domain_type))
                    break
        domain_index += 1

    return config
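As an aside, the variables described in the docstring above can be exercised without touching `os.environ`, since `get_global_config()` accepts a plain dict; a hedged sketch with placeholder hostnames (the framework will try to resolve them, so real test machines are expected in practice):

```
from ipatests.pytest_ipa.integration.env_config import get_global_config

# Hostnames below are placeholders, not real machines.
env = {
    'DOMAIN_env1': 'ipa.test',
    'MASTER_env1': 'master.ipa.test',
    'REPLICA_env1': 'replica1.ipa.test replica2.ipa.test',
    'CLIENT_env1': 'client.ipa.test',
    'TESTHOST_NFS_env1': 'nfs.ipa.test',   # extra role 'nfs'
    'ADMINPW': 'Secret123',
}
config = get_global_config(env)
print(config.domains[0].master.hostname)   # master.ipa.test
print(len(config.domains[0].replicas))     # 2
```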
|
||||
|
||||
def config_to_env(config, simple=True):
|
||||
"""Convert this test config into environment variables"""
|
||||
try:
|
||||
env = collections.OrderedDict()
|
||||
except AttributeError:
|
||||
# Older Python versions
|
||||
env = {}
|
||||
|
||||
for setting in _setting_infos:
|
||||
value = getattr(config, setting.name)
|
||||
if value in (None, False):
|
||||
env[setting.var_name] = ''
|
||||
elif value is True:
|
||||
env[setting.var_name] = 'TRUE'
|
||||
else:
|
||||
env[setting.var_name] = str(value)
|
||||
|
||||
for domain in config.domains:
|
||||
env_suffix = '_env%s' % (config.domains.index(domain) + 1)
|
||||
env['DOMAIN%s' % env_suffix] = domain.name
|
||||
env['RELM%s' % env_suffix] = domain.realm
|
||||
env['BASEDN%s' % env_suffix] = str(domain.basedn)
|
||||
|
||||
for role in domain.roles:
|
||||
hosts = domain.hosts_by_role(role)
|
||||
|
||||
prefix = ('' if role in domain.static_roles
|
||||
else TESTHOST_PREFIX)
|
||||
|
||||
hostnames = ' '.join(h.hostname for h in hosts)
|
||||
env['%s%s%s' % (prefix, role.upper(), env_suffix)] = hostnames
|
||||
|
||||
ext_hostnames = ' '.join(h.external_hostname for h in hosts)
|
||||
env['BEAKER%s%s' % (role.upper(), env_suffix)] = ext_hostnames
|
||||
|
||||
ips = ' '.join(h.ip for h in hosts)
|
||||
env['BEAKER%s_IP%s' % (role.upper(), env_suffix)] = ips
|
||||
|
||||
for i, host in enumerate(hosts, start=1):
|
||||
suffix = '%s%s' % (role.upper(), i)
|
||||
prefix = ('' if role in domain.static_roles
|
||||
else TESTHOST_PREFIX)
|
||||
|
||||
ext_hostname = host.external_hostname
|
||||
env['%s%s%s' % (prefix, suffix,
|
||||
env_suffix)] = host.hostname
|
||||
env['BEAKER%s%s' % (suffix, env_suffix)] = ext_hostname
|
||||
env['BEAKER%s_IP%s' % (suffix, env_suffix)] = host.ip
|
||||
|
||||
if simple:
|
||||
# Simple variables, kept for backwards compatibility with older
|
||||
# tests. This means no _env<NUM> suffix.
|
||||
if config.domains:
|
||||
default_domain = config.domains[0]
|
||||
if default_domain.master:
|
||||
env['MASTER'] = default_domain.master.hostname
|
||||
env['BEAKERMASTER'] = default_domain.master.external_hostname
|
||||
env['MASTERIP'] = default_domain.master.ip
|
||||
if default_domain.replicas:
|
||||
env['SLAVE'] = env['REPLICA'] = env['REPLICA_env1']
|
||||
env['BEAKERSLAVE'] = env['BEAKERREPLICA_env1']
|
||||
env['SLAVEIP'] = env['BEAKERREPLICA_IP_env1']
|
||||
if default_domain.clients:
|
||||
client = default_domain.clients[0]
|
||||
env['CLIENT'] = client.hostname
|
||||
env['BEAKERCLIENT'] = client.external_hostname
|
||||
if len(default_domain.clients) >= 2:
|
||||
client = default_domain.clients[1]
|
||||
env['CLIENT2'] = client.hostname
|
||||
env['BEAKERCLIENT2'] = client.external_hostname
|
||||
|
||||
return env
|
||||
|
||||
|
||||
def env_normalize(env):
|
||||
"""Fill env variables from alternate variable names
|
||||
|
||||
MASTER_env1 <- MASTER
|
||||
REPLICA_env1 <- REPLICA, SLAVE
|
||||
CLIENT_env1 <- CLIENT
|
||||
similarly for BEAKER* variants: BEAKERMASTER1_env1 <- BEAKERMASTER, etc.
|
||||
|
||||
CLIENT_env1 gets extended with CLIENT2 or CLIENT2_env1
|
||||
"""
|
||||
def coalesce(name, *other_names):
|
||||
"""If name is not set, set it to first existing env[other_name]"""
|
||||
if name not in env:
|
||||
for other_name in other_names:
|
||||
try:
|
||||
env[name] = env[other_name]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
return
|
||||
env[name] = ''
|
||||
|
||||
coalesce('MASTER_env1', 'MASTER')
|
||||
coalesce('REPLICA_env1', 'REPLICA', 'SLAVE')
|
||||
coalesce('CLIENT_env1', 'CLIENT')
|
||||
|
||||
coalesce('BEAKERMASTER1_env1', 'BEAKERMASTER')
|
||||
coalesce('BEAKERREPLICA1_env1', 'BEAKERREPLICA', 'BEAKERSLAVE')
|
||||
coalesce('BEAKERCLIENT1_env1', 'BEAKERCLIENT')
|
||||
|
||||
def extend(name, name2):
|
||||
value = env.get(name2)
|
||||
if value and value not in env[name].split(' '):
|
||||
env[name] += ' ' + value
|
||||
extend('CLIENT_env1', 'CLIENT2')
|
||||
extend('CLIENT_env1', 'CLIENT2_env1')
|
||||
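# Illustrative sketch (hostnames are assumptions): env_normalize() rewrites
# the short alias names onto their canonical "_env1" counterparts in place,
# so MASTER/SLAVE-style variables from older jobs keep working.
def _example_env_normalize():
    env = {'MASTER': 'master.ipa.test', 'SLAVE': 'replica1.ipa.test'}
    env_normalize(env)
    # Now env['MASTER_env1'] == 'master.ipa.test'
    # and env['REPLICA_env1'] == 'replica1.ipa.test'
    return env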
|
||||
|
||||
def domain_from_env(env, config, index, domain_type):
|
||||
# Roles available in the domain depend on the type of the domain
|
||||
# Unix machines are added only to the IPA domains, Windows machines
|
||||
# only to the AD domains
|
||||
if domain_type == 'IPA':
|
||||
master_role = 'MASTER'
|
||||
else:
|
||||
master_role = domain_type
|
||||
|
||||
env_suffix = '_env%s' % index
|
||||
|
||||
master_env = '%s%s' % (master_role, env_suffix)
|
||||
hostname, _dot, domain_name = env[master_env].partition('.')
|
||||
domain = Domain(config, domain_name, domain_type)
|
||||
|
||||
for role in _roles_from_env(domain, env, env_suffix):
|
||||
prefix = '' if role in domain.static_roles else TESTHOST_PREFIX
|
||||
value = env.get('%s%s%s' % (prefix, role.upper(), env_suffix), '')
|
||||
|
||||
for host_index, hostname in enumerate(value.split(), start=1):
|
||||
host = host_from_env(env, domain, hostname, role,
|
||||
host_index, index)
|
||||
domain.hosts.append(host)
|
||||
|
||||
if not domain.hosts:
|
||||
raise ValueError('No hosts defined for %s' % env_suffix)
|
||||
|
||||
return domain
|
||||
|
||||
|
||||
def _roles_from_env(domain, env, env_suffix):
|
||||
for role in domain.static_roles:
|
||||
yield role
|
||||
|
||||
# Extra roles are defined via env variables of form TESTHOST_key_envX
|
||||
roles = set()
|
||||
for var in sorted(env):
|
||||
if var.startswith(TESTHOST_PREFIX) and var.endswith(env_suffix):
|
||||
variable_split = var.split('_')
|
||||
role_name = '_'.join(variable_split[1:-1])
|
||||
if (role_name and not role_name[-1].isdigit()):
|
||||
roles.add(role_name.lower())
|
||||
for role in sorted(roles):
|
||||
yield role
|
||||
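# Example (illustrative role names and hostnames): with
#   TESTHOST_LEGACY_env1=legacy.ipa.test
#   TESTHOST_NFS_env1=nfs.ipa.test
# in the environment, _roles_from_env() yields the domain's static roles
# followed by the extra roles 'legacy' and 'nfs'.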
|
||||
|
||||
def domain_to_env(domain, **kwargs):
|
||||
"""Return environment variables specific to this domain"""
|
||||
env = domain.config.to_env(**kwargs)
|
||||
|
||||
env['DOMAIN'] = domain.name
|
||||
env['RELM'] = domain.realm
|
||||
env['BASEDN'] = str(domain.basedn)
|
||||
|
||||
return env
|
||||
|
||||
|
||||
def host_from_env(env, domain, hostname, role, index, domain_index):
|
||||
ip = env.get('BEAKER%s%s_IP_env%s' %
|
||||
(role.upper(), index, domain_index), None)
|
||||
external_hostname = env.get(
|
||||
'BEAKER%s%s_env%s' % (role.upper(), index, domain_index), None)
|
||||
|
||||
cls = domain.get_host_class({})
|
||||
|
||||
return cls(domain, hostname, role, ip=ip,
|
||||
external_hostname=external_hostname)
|
||||
|
||||
|
||||
def host_to_env(host, **kwargs):
|
||||
"""Return environment variables specific to this host"""
|
||||
env = host.domain.to_env(**kwargs)
|
||||
|
||||
index = host.domain.hosts.index(host) + 1
|
||||
domain_index = host.config.domains.index(host.domain) + 1
|
||||
|
||||
role = host.role.upper()
|
||||
if host.role != 'master':
|
||||
role += str(index)
|
||||
|
||||
env['MYHOSTNAME'] = host.hostname
|
||||
env['MYBEAKERHOSTNAME'] = host.external_hostname
|
||||
env['MYIP'] = host.ip
|
||||
|
||||
prefix = ('' if host.role in host.domain.static_roles
|
||||
else TESTHOST_PREFIX)
|
||||
env_suffix = '_env%s' % domain_index
|
||||
env['MYROLE'] = '%s%s%s' % (prefix, role, env_suffix)
|
||||
env['MYENV'] = str(domain_index)
|
||||
|
||||
return env
|
||||
|
||||
|
||||
def env_to_script(env):
|
||||
return ''.join(['export %s=%s\n' % (key, ipautil.shell_quote(value))
|
||||
for key, value in env.items()])
|
||||
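# Usage sketch (illustrative; "config" is assumed to be a Config instance,
# e.g. one built by config_from_env() above): render the whole topology back
# into a shell snippet that legacy BeakerLib jobs can source.
def _example_env_to_script(config):
    env = config_to_env(config)   # OrderedDict: MASTER_env1, DOMAIN_env1, ...
    return env_to_script(env)     # "export MASTER_env1=..." lines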
67
ipatests/pytest_ipa/integration/fips.py
Normal file
@@ -0,0 +1,67 @@
|
||||
#
|
||||
# Copyright (C) 2019 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
"""FIPS testing helpers
|
||||
|
||||
Based on userspace FIPS mode by Ondrej Moris.
|
||||
|
||||
Userspace FIPS mode fakes a Kernel in FIPS enforcing mode. User space
|
||||
programs behave as if the Kernel was booted in FIPS enforcing mode. Kernel
|
||||
space code still runs in standard mode.
|
||||
"""
|
||||
import os
|
||||
from ipaplatform.paths import paths
|
||||
|
||||
FIPS_OVERLAY_DIR = "/var/tmp/userspace-fips"
|
||||
FIPS_OVERLAY = os.path.join(FIPS_OVERLAY_DIR, "fips_enabled")
|
||||
SYSTEM_FIPS = "/etc/system-fips"
|
||||
|
||||
|
||||
def is_fips_enabled(host):
|
||||
"""Check if host has """
|
||||
result = host.run_command(
|
||||
["cat", paths.PROC_FIPS_ENABLED], raiseonerr=False
|
||||
)
|
||||
if result.returncode == 1:
|
||||
# FIPS mode not available
|
||||
return None
|
||||
elif result.returncode == 0:
|
||||
return result.stdout_text.strip() == "1"
|
||||
else:
|
||||
raise RuntimeError(result.stderr_text)
|
||||
|
||||
|
||||
def enable_userspace_fips(host):
|
||||
# create /etc/system-fips
|
||||
host.put_file_contents(SYSTEM_FIPS, "# userspace fips\n")
|
||||
# fake Kernel FIPS mode with bind mount
|
||||
host.run_command(["mkdir", "-p", FIPS_OVERLAY_DIR])
|
||||
host.put_file_contents(FIPS_OVERLAY, "1\n")
|
||||
host.run_command(
|
||||
["mount", "--bind", FIPS_OVERLAY, paths.PROC_FIPS_ENABLED]
|
||||
)
|
||||
# set crypto policy to FIPS mode
|
||||
host.run_command(["update-crypto-policies", "--show"])
|
||||
host.run_command(["update-crypto-policies", "--set", "FIPS"])
|
||||
# sanity check
|
||||
assert is_fips_enabled(host)
|
||||
result = host.run_command(
|
||||
["openssl", "md5", "/dev/null"], raiseonerr=False
|
||||
)
|
||||
assert result.returncode == 1
|
||||
assert "EVP_DigestInit_ex:disabled for FIPS" in result.stderr_text
|
||||
|
||||
|
||||
def disable_userspace_fips(host):
|
||||
host.run_command(["rm", "-f", SYSTEM_FIPS])
|
||||
host.run_command(["update-crypto-policies", "--set", "DEFAULT"])
|
||||
result = host.run_command(
|
||||
["umount", paths.PROC_FIPS_ENABLED], raiseonerr=False
|
||||
)
|
||||
host.run_command(["rm", "-rf", FIPS_OVERLAY_DIR])
|
||||
if result.returncode != 0:
|
||||
raise RuntimeError(result.stderr_text)
|
||||
|
||||
# sanity check
|
||||
assert not is_fips_enabled(host)
|
||||
host.run_command(["openssl", "md5", "/dev/null"])
|
||||
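# Usage sketch (illustrative; "host" is assumed to be an ipatests Host from a
# multihost fixture): run checks under fake userspace FIPS mode and always
# restore the previous state afterwards.
def _example_with_userspace_fips(host):
    modified = False
    if not is_fips_enabled(host):
        enable_userspace_fips(host)
        modified = True
    try:
        assert is_fips_enabled(host)
        # ... run FIPS-sensitive checks here ...
    finally:
        if modified:
            disable_userspace_fips(host)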
277
ipatests/pytest_ipa/integration/firewall.py
Normal file
@@ -0,0 +1,277 @@
|
||||
#
|
||||
# Copyright (C) 2018 FreeIPA Contributors. See COPYING for license
|
||||
#
|
||||
|
||||
"""Firewall class for integration testing using firewalld"""
|
||||
|
||||
import abc
|
||||
|
||||
from ipapython import ipautil
|
||||
|
||||
|
||||
class FirewallBase(abc.ABC):
|
||||
def __init__(self, host):
|
||||
"""Initialize with host where firewall changes should be applied"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def run(self):
|
||||
"""Enable and start firewall service"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def enable_service(self, service):
|
||||
"""Enable firewall rules for service"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def disable_service(self, service):
|
||||
"""Disable firewall rules for service"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def enable_services(self, services):
|
||||
"""Enable firewall rules for list of services"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def disable_services(self, services):
|
||||
"""Disable firewall rules for list of services"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def passthrough_rule(self, rule, ipv=None):
|
||||
"""Generic method to get direct passthrough rules to
|
||||
rule is an ip[6]tables rule, given without the leading ip[6]tables command.
|
||||
The rule will per default be added to the IPv4 and IPv6 firewall.
|
||||
If there are IP version specific parts in the rule, please make sure
|
||||
that ipv is adapted properly.
|
||||
The rule is added to the direct sub chain of the chain that is used
|
||||
in the rule"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def add_passthrough_rules(self, rules, ipv=None):
|
||||
"""Add passthough rules to the end of the chain
|
||||
rules is a list of ip[6]tables rules, where the first entry of each
|
||||
rule is the chain. No --append/-A, --delete/-D should be added before
|
||||
the chain name, because these are added by the method.
|
||||
If there are IP version specific parts in the rule, please make sure
|
||||
that ipv is adapted properly.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def prepend_passthrough_rules(self, rules, ipv=None):
|
||||
"""Insert passthough rules starting at position 1 as a block
|
||||
rules is a list of ip[6]tables rules, where the first entry of each
|
||||
rule is the chain. No --append/-A, --delete/-D should be added before
|
||||
the chain name, because these are added by the method.
|
||||
If there are IP version specific parts in the rule, please make sure
|
||||
that ipv is adapted properly.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def remove_passthrough_rules(self, rules, ipv=None):
|
||||
"""Remove passthrough rules
|
||||
rules is a list of ip[6]tables rules, where the first entry of each
|
||||
rule is the chain. No --append/-A, --delete/-D should be added before
|
||||
the chain name, because these are added by the method.
|
||||
If there are IP version specific parts in the rule, please make sure
|
||||
that ipv is adapted properly.
|
||||
"""
|
||||
|
||||
|
||||
class NoOpFirewall(FirewallBase):
|
||||
"""
|
||||
The no-op firewall is intended for platforms that lack a high-level firewall
|
||||
backend.
|
||||
"""
|
||||
def run(self):
|
||||
pass
|
||||
|
||||
def enable_service(self, service):
|
||||
pass
|
||||
|
||||
def disable_service(self, service):
|
||||
pass
|
||||
|
||||
def enable_services(self, services):
|
||||
pass
|
||||
|
||||
def disable_services(self, services):
|
||||
pass
|
||||
|
||||
def passthrough_rule(self, rule, ipv=None):
|
||||
pass
|
||||
|
||||
def add_passthrough_rules(self, rules, ipv=None):
|
||||
pass
|
||||
|
||||
def prepend_passthrough_rules(self, rules, ipv=None):
|
||||
pass
|
||||
|
||||
def remove_passthrough_rules(self, rules, ipv=None):
|
||||
pass
|
||||
|
||||
|
||||
class FirewallD(FirewallBase):
|
||||
def __init__(self, host):
|
||||
"""Initialize with host where firewall changes should be applied"""
|
||||
self.host = host
|
||||
|
||||
def run(self):
|
||||
# Unmask firewalld service
|
||||
self.host.run_command(["systemctl", "unmask", "firewalld"])
|
||||
# Enable firewalld service
|
||||
self.host.run_command(["systemctl", "enable", "firewalld"])
|
||||
# Start firewalld service
|
||||
self.host.run_command(["systemctl", "start", "firewalld"])
|
||||
|
||||
def _rp_action(self, args):
|
||||
"""Run-time and permanant firewall action"""
|
||||
cmd = ["firewall-cmd"]
|
||||
cmd.extend(args)
|
||||
|
||||
# Run-time part
|
||||
result = self.host.run_command(cmd, raiseonerr=False)
|
||||
if result.returncode not in [0, 11, 12]:
|
||||
# Ignore firewalld error codes:
|
||||
# 11 is ALREADY_ENABLED
|
||||
# 12 is NOT_ENABLED
|
||||
raise ipautil.CalledProcessError(result.returncode, cmd,
|
||||
result.stdout_text,
|
||||
result.stderr_text)
|
||||
|
||||
# Permanent part
|
||||
result = self.host.run_command(cmd + ["--permanent"],
|
||||
raiseonerr=False)
|
||||
if result.returncode not in [0, 11, 12]:
|
||||
# Ignore firewalld error codes:
|
||||
# 11 is ALREADY_ENABLED
|
||||
# 12 is NOT_ENABLED
|
||||
raise ipautil.CalledProcessError(result.returncode, cmd,
|
||||
result.stdout_text,
|
||||
result.stderr_text)
|
||||
|
||||
def enable_service(self, service):
|
||||
"""Enable firewall service in firewalld runtime and permanent
|
||||
environment"""
|
||||
self._rp_action(["--add-service", service])
|
||||
|
||||
def disable_service(self, service):
|
||||
"""Disable firewall service in firewalld runtime and permanent
|
||||
environment"""
|
||||
self._rp_action(["--remove-service", service])
|
||||
|
||||
def enable_services(self, services):
|
||||
"""Enable list of firewall services in firewalld runtime and
|
||||
permanent environment"""
|
||||
args = []
|
||||
for service in services:
|
||||
args.extend(["--add-service", service])
|
||||
self._rp_action(args)
|
||||
|
||||
def disable_services(self, services):
|
||||
"""Disable list of firewall services in firewalld runtime and
|
||||
permanent environment"""
|
||||
args = []
|
||||
for service in services:
|
||||
args.extend(["--remove-service", service])
|
||||
self._rp_action(args)
|
||||
|
||||
def passthrough_rule(self, rule, ipv=None):
|
||||
"""Generic method to get direct passthrough rules to firewalld
|
||||
rule is an ip[6]tables rule, given without the leading ip[6]tables command.
|
||||
The rule will per default be added to the IPv4 and IPv6 firewall.
|
||||
If there are IP version specific parts in the rule, please make sure
|
||||
that ipv is adapted properly.
|
||||
The rule is added to the direct sub chain of the chain that is used
|
||||
in the rule"""
|
||||
if ipv is None:
|
||||
ipvs = ["ipv4", "ipv6"]
|
||||
else:
|
||||
ipvs = [ipv]
|
||||
for _ipv in ipvs:
|
||||
args = ["firewall-cmd", "--direct", "--passthrough", _ipv] + rule
|
||||
self.host.run_command(args)
|
||||
|
||||
def add_passthrough_rules(self, rules, ipv=None):
|
||||
"""Add passthough rules to the end of the chain
|
||||
rules is a list of ip[6]tables rules, where the first entry of each
|
||||
rule is the chain. No --append/-A, --delete/-D should be added before
|
||||
the chain name, because these are added by the method.
|
||||
If there are IP version specific parts in the rule, please make sure
|
||||
that ipv is adapted properly.
|
||||
"""
|
||||
for rule in rules:
|
||||
self.passthrough_rule(["-A"] + rule, ipv)
|
||||
|
||||
def prepend_passthrough_rules(self, rules, ipv=None):
|
||||
"""Insert passthough rules starting at position 1 as a block
|
||||
rules is a list of ip[6]tables rules, where the first entry of each
|
||||
rule is the chain. No --append/-A, --delete/-D should be added before
|
||||
the chain name, because these are added by the method.
|
||||
If there are IP version specific parts in the rule, please make sure
|
||||
that ipv is adapted properly.
|
||||
"""
|
||||
# first rule number in iptables is 1
|
||||
for i, rule in enumerate(rules, start=1):
|
||||
self.passthrough_rule(["-I", rule[0], str(i)] + rule[1:], ipv)
|
||||
|
||||
def remove_passthrough_rules(self, rules, ipv=None):
|
||||
"""Remove passthrough rules
|
||||
rules is a list of ip[6]tables rules, where the first entry of each
|
||||
rule is the chain. No --append/-A, --delete/-D should be added before
|
||||
the chain name, beacuse these are added by the method.
|
||||
If there are IP version specific parts in the rule, please make sure
|
||||
that ipv is adapted properly.
|
||||
"""
|
||||
for rule in rules:
|
||||
self.passthrough_rule(["-D"] + rule, ipv)
|
||||
|
||||
|
||||
class Firewall(FirewallBase):
|
||||
"""
|
||||
Depending on the ipaplatform proxy firewall tasks to the actual backend.
|
||||
Current supported backends: firewalld and no-op firewall.
|
||||
"""
|
||||
def __init__(self, host):
|
||||
"""Initialize with host where firewall changes should be applied"""
|
||||
# break circular dependency
|
||||
from .tasks import get_platform
|
||||
|
||||
self.host = host
|
||||
platform = get_platform(host)
|
||||
|
||||
firewalls = {
|
||||
'rhel': FirewallD,
|
||||
'fedora': FirewallD,
|
||||
'debian': FirewallD,
|
||||
'ubuntu': FirewallD,
|
||||
'altlinux': NoOpFirewall,
|
||||
}
|
||||
if platform not in firewalls:
|
||||
raise ValueError(
|
||||
"Platform {} doesn't support Firewall".format(platform))
|
||||
self.firewall = firewalls[platform](self.host)
|
||||
self.run()
|
||||
|
||||
def run(self):
|
||||
self.firewall.run()
|
||||
|
||||
def enable_service(self, service):
|
||||
self.firewall.enable_service(service)
|
||||
|
||||
def disable_service(self, service):
|
||||
self.firewall.disable_service(service)
|
||||
|
||||
def enable_services(self, services):
|
||||
self.firewall.enable_services(services)
|
||||
|
||||
def disable_services(self, services):
|
||||
self.firewall.disable_services(services)
|
||||
|
||||
def passthrough_rule(self, rule, ipv=None):
|
||||
self.firewall.passthrough_rule(rule, ipv)
|
||||
|
||||
def add_passthrough_rules(self, rules, ipv=None):
|
||||
self.firewall.add_passthrough_rules(rules, ipv)
|
||||
|
||||
def prepend_passthrough_rules(self, rules, ipv=None):
|
||||
self.firewall.prepend_passthrough_rules(rules, ipv)
|
||||
|
||||
def remove_passthrough_rules(self, rules, ipv=None):
|
||||
self.firewall.remove_passthrough_rules(rules, ipv)
|
||||
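# Usage sketch (illustrative; "host" is assumed to be an ipatests Host): open
# the LDAP ports via firewalld services, add a direct iptables passthrough
# rule, and clean both up again when done.
def _example_firewall_usage(host):
    fw = Firewall(host)
    fw.enable_services(["ldap", "ldaps"])
    rule = ["OUTPUT", "-p", "tcp", "--dport", "389", "-j", "ACCEPT"]
    fw.add_passthrough_rules([rule])
    # ... exercise the deployment ...
    fw.remove_passthrough_rules([rule])
    fw.disable_services(["ldap", "ldaps"])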
215
ipatests/pytest_ipa/integration/host.py
Normal file
@@ -0,0 +1,215 @@
|
||||
# Authors:
|
||||
# Petr Viktorin <pviktori@redhat.com>
|
||||
#
|
||||
# Copyright (C) 2013 Red Hat
|
||||
# see file 'COPYING' for use and warranty information
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
"""Host class for integration testing"""
|
||||
import re
|
||||
import subprocess
|
||||
import tempfile
|
||||
|
||||
import ldap
|
||||
import pytest_multihost.host
|
||||
|
||||
from ipaplatform.paths import paths
|
||||
from ipapython import ipaldap
|
||||
|
||||
from .fips import (
|
||||
is_fips_enabled, enable_userspace_fips, disable_userspace_fips
|
||||
)
|
||||
from .transport import IPAOpenSSHTransport
|
||||
|
||||
FIPS_NOISE_RE = re.compile(br"FIPS mode initialized\r?\n?")
|
||||
|
||||
|
||||
class LDAPClientWithoutCertCheck(ipaldap.LDAPClient):
|
||||
"""Adds an option to disable certificate check for TLS connection
|
||||
|
||||
To disable certificate validity check create client with added option
|
||||
no_certificate_check:
|
||||
client = LDAPClientWithoutCertCheck(..., no_certificate_check=True)
|
||||
"""
|
||||
def __init__(self, *args, **kwargs):
|
||||
self._no_certificate_check = kwargs.pop(
|
||||
'no_certificate_check', False)
|
||||
super(LDAPClientWithoutCertCheck, self).__init__(*args, **kwargs)
|
||||
|
||||
def _connect(self):
|
||||
if (self._start_tls and self.protocol == 'ldap' and
|
||||
self._no_certificate_check):
|
||||
with self.error_handler():
|
||||
conn = ipaldap.ldap_initialize(
|
||||
self.ldap_uri, cacertfile=self._cacert)
|
||||
conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT,
|
||||
ldap.OPT_X_TLS_NEVER)
|
||||
conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
|
||||
conn.start_tls_s()
|
||||
return conn
|
||||
else:
|
||||
return super(LDAPClientWithoutCertCheck, self)._connect()
|
||||
|
||||
|
||||
class Host(pytest_multihost.host.Host):
|
||||
"""Representation of a remote IPA host"""
|
||||
|
||||
transport_class = IPAOpenSSHTransport
|
||||
|
||||
def __init__(self, domain, hostname, role, ip=None,
|
||||
external_hostname=None, username=None, password=None,
|
||||
test_dir=None, host_type=None):
|
||||
super().__init__(
|
||||
domain, hostname, role, ip=ip,
|
||||
external_hostname=external_hostname, username=username,
|
||||
password=password, test_dir=test_dir, host_type=host_type
|
||||
)
|
||||
self._fips_mode = None
|
||||
self._userspace_fips = False
|
||||
|
||||
@property
|
||||
def is_fips_mode(self):
|
||||
"""Check and cache if a system is in FIPS mode
|
||||
"""
|
||||
if self._fips_mode is None:
|
||||
self._fips_mode = is_fips_enabled(self)
|
||||
return self._fips_mode
|
||||
|
||||
@property
|
||||
def is_userspace_fips(self):
|
||||
"""Check if host uses fake userspace FIPS
|
||||
"""
|
||||
return self._userspace_fips
|
||||
|
||||
def enable_userspace_fips(self):
|
||||
"""Enable fake userspace FIPS mode
|
||||
|
||||
The call has no effect if the system is already in FIPS mode.
|
||||
|
||||
:return: True if the system was modified, else False
|
||||
"""
|
||||
if not self.is_fips_mode:
|
||||
enable_userspace_fips(self)
|
||||
self._fips_mode = True
|
||||
self._userspace_fips = True
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def disable_userspace_fips(self):
|
||||
"""Disable fake userspace FIPS mode
|
||||
|
||||
The call has no effect if userspace FIPS mode is not enabled.
|
||||
|
||||
:return: True if the system was modified, else False
|
||||
"""
|
||||
if self.is_userspace_fips:
|
||||
disable_userspace_fips(self)
|
||||
self._userspace_fips = False
|
||||
self._fips_mode = False
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def _make_host(domain, hostname, role, ip, external_hostname):
|
||||
# We need to determine the type of the host; this depends on the domain
|
||||
# type, as we assume all Unix machines are in the Unix domain and
|
||||
# all Windows machines are in an AD domain
|
||||
|
||||
if domain.type == 'AD':
|
||||
cls = WinHost
|
||||
else:
|
||||
cls = Host
|
||||
|
||||
return cls(
|
||||
domain,
|
||||
hostname,
|
||||
role,
|
||||
ip=ip,
|
||||
external_hostname=external_hostname
|
||||
)
|
||||
|
||||
def ldap_connect(self):
|
||||
"""Return an LDAPClient authenticated to this host as directory manager
|
||||
"""
|
||||
self.log.info('Connecting to LDAP at %s', self.external_hostname)
|
||||
# get IPA CA cert to establish a secure connection
|
||||
cacert = self.get_file_contents(paths.IPA_CA_CRT)
|
||||
with tempfile.NamedTemporaryFile() as f:
|
||||
f.write(cacert)
|
||||
f.flush()
|
||||
|
||||
hostnames_mismatch = self.hostname != self.external_hostname
|
||||
conn = LDAPClientWithoutCertCheck.from_hostname_secure(
|
||||
self.external_hostname,
|
||||
cacert=f.name,
|
||||
no_certificate_check=hostnames_mismatch)
|
||||
binddn = self.config.dirman_dn
|
||||
self.log.info('LDAP bind as %s', binddn)
|
||||
conn.simple_bind(binddn, self.config.dirman_password)
|
||||
|
||||
# The CA cert file has been loaded into the SSL_CTX and is no
|
||||
# longer required.
|
||||
|
||||
return conn
|
||||
|
||||
@classmethod
|
||||
def from_env(cls, env, domain, hostname, role, index, domain_index):
|
||||
from ipatests.pytest_ipa.integration.env_config import host_from_env
|
||||
return host_from_env(env, domain, hostname, role, index, domain_index)
|
||||
|
||||
def to_env(self, **kwargs):
|
||||
from ipatests.pytest_ipa.integration.env_config import host_to_env
|
||||
return host_to_env(self, **kwargs)
|
||||
|
||||
def run_command(self, argv, set_env=True, stdin_text=None,
|
||||
log_stdout=True, raiseonerr=True,
|
||||
cwd=None, bg=False, encoding='utf-8', ok_returncode=0):
|
||||
"""Wrapper around run_command to log stderr on raiseonerr=True
|
||||
|
||||
:param ok_returncode: return code considered to be correct,
|
||||
you can pass an integer or sequence of integers
|
||||
"""
|
||||
result = super().run_command(
|
||||
argv, set_env=set_env, stdin_text=stdin_text,
|
||||
log_stdout=log_stdout, raiseonerr=False, cwd=cwd, bg=bg,
|
||||
encoding=encoding
|
||||
)
|
||||
# in FIPS mode SSH may print noise to stderr, remove the string
|
||||
# "FIPS mode initialized" + optional newline.
|
||||
result.stderr_bytes = FIPS_NOISE_RE.sub(b'', result.stderr_bytes)
|
||||
try:
|
||||
result_ok = result.returncode in ok_returncode
|
||||
except TypeError:
|
||||
result_ok = result.returncode == ok_returncode
|
||||
if not result_ok and raiseonerr:
|
||||
result.log.error('stderr: %s', result.stderr_text)
|
||||
raise subprocess.CalledProcessError(
|
||||
result.returncode, argv,
|
||||
result.stdout_text, result.stderr_text
|
||||
)
|
||||
else:
|
||||
return result
|
||||
|
||||
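# Usage sketch (illustrative command): ok_returncode accepts a single integer
# or a sequence, so e.g. grep's "no match" exit code 1 can be treated as a
# non-fatal outcome instead of raising CalledProcessError.
def _example_run_command(host):
    result = host.run_command(
        ["grep", "-c", "ipa", "/etc/hosts"], ok_returncode=(0, 1)
    )
    return result.returncode == 0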
|
||||
class WinHost(pytest_multihost.host.WinHost):
|
||||
"""
|
||||
Representation of a remote Windows host.
|
||||
|
||||
This serves as a placeholder class until we move from manual preparation of
|
||||
Active Directory to automated setup.
|
||||
"""
|
||||
transport_class = IPAOpenSSHTransport
|
||||
2477
ipatests/pytest_ipa/integration/tasks.py
Executable file
File diff suppressed because it is too large
48
ipatests/pytest_ipa/integration/transport.py
Normal file
@@ -0,0 +1,48 @@
|
||||
#
|
||||
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
"""Enhanced SSH transport for pytest multihost
|
||||
|
||||
Provides SSH password login for OpenSSH transport
|
||||
"""
|
||||
import os
|
||||
|
||||
from pytest_multihost.transport import OpenSSHTransport
|
||||
|
||||
|
||||
class IPAOpenSSHTransport(OpenSSHTransport):
|
||||
def _get_ssh_argv(self):
|
||||
"""Return the path to SSH and options needed for every call"""
|
||||
control_file = os.path.join(self.control_dir.path, "control")
|
||||
known_hosts_file = os.path.join(self.control_dir.path, "known_hosts")
|
||||
|
||||
argv = [
|
||||
"ssh",
|
||||
"-l",
|
||||
self.host.ssh_username,
|
||||
"-o",
|
||||
"ControlPath=%s" % control_file,
|
||||
"-o",
|
||||
"StrictHostKeyChecking=no",
|
||||
"-o",
|
||||
"UserKnownHostsFile=%s" % known_hosts_file,
|
||||
]
|
||||
|
||||
if self.host.ssh_key_filename:
|
||||
key_filename = os.path.expanduser(self.host.ssh_key_filename)
|
||||
argv.extend(["-i", key_filename])
|
||||
elif self.host.ssh_password:
|
||||
password_file = os.path.join(self.control_dir.path, "password")
|
||||
with open(password_file, "w") as f:
|
||||
os.fchmod(f.fileno(), 0o600)
|
||||
f.write(self.host.ssh_password)
|
||||
f.write("\n")
|
||||
argv = ["sshpass", f"-f{password_file}"] + argv
|
||||
else:
|
||||
self.log.critical("No SSH credentials configured")
|
||||
raise RuntimeError("No SSH credentials configured")
|
||||
|
||||
argv.append(self.host.external_hostname)
|
||||
self.log.debug("SSH invocation: %s", argv)
|
||||
|
||||
return argv
|
||||
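# Example of the resulting invocation when only a password is configured
# (username, hostname and the control directory path are illustrative):
#
#   sshpass -f<control_dir>/password ssh -l root \
#       -o ControlPath=<control_dir>/control \
#       -o StrictHostKeyChecking=no \
#       -o UserKnownHostsFile=<control_dir>/known_hosts \
#       master.ipa.test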
70
ipatests/pytest_ipa/nose_compat.py
Normal file
@@ -0,0 +1,70 @@
|
||||
# Authors:
|
||||
# Petr Viktorin <pviktori@redhat.com>
|
||||
#
|
||||
# Copyright (C) 2014 Red Hat
|
||||
# see file 'COPYING' for use and warranty information
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
"""Provides command-line options for very limited Nose compatibility"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
|
||||
from ipapython.ipa_log_manager import Formatter, convert_log_level
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("IPA nosetests compatibility shim")
|
||||
|
||||
group.addoption('--with-xunit', action="store_const",
|
||||
dest="xmlpath", metavar="path", default=None,
|
||||
const=os.environ.get('IPATEST_XUNIT_PATH', './nosetests.xml'),
|
||||
help="create junit-xml style report file at $IPATEST_XUNIT_PATH,"
|
||||
"or nosetests.xml by default")
|
||||
|
||||
group.addoption('--logging-level', action="store",
|
||||
dest="logging_level", metavar="level", default='CRITICAL',
|
||||
help="level for logging to stderr. "
|
||||
"Bypasses pytest logging redirection."
|
||||
"May be used to show progress of long-running tests.")
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
if config.getoption('logging_level'):
|
||||
# Forward IPA logging to a normal Python logger. Nose's logcapture plugin
|
||||
# can't work with IPA-managed loggers
|
||||
class LogHandler(logging.Handler):
|
||||
name = 'forwarding log handler'
|
||||
logger = logging.getLogger('IPA')
|
||||
|
||||
def emit(self, record):
|
||||
capture = config.pluginmanager.getplugin('capturemanager')
|
||||
orig_stdout, orig_stderr = sys.stdout, sys.stderr
|
||||
if capture:
|
||||
capture.suspend_global_capture()
|
||||
sys.stderr.write(self.format(record))
|
||||
sys.stderr.write('\n')
|
||||
if capture:
|
||||
capture.resume_global_capture()
|
||||
sys.stdout, sys.stderr = orig_stdout, orig_stderr
|
||||
|
||||
level = convert_log_level(config.getoption('logging_level'))
|
||||
|
||||
handler = LogHandler()
|
||||
handler.setFormatter(Formatter('[%(name)s] %(message)s'))
|
||||
handler.setLevel(level)
|
||||
root_logger = logging.getLogger()
|
||||
root_logger.addHandler(handler)
|
||||
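# Usage sketch (illustrative): run a test module with live DEBUG logging on
# stderr and a junit-xml report, bypassing pytest's output capture:
#
#   ipa-run-tests --logging-level=DEBUG --with-xunit test_cmdline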
200
ipatests/pytest_ipa/slicing.py
Normal file
@@ -0,0 +1,200 @@
|
||||
#
|
||||
# Copyright (C) 2019 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
|
||||
"""
|
||||
The main purpose of this plugin is to slice a test suite into
|
||||
several pieces to run each within its own test environment (for example,
|
||||
an Agent of Azure Pipelines).
|
||||
|
||||
Tests within a slice are grouped by test modules because not all of the tests
|
||||
within the module are independent of each other.
|
||||
|
||||
Slices are balanced by the number of tests within a test module.
|
||||
* Ideally, tests would be grouped by execution duration.
|
||||
This could be achieved by caching test results; Azure Pipelines
|
||||
caching is still in development. *
|
||||
To work around slow tests, a dedicated slice can be added.
|
||||
|
||||
:param slices: The total number of slices to split the test suite into
|
||||
:param slice-num: The number of the slice to run
|
||||
:param slice-dedicated: A file path to the module to run in its own slice
|
||||
|
||||
**Examples**
|
||||
|
||||
Inputs:
|
||||
ipa-run-tests test_cmdline --collectonly -qq
|
||||
...
|
||||
test_cmdline/test_cli.py: 39
|
||||
test_cmdline/test_help.py: 7
|
||||
test_cmdline/test_ipagetkeytab.py: 16
|
||||
...
|
||||
|
||||
* Split tests into 2 slices and run the first one:
|
||||
|
||||
ipa-run-tests --slices=2 --slice-num=1 test_cmdline
|
||||
|
||||
The outcome would be:
|
||||
...
|
||||
Running slice: 1 (46 tests)
|
||||
Modules:
|
||||
test_cmdline/test_cli.py: 39
|
||||
test_cmdline/test_help.py: 7
|
||||
...
|
||||
|
||||
* Split tests into 2 slices, move one module out to its own slice
|
||||
and run the second one
|
||||
|
||||
ipa-run-tests --slices=2 --slice-dedicated=test_cmdline/test_cli.py \
|
||||
--slice-num=2 test_cmdline
|
||||
|
||||
The outcome would be:
|
||||
...
|
||||
Running slice: 2 (23 tests)
|
||||
Modules:
|
||||
test_cmdline/test_ipagetkeytab.py: 16
|
||||
test_cmdline/test_help.py: 7
|
||||
...
|
||||
|
||||
"""
|
||||
import pytest
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("slicing")
|
||||
group.addoption(
|
||||
'--slices', dest='slices_num', type=int,
|
||||
help='The number of slices to split the test suite into')
|
||||
group.addoption(
|
||||
'--slice-num', dest='slice_num', type=int,
|
||||
help='The specific number of slice to run')
|
||||
group.addoption(
|
||||
'--slice-dedicated', action="append", dest='slices_dedicated',
|
||||
help='The file path to the module to run in dedicated slice')
|
||||
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
def pytest_collection_modifyitems(session, config, items):
|
||||
yield
|
||||
slice_count = config.getoption('slices_num')
|
||||
slice_id = config.getoption('slice_num')
|
||||
modules_dedicated = config.getoption('slices_dedicated')
|
||||
# deduplicate
|
||||
if modules_dedicated:
|
||||
modules_dedicated = list(set(modules_dedicated))
|
||||
|
||||
# sanity check
|
||||
if not slice_count or not slice_id:
|
||||
return
|
||||
|
||||
# nothing to do
|
||||
if slice_count == 1:
|
||||
return
|
||||
|
||||
if modules_dedicated and len(modules_dedicated) > slice_count:
|
||||
raise ValueError(
|
||||
"Dedicated slice number({}) shouldn't be greater than the number "
|
||||
"of slices({})".format(len(modules_dedicated), slice_count))
|
||||
|
||||
if slice_id > slice_count:
|
||||
raise ValueError(
|
||||
"Slice number({}) shouldn't be greater than the number of slices"
|
||||
"({})".format(slice_id, slice_count))
|
||||
|
||||
modules = []
|
||||
# Calculate modules within collection
|
||||
# Note: modules within the pytest collection may not be in consecutive
|
||||
# order
|
||||
for number, item in enumerate(items):
|
||||
name = item.nodeid.split("::", 1)[0]
|
||||
if not modules or name != modules[-1]["name"]:
|
||||
modules.append({"name": name, "begin": number, "end": number})
|
||||
else:
|
||||
modules[-1]["end"] = number
|
||||
|
||||
if slice_count > len(modules):
|
||||
raise ValueError(
|
||||
"Total number of slices({}) shouldn't be greater than the number "
|
||||
"of Python test modules({})".format(slice_count, len(modules)))
|
||||
|
||||
slices_dedicated = []
|
||||
if modules_dedicated:
|
||||
slices_dedicated = [
|
||||
[m] for m in modules for x in modules_dedicated if x in m["name"]
|
||||
]
|
||||
if modules_dedicated and len(slices_dedicated) != len(modules_dedicated):
|
||||
raise ValueError(
|
||||
"The number of dedicated slices({}) should be equal to the "
|
||||
"number of dedicated modules({})".format(
|
||||
slices_dedicated, modules_dedicated))
|
||||
|
||||
if (slices_dedicated and len(slices_dedicated) == slice_count and
|
||||
len(slices_dedicated) != len(modules)):
|
||||
raise ValueError(
|
||||
"The total number of slices({}) is not sufficient to run dedicated"
|
||||
" modules({}) as well as usual ones({})".format(
|
||||
slice_count, len(slices_dedicated),
|
||||
len(modules) - len(slices_dedicated)))
|
||||
|
||||
# remove dedicated modules from usual ones
|
||||
for s in slices_dedicated:
|
||||
for m in s:
|
||||
if m in modules:
|
||||
modules.remove(m)
|
||||
|
||||
avail_slice_count = slice_count - len(slices_dedicated)
|
||||
# initialize slices with empty lists
|
||||
slices = [[] for i in range(slice_count)]
|
||||
|
||||
# initialize slices with dedicated ones
|
||||
for sn, s in enumerate(slices_dedicated):
|
||||
slices[sn] = s
|
||||
|
||||
# initial reverse sort by the number of tests in a test module
|
||||
modules.sort(reverse=True, key=lambda x: x["end"] - x["begin"] + 1)
|
||||
reverse = True
|
||||
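    # Zig-zag distribution: each pass hands out the next <avail_slice_count>
    # largest modules, alternating the order between passes so slice sizes
    # stay balanced.  With the docstring example (39, 16 and 7 tests) and two
    # slices this yields slice 1 = 39 + 7 = 46 tests and slice 2 = 16 tests.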
while modules:
|
||||
for sslice_num, sslice in enumerate(sorted(
|
||||
modules[:avail_slice_count],
|
||||
reverse=reverse, key=lambda x: x["end"] - x["begin"] + 1)):
|
||||
slices[len(slices_dedicated) + sslice_num].append(sslice)
|
||||
|
||||
modules[:avail_slice_count] = []
|
||||
reverse = not reverse
|
||||
|
||||
calc_ntests = sum(x["end"] - x["begin"] + 1 for s in slices for x in s)
|
||||
assert calc_ntests == len(items)
|
||||
assert len(slices) == slice_count
|
||||
|
||||
# the range of the given argument `slice_id` begins with 1(one)
|
||||
sslice = slices[slice_id - 1]
|
||||
|
||||
new_items = []
|
||||
for m in sslice:
|
||||
new_items += items[m["begin"]:m["end"] + 1]
|
||||
items[:] = new_items
|
||||
|
||||
tw = config.get_terminal_writer()
|
||||
if tw:
|
||||
tw.line()
|
||||
tw.write(
|
||||
"Running slice: {} ({} tests)\n".format(
|
||||
slice_id,
|
||||
len(items),
|
||||
),
|
||||
cyan=True,
|
||||
bold=True,
|
||||
)
|
||||
tw.write(
|
||||
"Modules:\n",
|
||||
yellow=True,
|
||||
bold=True,
|
||||
)
|
||||
for module in sslice:
|
||||
tw.write(
|
||||
"{}: {}\n".format(
|
||||
module["name"],
|
||||
module["end"] - module["begin"] + 1),
|
||||
yellow=True,
|
||||
)
|
||||
tw.line()
|
||||
Block a user