Imported Debian patch 4.7.2-3
This commit is contained in:
committed by
Mario Fetka
parent
27edeba051
commit
8bc559c5a1
@@ -26,34 +26,30 @@ import shutil
|
||||
import socket
|
||||
import sys
|
||||
import tempfile
|
||||
import textwrap
|
||||
import time
|
||||
import traceback
|
||||
import warnings
|
||||
|
||||
from configparser import RawConfigParser
|
||||
from urllib.parse import urlparse, urlunparse
|
||||
# pylint: disable=import-error
|
||||
from six.moves.configparser import RawConfigParser
|
||||
from six.moves.urllib.parse import urlparse, urlunparse
|
||||
# pylint: enable=import-error
|
||||
|
||||
from ipalib import api, errors, x509
|
||||
from ipalib import sysrestore
|
||||
from ipalib.constants import IPAAPI_USER, MAXHOSTNAMELEN
|
||||
from ipalib.install import certmonger, certstore, service
|
||||
from ipalib.constants import IPAAPI_USER
|
||||
from ipalib.install import certmonger, certstore, service, sysrestore
|
||||
from ipalib.install import hostname as hostname_
|
||||
from ipalib.facts import is_ipa_client_configured, is_ipa_configured
|
||||
from ipalib.install.kinit import kinit_keytab, kinit_password
|
||||
from ipalib.install.service import enroll_only, prepare_only
|
||||
from ipalib.rpc import delete_persistent_client_session_data
|
||||
from ipalib.util import (
|
||||
normalize_hostname,
|
||||
no_matching_interface_for_ip_address_warning,
|
||||
validate_hostname,
|
||||
verify_host_resolvable,
|
||||
)
|
||||
from ipaplatform import services
|
||||
from ipaplatform.constants import constants
|
||||
from ipaplatform.paths import paths
|
||||
from ipaplatform.tasks import tasks
|
||||
from ipapython import certdb, kernel_keyring, ipaldap, ipautil, dnsutil
|
||||
from ipapython import certdb, kernel_keyring, ipaldap, ipautil
|
||||
from ipapython.admintool import ScriptError
|
||||
from ipapython.dn import DN
|
||||
from ipapython.install import typing
|
||||
@@ -68,9 +64,8 @@ from ipapython.ipautil import (
|
||||
from ipapython.ssh import SSHPublicKey
|
||||
from ipapython import version
|
||||
|
||||
from . import automount, timeconf, sssd
|
||||
from ipaclient import discovery
|
||||
from ipapython.ipachangeconf import IPAChangeConf
|
||||
from . import automount, ipadiscovery, timeconf, sssd
|
||||
from .ipachangeconf import IPAChangeConf
|
||||
|
||||
NoneType = type(None)
|
||||
|
||||
@@ -268,33 +263,33 @@ def delete_ipa_domain():
|
||||
"No access to the /etc/sssd/sssd.conf file.")
|
||||
|
||||
|
||||
def is_ipa_client_installed(on_master=False):
|
||||
def is_ipa_client_installed(fstore, on_master=False):
|
||||
"""
|
||||
Consider IPA client not installed if nothing is backed up
|
||||
and default.conf file does not exist. If on_master is set to True,
|
||||
the existence of default.conf file is not taken into consideration,
|
||||
since it has been already created by ipa-server-install.
|
||||
"""
|
||||
warnings.warn(
|
||||
"Use 'ipalib.facts.is_ipa_client_configured'",
|
||||
DeprecationWarning,
|
||||
stacklevel=2
|
||||
|
||||
installed = (
|
||||
fstore.has_files() or (
|
||||
not on_master and os.path.exists(paths.IPA_DEFAULT_CONF)
|
||||
)
|
||||
)
|
||||
return is_ipa_client_configured(on_master)
|
||||
|
||||
return installed
|
||||
|
||||
|
||||
def configure_nsswitch_database(fstore, database, services, preserve=True,
|
||||
append=True, default_value=()):
|
||||
"""
|
||||
This function was deprecated. Use ipaplatform.tasks.
|
||||
|
||||
Edits the specified nsswitch.conf database (e.g. passwd, group, sudoers)
|
||||
to use the specified service(s).
|
||||
|
||||
Arguments:
|
||||
fstore - FileStore to backup the nsswitch.conf
|
||||
database - database configuration that should be ammended,
|
||||
e.g. 'sudoers'
|
||||
e.g. 'sudoers'
|
||||
service - list of services that should be added, e.g. ['sss']
|
||||
preserve - if True, the already configured services will be preserved
|
||||
|
||||
@@ -304,13 +299,51 @@ def configure_nsswitch_database(fstore, database, services, preserve=True,
|
||||
the database is not mentioned in nsswitch.conf), e.g.
|
||||
['files']
|
||||
"""
|
||||
warnings.warn(
|
||||
"Use ipaplatform.tasks.tasks.configure_nsswitch_database",
|
||||
DeprecationWarning,
|
||||
stacklevel=2
|
||||
)
|
||||
return tasks.configure_nsswitch_database(fstore, database, services,
|
||||
preserve, append, default_value)
|
||||
|
||||
# Backup the original version of nsswitch.conf, we're going to edit it now
|
||||
if not fstore.has_file(paths.NSSWITCH_CONF):
|
||||
fstore.backup_file(paths.NSSWITCH_CONF)
|
||||
|
||||
conf = IPAChangeConf("IPA Installer")
|
||||
conf.setOptionAssignment(':')
|
||||
|
||||
if preserve:
|
||||
# Read the existing configuration
|
||||
with open(paths.NSSWITCH_CONF, 'r') as f:
|
||||
opts = conf.parse(f)
|
||||
raw_database_entry = conf.findOpts(opts, 'option', database)[1]
|
||||
|
||||
# Detect the list of already configured services
|
||||
if not raw_database_entry:
|
||||
# If there is no database entry, database is not present in
|
||||
# the nsswitch.conf. Set the list of services to the
|
||||
# default list, if passed.
|
||||
configured_services = list(default_value)
|
||||
else:
|
||||
configured_services = raw_database_entry['value'].strip().split()
|
||||
|
||||
# Make sure no service is added if already mentioned in the list
|
||||
added_services = [s for s in services
|
||||
if s not in configured_services]
|
||||
|
||||
# Prepend / append the list of new services
|
||||
if append:
|
||||
new_value = ' ' + ' '.join(configured_services + added_services)
|
||||
else:
|
||||
new_value = ' ' + ' '.join(added_services + configured_services)
|
||||
|
||||
else:
|
||||
# Preserve not set, let's rewrite existing configuration
|
||||
new_value = ' ' + ' '.join(services)
|
||||
|
||||
# Set new services as sources for database
|
||||
opts = [
|
||||
conf.setOption(database, new_value),
|
||||
conf.emptyLine(),
|
||||
]
|
||||
|
||||
conf.changeConf(paths.NSSWITCH_CONF, opts)
|
||||
logger.info("Configured %s in %s", database, paths.NSSWITCH_CONF)
|
||||
|
||||
|
||||
def configure_ipa_conf(
|
||||
@@ -629,9 +662,13 @@ def hardcode_ldap_server(cli_server):
|
||||
"hardcoded server name: %s", cli_server[0])
|
||||
|
||||
|
||||
# Currently this doesn't support templating, but that could be changed in the
|
||||
# future. Note that this function is also called from %post.
|
||||
def configure_krb5_snippet():
|
||||
def configure_krb5_conf(
|
||||
cli_realm, cli_domain, cli_server, cli_kdc, dnsok,
|
||||
filename, client_domain, client_hostname, force=False,
|
||||
configure_sssd=True):
|
||||
|
||||
# First, write a snippet to krb5.conf.d. Currently this doesn't support
|
||||
# templating, but that could be changed in the future.
|
||||
template = os.path.join(
|
||||
paths.USR_SHARE_IPA_CLIENT_DIR,
|
||||
os.path.basename(paths.KRB5_FREEIPA) + ".template"
|
||||
@@ -639,16 +676,6 @@ def configure_krb5_snippet():
|
||||
shutil.copy(template, paths.KRB5_FREEIPA)
|
||||
os.chmod(paths.KRB5_FREEIPA, 0o644)
|
||||
|
||||
tasks.restore_context(paths.KRB5_FREEIPA)
|
||||
|
||||
|
||||
def configure_krb5_conf(
|
||||
cli_realm, cli_domain, cli_server, cli_kdc, dnsok,
|
||||
filename, client_domain, client_hostname, force=False,
|
||||
configure_sssd=True):
|
||||
# First, write a snippet to krb5.conf.d.
|
||||
configure_krb5_snippet()
|
||||
|
||||
# Then, perform the rest of our configuration into krb5.conf itself.
|
||||
krbconf = IPAChangeConf("IPA Installer")
|
||||
krbconf.setOptionAssignment((" = ", " "))
|
||||
@@ -780,16 +807,7 @@ def configure_krb5_conf(
|
||||
def configure_certmonger(
|
||||
fstore, subject_base, cli_realm, hostname, options, ca_enabled):
|
||||
|
||||
cmonger = services.knownservices.certmonger
|
||||
if not options.request_cert:
|
||||
# Conditionally restart certmonger to pick up the new IPA
|
||||
# configuration.
|
||||
try:
|
||||
cmonger.try_restart()
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Failed to conditionally restart the %s daemon: %s",
|
||||
cmonger.service_name, str(e))
|
||||
return
|
||||
|
||||
if not ca_enabled:
|
||||
@@ -804,6 +822,7 @@ def configure_certmonger(
|
||||
# which principal name to use when requesting certs.
|
||||
certmonger.add_principal_to_cas(principal)
|
||||
|
||||
cmonger = services.knownservices.certmonger
|
||||
try:
|
||||
cmonger.enable()
|
||||
cmonger.start()
|
||||
@@ -928,7 +947,9 @@ def configure_sssd_conf(
|
||||
"Unable to activate the SUDO service in SSSD config.")
|
||||
|
||||
sssdconfig.activate_service('sudo')
|
||||
tasks.enable_sssd_sudo(fstore)
|
||||
configure_nsswitch_database(
|
||||
fstore, 'sudoers', ['sss'],
|
||||
default_value=['files'])
|
||||
|
||||
domain.add_provider('ipa', 'id')
|
||||
|
||||
@@ -1017,13 +1038,8 @@ def sssd_enable_service(sssdconfig, name):
|
||||
return sssdconfig.get_service(name)
|
||||
|
||||
|
||||
def sssd_enable_ifp(sssdconfig, allow_httpd=False):
|
||||
def sssd_enable_ifp(sssdconfig):
|
||||
"""Enable and configure libsss_simpleifp plugin
|
||||
|
||||
Allow the ``ipaapi`` user to access IFP. In case allow_httpd is true,
|
||||
the Apache HTTPd user is also allowed to access IFP. For smart card
|
||||
authentication, mod_lookup_identity must be allowed to access user
|
||||
information.
|
||||
"""
|
||||
service = sssd_enable_service(sssdconfig, 'ifp')
|
||||
if service is None:
|
||||
@@ -1042,8 +1058,6 @@ def sssd_enable_ifp(sssdconfig, allow_httpd=False):
|
||||
uids.add('root')
|
||||
# allow IPA API to access IFP
|
||||
uids.add(IPAAPI_USER)
|
||||
if allow_httpd:
|
||||
uids.add(constants.HTTPD_USER)
|
||||
service.set_option('allowed_uids', ', '.join(sorted(uids)))
|
||||
sssdconfig.save_service(service)
|
||||
|
||||
@@ -1117,6 +1131,7 @@ def configure_ssh_config(fstore, options):
|
||||
changes['GlobalKnownHostsFile'] = paths.SSSD_PUBCONF_KNOWN_HOSTS
|
||||
if options.trust_sshfp:
|
||||
changes['VerifyHostKeyDNS'] = 'yes'
|
||||
changes['HostKeyAlgorithms'] = 'ssh-rsa,ssh-dss'
|
||||
|
||||
change_ssh_config(paths.SSH_CONFIG, changes, ['Host', 'Match'])
|
||||
logger.info('Configured %s', paths.SSH_CONFIG)
|
||||
@@ -1131,29 +1146,6 @@ def configure_sshd_config(fstore, options):
|
||||
|
||||
fstore.backup_file(paths.SSHD_CONFIG)
|
||||
|
||||
# If openssh-server >= 8.2, the config needs to go in a new snippet
|
||||
# in /etc/ssh/sshd_config.d/04-ipa.conf
|
||||
# instead of /etc/ssh/sshd_config file
|
||||
def sshd_version_supports_include():
|
||||
with open(paths.SSHD_CONFIG, 'r') as f:
|
||||
for line in f:
|
||||
if re.match(r"^Include\s", line):
|
||||
return True
|
||||
return False
|
||||
|
||||
if sshd_version_supports_include():
|
||||
create_sshd_ipa_config(options)
|
||||
else:
|
||||
modify_sshd_config(options)
|
||||
|
||||
if sshd.is_running():
|
||||
try:
|
||||
sshd.restart()
|
||||
except Exception as e:
|
||||
log_service_error(sshd.service_name, 'restart', e)
|
||||
|
||||
|
||||
def modify_sshd_config(options):
|
||||
changes = {
|
||||
'PubkeyAuthentication': 'yes',
|
||||
'KerberosAuthentication': 'no',
|
||||
@@ -1202,24 +1194,11 @@ def modify_sshd_config(options):
|
||||
change_ssh_config(paths.SSHD_CONFIG, changes, ['Match'])
|
||||
logger.info('Configured %s', paths.SSHD_CONFIG)
|
||||
|
||||
|
||||
def create_sshd_ipa_config(options):
|
||||
"""Add the IPA snippet for sshd"""
|
||||
sssd_sshd_options = ""
|
||||
if options.sssd and os.path.isfile(paths.SSS_SSH_AUTHORIZEDKEYS):
|
||||
sssd_sshd_options = textwrap.dedent("""\
|
||||
AuthorizedKeysCommand {}
|
||||
AuthorizedKeysCommandUser nobody
|
||||
""").format(paths.SSS_SSH_AUTHORIZEDKEYS)
|
||||
|
||||
ipautil.copy_template_file(
|
||||
os.path.join(paths.SSHD_IPA_CONFIG_TEMPLATE),
|
||||
paths.SSHD_IPA_CONFIG,
|
||||
dict(
|
||||
SSSD_SSHD_OPTIONS=sssd_sshd_options,
|
||||
)
|
||||
)
|
||||
logger.info('Configured %s', paths.SSHD_IPA_CONFIG)
|
||||
if sshd.is_running():
|
||||
try:
|
||||
sshd.restart()
|
||||
except Exception as e:
|
||||
log_service_error(sshd.service_name, 'restart', e)
|
||||
|
||||
|
||||
def configure_automount(options):
|
||||
@@ -1440,7 +1419,7 @@ def verify_dns_update(fqdn, ips):
|
||||
logger.debug('DNS resolver: Query: %s IN %s',
|
||||
fqdn, dns.rdatatype.to_text(record_type))
|
||||
try:
|
||||
answers = dnsutil.resolve(fqdn, record_type)
|
||||
answers = dns.resolver.query(fqdn, record_type)
|
||||
except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
|
||||
logger.debug('DNS resolver: No record.')
|
||||
except dns.resolver.NoNameservers:
|
||||
@@ -1460,9 +1439,10 @@ def verify_dns_update(fqdn, ips):
|
||||
missing_reverse = [str(ip) for ip in ips]
|
||||
for ip in ips:
|
||||
ip_str = str(ip)
|
||||
logger.debug('DNS resolver: Query: %s IN PTR', ip_str)
|
||||
addr = dns.reversename.from_address(ip_str)
|
||||
logger.debug('DNS resolver: Query: %s IN PTR', addr)
|
||||
try:
|
||||
answers = dnsutil.resolve_address(ip_str)
|
||||
answers = dns.resolver.query(addr, dns.rdatatype.PTR)
|
||||
except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
|
||||
logger.debug('DNS resolver: No record.')
|
||||
except dns.resolver.NoNameservers:
|
||||
@@ -1574,13 +1554,12 @@ def update_ssh_keys(hostname, ssh_dir, create_sshfp):
|
||||
continue
|
||||
|
||||
for line in f:
|
||||
line = line.strip()
|
||||
line = line[:-1].lstrip()
|
||||
if not line or line.startswith('#'):
|
||||
continue
|
||||
try:
|
||||
pubkey = SSHPublicKey(line)
|
||||
except (ValueError, UnicodeDecodeError) as e:
|
||||
logger.debug("Decoding line '%s' failed: %s", line, e)
|
||||
except (ValueError, UnicodeDecodeError):
|
||||
continue
|
||||
logger.info("Adding SSH public key from %s", filename)
|
||||
pubkeys.append(pubkey)
|
||||
@@ -1652,7 +1631,8 @@ def cert_summary(msg, certs, indent=' '):
|
||||
|
||||
|
||||
def get_certs_from_ldap(server, base_dn, realm, ca_enabled):
|
||||
conn = ipaldap.LDAPClient.from_hostname_plain(server)
|
||||
ldap_uri = ipaldap.get_ldap_uri(server)
|
||||
conn = ipaldap.LDAPClient(ldap_uri)
|
||||
try:
|
||||
conn.gssapi_bind()
|
||||
certs = certstore.get_ca_certs(conn, base_dn, realm, ca_enabled)
|
||||
@@ -1937,9 +1917,9 @@ def get_ca_certs(fstore, options, server, basedn, realm):
|
||||
if os.path.exists(ca_file):
|
||||
try:
|
||||
os.unlink(ca_file)
|
||||
except OSError as e2:
|
||||
except OSError as e:
|
||||
logger.error(
|
||||
"Failed to remove '%s': %s", ca_file, e2)
|
||||
"Failed to remove '%s': %s", ca_file, e)
|
||||
raise errors.FileError(
|
||||
reason=u"cannot write certificate file '%s': %s" % (
|
||||
ca_file, e)
|
||||
@@ -2076,6 +2056,8 @@ def install_check(options):
|
||||
cli_domain_source = 'Unknown source'
|
||||
cli_server_source = 'Unknown source'
|
||||
|
||||
fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
|
||||
|
||||
if not os.getegid() == 0:
|
||||
raise ScriptError(
|
||||
"You must be root to run ipa-client-install.",
|
||||
@@ -2083,7 +2065,7 @@ def install_check(options):
|
||||
|
||||
tasks.check_selinux_status()
|
||||
|
||||
if is_ipa_client_configured(on_master=options.on_master):
|
||||
if is_ipa_client_installed(fstore, on_master=options.on_master):
|
||||
logger.error("IPA client is already configured on this system.")
|
||||
logger.info(
|
||||
"If you want to reinstall the IPA client, uninstall it first "
|
||||
@@ -2096,13 +2078,10 @@ def install_check(options):
|
||||
try:
|
||||
timeconf.check_timedate_services()
|
||||
except timeconf.NTPConflictingService as e:
|
||||
print(
|
||||
"WARNING: conflicting time&date synchronization service "
|
||||
"'{}' will be disabled in favor of chronyd\n".format(
|
||||
e.conflicting_service
|
||||
)
|
||||
)
|
||||
|
||||
print("WARNING: conflicting time&date synchronization service '{}'"
|
||||
" will be disabled".format(e.conflicting_service))
|
||||
print("in favor of chronyd")
|
||||
print("")
|
||||
except timeconf.NTPConfigurationError:
|
||||
pass
|
||||
|
||||
@@ -2134,13 +2113,6 @@ def install_check(options):
|
||||
"Invalid hostname, '{}' must not be used.".format(hostname),
|
||||
rval=CLIENT_INSTALL_ERROR)
|
||||
|
||||
try:
|
||||
validate_hostname(hostname, maxlen=MAXHOSTNAMELEN)
|
||||
except ValueError as e:
|
||||
raise ScriptError(
|
||||
'invalid hostname: {}'.format(e),
|
||||
rval=CLIENT_INSTALL_ERROR)
|
||||
|
||||
# --no-sssd is not supported any more for rhel-based distros
|
||||
if not tasks.is_nosssd_supported() and not options.sssd:
|
||||
raise ScriptError(
|
||||
@@ -2157,14 +2129,6 @@ def install_check(options):
|
||||
"authentication resources",
|
||||
rval=CLIENT_INSTALL_ERROR)
|
||||
|
||||
# --mkhomedir is not supported by fedora_container and rhel_container
|
||||
if not tasks.is_mkhomedir_supported() and options.mkhomedir:
|
||||
raise ScriptError(
|
||||
"Option '--mkhomedir' is incompatible with the 'authselect' tool "
|
||||
"provided by this distribution for configuring system "
|
||||
"authentication resources",
|
||||
rval=CLIENT_INSTALL_ERROR)
|
||||
|
||||
# when installing with '--no-sssd' option, check whether nss-ldap is
|
||||
# installed
|
||||
if not options.sssd:
|
||||
@@ -2210,7 +2174,7 @@ def install_check(options):
|
||||
raise ScriptError(rval=CLIENT_INSTALL_ERROR)
|
||||
|
||||
# Create the discovery instance
|
||||
ds = discovery.IPADiscovery()
|
||||
ds = ipadiscovery.IPADiscovery()
|
||||
|
||||
ret = ds.search(
|
||||
domain=options.domain,
|
||||
@@ -2233,22 +2197,22 @@ def install_check(options):
|
||||
print_port_conf_info()
|
||||
raise ScriptError(rval=CLIENT_INSTALL_ERROR)
|
||||
|
||||
if ret == discovery.BAD_HOST_CONFIG:
|
||||
if ret == ipadiscovery.BAD_HOST_CONFIG:
|
||||
logger.error("Can't get the fully qualified name of this host")
|
||||
logger.info("Check that the client is properly configured")
|
||||
raise ScriptError(rval=CLIENT_INSTALL_ERROR)
|
||||
if ret == discovery.NOT_FQDN:
|
||||
if ret == ipadiscovery.NOT_FQDN:
|
||||
raise ScriptError(
|
||||
"{} is not a fully-qualified hostname".format(hostname),
|
||||
rval=CLIENT_INSTALL_ERROR)
|
||||
if ret in (discovery.NO_LDAP_SERVER, discovery.NOT_IPA_SERVER) \
|
||||
if ret in (ipadiscovery.NO_LDAP_SERVER, ipadiscovery.NOT_IPA_SERVER) \
|
||||
or not ds.domain:
|
||||
if ret == discovery.NO_LDAP_SERVER:
|
||||
if ret == ipadiscovery.NO_LDAP_SERVER:
|
||||
if ds.server:
|
||||
logger.debug("%s is not an LDAP server", ds.server)
|
||||
else:
|
||||
logger.debug("No LDAP server found")
|
||||
elif ret == discovery.NOT_IPA_SERVER:
|
||||
elif ret == ipadiscovery.NOT_IPA_SERVER:
|
||||
if ds.server:
|
||||
logger.debug("%s is not an IPA server", ds.server)
|
||||
else:
|
||||
@@ -2285,7 +2249,7 @@ def install_check(options):
|
||||
|
||||
client_domain = hostname[hostname.find(".")+1:]
|
||||
|
||||
if ret in (discovery.NO_LDAP_SERVER, discovery.NOT_IPA_SERVER) \
|
||||
if ret in (ipadiscovery.NO_LDAP_SERVER, ipadiscovery.NOT_IPA_SERVER) \
|
||||
or not ds.server:
|
||||
logger.debug("IPA Server not found")
|
||||
if options.server:
|
||||
@@ -2337,13 +2301,13 @@ def install_check(options):
|
||||
cli_server_source = ds.server_source
|
||||
logger.debug("will use discovered server: %s", cli_server[0])
|
||||
|
||||
if ret == discovery.NOT_IPA_SERVER:
|
||||
if ret == ipadiscovery.NOT_IPA_SERVER:
|
||||
logger.error("%s is not an IPA v2 Server.", cli_server[0])
|
||||
print_port_conf_info()
|
||||
logger.debug("(%s: %s)", cli_server[0], cli_server_source)
|
||||
raise ScriptError(rval=CLIENT_INSTALL_ERROR)
|
||||
|
||||
if ret == discovery.NO_ACCESS_TO_LDAP:
|
||||
if ret == ipadiscovery.NO_ACCESS_TO_LDAP:
|
||||
logger.warning("Anonymous access to the LDAP server is disabled.")
|
||||
logger.info("Proceeding without strict verification.")
|
||||
logger.info(
|
||||
@@ -2351,7 +2315,7 @@ def install_check(options):
|
||||
"has been explicitly restricted.")
|
||||
ret = 0
|
||||
|
||||
if ret == discovery.NO_TLS_LDAP:
|
||||
if ret == ipadiscovery.NO_TLS_LDAP:
|
||||
logger.warning(
|
||||
"The LDAP server requires TLS is but we do not have the CA.")
|
||||
logger.info("Proceeding without strict verification.")
|
||||
@@ -2396,11 +2360,6 @@ def install_check(options):
|
||||
"Proceed with fixed values and no DNS discovery?", False):
|
||||
raise ScriptError(rval=CLIENT_INSTALL_ERROR)
|
||||
|
||||
if options.conf_ntp:
|
||||
if not options.on_master and not options.unattended and not (
|
||||
options.ntp_servers or options.ntp_pool):
|
||||
options.ntp_servers, options.ntp_pool = timeconf.get_time_source()
|
||||
|
||||
cli_realm = ds.realm
|
||||
cli_realm_source = ds.realm_source
|
||||
logger.debug("will use discovered realm: %s", cli_realm)
|
||||
@@ -2428,14 +2387,6 @@ def install_check(options):
|
||||
logger.info("BaseDN: %s", cli_basedn)
|
||||
logger.debug("BaseDN source: %s", cli_basedn_source)
|
||||
|
||||
if not options.on_master:
|
||||
if options.ntp_servers:
|
||||
for server in options.ntp_servers:
|
||||
logger.info("NTP server: %s", server)
|
||||
|
||||
if options.ntp_pool:
|
||||
logger.info("NTP pool: %s", options.ntp_pool)
|
||||
|
||||
# ipa-join would fail with IP address instead of a FQDN
|
||||
for srv in cli_server:
|
||||
try:
|
||||
@@ -2501,7 +2452,7 @@ def update_ipa_nssdb():
|
||||
(nickname, sys_db.secdir, e))
|
||||
|
||||
|
||||
def sync_time(ntp_servers, ntp_pool, fstore, statestore):
|
||||
def sync_time(options, fstore, statestore):
|
||||
"""
|
||||
Will disable any other time synchronization service and configure chrony
|
||||
with given ntp(chrony) server and/or pool using Augeas.
|
||||
@@ -2513,24 +2464,18 @@ def sync_time(ntp_servers, ntp_pool, fstore, statestore):
|
||||
# disable other time&date services first
|
||||
timeconf.force_chrony(statestore)
|
||||
|
||||
if not ntp_servers and not ntp_pool:
|
||||
# autodiscovery happens in case that NTP configuration isn't explicitly
|
||||
# disabled and user did not provide any NTP server addresses or
|
||||
# NTP pool address to the installer interactively or as an cli argument
|
||||
ds = discovery.IPADiscovery()
|
||||
ntp_servers = ds.ipadns_search_srv(
|
||||
cli_domain, '_ntp._udp', None, break_on_first=False
|
||||
)
|
||||
if ntp_servers:
|
||||
for server in ntp_servers:
|
||||
# when autodiscovery found server records
|
||||
logger.debug("Found DNS record for NTP server: \t%s", server)
|
||||
|
||||
logger.info('Synchronizing time')
|
||||
|
||||
if not options.ntp_servers:
|
||||
ds = ipadiscovery.IPADiscovery()
|
||||
ntp_servers = ds.ipadns_search_srv(cli_domain, '_ntp._udp',
|
||||
None, break_on_first=False)
|
||||
else:
|
||||
ntp_servers = options.ntp_servers
|
||||
|
||||
configured = False
|
||||
if ntp_servers or ntp_pool:
|
||||
configured = timeconf.configure_chrony(ntp_servers, ntp_pool,
|
||||
if ntp_servers or options.ntp_pool:
|
||||
configured = timeconf.configure_chrony(ntp_servers, options.ntp_pool,
|
||||
fstore, statestore)
|
||||
else:
|
||||
logger.warning("No SRV records of NTP servers found and no NTP server "
|
||||
@@ -2603,8 +2548,6 @@ def _install(options):
|
||||
fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
|
||||
statestore = sysrestore.StateFile(paths.IPA_CLIENT_SYSRESTORE)
|
||||
|
||||
statestore.backup_state('installation', 'complete', False)
|
||||
|
||||
if not options.on_master:
|
||||
# Try removing old principals from the keytab
|
||||
purge_host_keytab(cli_realm)
|
||||
@@ -2617,7 +2560,7 @@ def _install(options):
|
||||
|
||||
if options.conf_ntp:
|
||||
# Attempt to configure and sync time with NTP server (chrony).
|
||||
sync_time(options.ntp_servers, options.ntp_pool, fstore, statestore)
|
||||
sync_time(options, fstore, statestore)
|
||||
elif options.on_master:
|
||||
# If we're on master skipping the time sync here because it was done
|
||||
# in ipa-server-install
|
||||
@@ -2655,13 +2598,10 @@ def _install(options):
|
||||
force=options.force)
|
||||
env['KRB5_CONFIG'] = krb_name
|
||||
ccache_name = os.path.join(ccache_dir, 'ccache')
|
||||
join_args = [
|
||||
paths.SBIN_IPA_JOIN,
|
||||
"-s", cli_server[0],
|
||||
"-b", str(realm_to_suffix(cli_realm)),
|
||||
"-h", hostname,
|
||||
"-k", paths.KRB5_KEYTAB
|
||||
]
|
||||
join_args = [paths.SBIN_IPA_JOIN,
|
||||
"-s", cli_server[0],
|
||||
"-b", str(realm_to_suffix(cli_realm)),
|
||||
"-h", hostname]
|
||||
if options.debug:
|
||||
join_args.append("-d")
|
||||
env['XMLRPC_TRACE_CURL'] = 'yes'
|
||||
@@ -3191,22 +3131,19 @@ def _install(options):
|
||||
configure_nisdomain(
|
||||
options=options, domain=cli_domain, statestore=statestore)
|
||||
|
||||
statestore.delete_state('installation', 'complete')
|
||||
statestore.backup_state('installation', 'complete', True)
|
||||
logger.info('Client configuration complete.')
|
||||
|
||||
|
||||
def uninstall_check(options):
|
||||
if not is_ipa_client_configured():
|
||||
if options.on_master:
|
||||
rval = SUCCESS
|
||||
else:
|
||||
rval = CLIENT_NOT_CONFIGURED
|
||||
fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
|
||||
|
||||
if not is_ipa_client_installed(fstore):
|
||||
raise ScriptError(
|
||||
"IPA client is not configured on this system.",
|
||||
rval=rval)
|
||||
rval=CLIENT_NOT_CONFIGURED)
|
||||
|
||||
if is_ipa_configured() and not options.on_master:
|
||||
server_fstore = sysrestore.FileStore(paths.SYSRESTORE)
|
||||
if server_fstore.has_files() and not options.on_master:
|
||||
logger.error(
|
||||
"IPA client is configured as a part of IPA server on this system.")
|
||||
logger.info("Refer to ipa-server-install for uninstallation.")
|
||||
@@ -3313,12 +3250,7 @@ def uninstall(options):
|
||||
|
||||
if not options.on_master and os.path.exists(paths.IPA_DEFAULT_CONF):
|
||||
logger.info("Unenrolling client from IPA server")
|
||||
join_args = [
|
||||
paths.SBIN_IPA_JOIN,
|
||||
"--unenroll",
|
||||
"-h", hostname,
|
||||
"-k", paths.KRB5_KEYTAB
|
||||
]
|
||||
join_args = [paths.SBIN_IPA_JOIN, "--unenroll", "-h", hostname]
|
||||
if options.debug:
|
||||
join_args.append("-d")
|
||||
env['XMLRPC_TRACE_CURL'] = 'yes'
|
||||
@@ -3499,7 +3431,6 @@ def uninstall(options):
|
||||
restore_time_sync(statestore, fstore)
|
||||
|
||||
if was_sshd_configured and services.knownservices.sshd.is_running():
|
||||
remove_file(paths.SSHD_IPA_CONFIG)
|
||||
services.knownservices.sshd.restart()
|
||||
|
||||
# Remove the Firefox configuration
|
||||
@@ -3523,8 +3454,6 @@ def uninstall(options):
|
||||
if fstore.has_files():
|
||||
logger.error('Some files have not been restored, see %s',
|
||||
paths.SYSRESTORE_INDEX)
|
||||
|
||||
statestore.delete_state('installation', 'complete')
|
||||
has_state = False
|
||||
for module in statestore.modules:
|
||||
logger.error(
|
||||
@@ -3753,7 +3682,6 @@ class ClientInstallInterface(hostname_.HostNameInstallInterface,
|
||||
|
||||
request_cert = knob(
|
||||
None,
|
||||
deprecated=True,
|
||||
description="request certificate for the machine",
|
||||
)
|
||||
request_cert = prepare_only(request_cert)
|
||||
@@ -3766,10 +3694,7 @@ class ClientInstallInterface(hostname_.HostNameInstallInterface,
|
||||
"--server cannot be used without providing --domain")
|
||||
|
||||
if self.force_ntpd:
|
||||
logger.warning(
|
||||
"Option --force-ntpd has been deprecated and will be "
|
||||
"removed in a future release."
|
||||
)
|
||||
logger.warning("Option --force-ntpd has been deprecated")
|
||||
|
||||
if self.ntp_servers and self.no_ntp:
|
||||
raise RuntimeError(
|
||||
@@ -3779,12 +3704,6 @@ class ClientInstallInterface(hostname_.HostNameInstallInterface,
|
||||
raise RuntimeError(
|
||||
"--ntp-pool cannot be used together with --no-ntp")
|
||||
|
||||
if self.request_cert:
|
||||
logger.warning(
|
||||
"Option --request-cert has been deprecated and will be "
|
||||
"removed in a future release."
|
||||
)
|
||||
|
||||
if self.no_nisdomain and self.nisdomain:
|
||||
raise RuntimeError(
|
||||
"--no-nisdomain cannot be used together with --nisdomain")
|
||||
|
||||
@@ -24,10 +24,11 @@ import os
|
||||
import tempfile
|
||||
import shutil
|
||||
|
||||
from urllib.parse import urlsplit
|
||||
# pylint: disable=import-error
|
||||
from six.moves.urllib.parse import urlsplit
|
||||
# pylint: enable=import-error
|
||||
|
||||
from ipalib.install import certmonger, certstore
|
||||
from ipalib.facts import is_ipa_configured
|
||||
from ipalib.install import certmonger, certstore, sysrestore
|
||||
from ipalib.install.kinit import kinit_keytab
|
||||
from ipapython import admintool, certdb, ipaldap, ipautil
|
||||
from ipaplatform import services
|
||||
@@ -71,7 +72,8 @@ def run_with_args(api):
|
||||
|
||||
"""
|
||||
server = urlsplit(api.env.jsonrpc_uri).hostname
|
||||
ldap = ipaldap.LDAPClient.from_hostname_secure(server)
|
||||
ldap_uri = ipaldap.get_ldap_uri(server)
|
||||
ldap = ipaldap.LDAPClient(ldap_uri)
|
||||
|
||||
tmpdir = tempfile.mkdtemp(prefix="tmp-")
|
||||
ccache_name = os.path.join(tmpdir, 'ccache')
|
||||
@@ -105,21 +107,17 @@ def run_with_args(api):
|
||||
os.environ['KRB5CCNAME'] = old_krb5ccname
|
||||
shutil.rmtree(tmpdir)
|
||||
|
||||
if is_ipa_configured():
|
||||
server_fstore = sysrestore.FileStore(paths.SYSRESTORE)
|
||||
if server_fstore.has_files():
|
||||
update_server(certs)
|
||||
|
||||
# pylint: disable=import-error,ipa-forbidden-import
|
||||
from ipaserver.install import cainstance
|
||||
# pylint: enable=import-error,ipa-forbidden-import
|
||||
|
||||
# Add LWCA tracking requests. Only execute if *this server*
|
||||
# has CA installed (ca_enabled indicates CA-ful topology).
|
||||
if cainstance.CAInstance().is_configured():
|
||||
try:
|
||||
cainstance.add_lightweight_ca_tracking_requests(lwcas)
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"Failed to add lightweight CA tracking requests")
|
||||
try:
|
||||
# pylint: disable=import-error,ipa-forbidden-import
|
||||
from ipaserver.install import cainstance
|
||||
# pylint: enable=import-error,ipa-forbidden-import
|
||||
cainstance.add_lightweight_ca_tracking_requests(lwcas)
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"Failed to add lightweight CA tracking requests")
|
||||
|
||||
update_client(certs)
|
||||
|
||||
@@ -166,17 +164,6 @@ def update_server(certs):
|
||||
if request_id is not None:
|
||||
timeout = api.env.startup_timeout + 60
|
||||
|
||||
# The dogtag-ipa-ca-renew-agent-reuse Certmonger CA never
|
||||
# actually renews the certificate; it only pulls it from the
|
||||
# ca_renewal LDAP cert store.
|
||||
#
|
||||
# Why is this needed? If the CA cert gets renewed long
|
||||
# before its notAfter (expiry) date (e.g. to switch from
|
||||
# self-signed to external, or to switch to new external CA),
|
||||
# then the other (i.e. not caRenewalMaster) CA replicas will
|
||||
# not promptly pick up the new CA cert. So we make
|
||||
# ipa-certupdate always check for an updated CA cert.
|
||||
#
|
||||
logger.debug("resubmitting certmonger request '%s'", request_id)
|
||||
certmonger.resubmit_request(
|
||||
request_id, ca='dogtag-ipa-ca-renew-agent-reuse', profile='')
|
||||
@@ -208,16 +195,7 @@ def update_file(filename, certs, mode=0o644):
|
||||
|
||||
|
||||
def update_db(path, certs):
|
||||
"""Drop all CA certs from db then add certs from list provided
|
||||
|
||||
This may result in some churn as existing certs are dropped
|
||||
and re-added but this also provides the ability to change
|
||||
the trust flags.
|
||||
"""
|
||||
db = certdb.NSSDatabase(path)
|
||||
for name, flags in db.list_certs():
|
||||
if flags.ca:
|
||||
db.delete_cert(name)
|
||||
for cert, nickname, trusted, eku in certs:
|
||||
trust_flags = certstore.key_policy_to_trust_flags(trusted, True, eku)
|
||||
try:
|
||||
|
||||
@@ -1,606 +0,0 @@
|
||||
#
|
||||
# Authors:
|
||||
# Rob Crittenden <rcritten@redhat.com>
|
||||
#
|
||||
# Copyright (C) 2012, 2019 Red Hat
|
||||
# see file 'COPYING' for use and warranty information
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
# Configure the automount client for ldap.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import logging
|
||||
import sys
|
||||
import os
|
||||
import shutil
|
||||
import time
|
||||
import tempfile
|
||||
import gssapi
|
||||
import warnings
|
||||
|
||||
try:
|
||||
from xml.etree import cElementTree as etree
|
||||
except ImportError:
|
||||
from xml.etree import ElementTree as etree
|
||||
import SSSDConfig
|
||||
|
||||
# pylint: disable=import-error
|
||||
from six.moves.urllib.parse import urlsplit
|
||||
|
||||
# pylint: enable=import-error
|
||||
from optparse import OptionParser # pylint: disable=deprecated-module
|
||||
from ipapython import ipachangeconf
|
||||
from ipaclient.install import ipadiscovery
|
||||
from ipaclient.install.client import (
|
||||
CLIENT_NOT_CONFIGURED,
|
||||
CLIENT_ALREADY_CONFIGURED,
|
||||
)
|
||||
from ipalib import api, errors
|
||||
from ipalib.install import sysrestore
|
||||
from ipalib.install.kinit import kinit_keytab
|
||||
from ipalib.util import check_client_configuration
|
||||
from ipapython import ipautil
|
||||
from ipapython.ipa_log_manager import standard_logging_setup
|
||||
from ipapython.dn import DN
|
||||
from ipaplatform.constants import constants
|
||||
from ipaplatform.tasks import tasks
|
||||
from ipaplatform import services
|
||||
from ipaplatform.paths import paths
|
||||
from ipapython.admintool import ScriptError
|
||||
|
||||
|
||||
logger = logging.getLogger(os.path.basename(__file__))
|
||||
|
||||
|
||||
def parse_options():
|
||||
usage = "%prog [options]\n"
|
||||
parser = OptionParser(usage=usage)
|
||||
parser.add_option("--server", dest="server", help="FQDN of IPA server")
|
||||
parser.add_option(
|
||||
"--location",
|
||||
dest="location",
|
||||
default="default",
|
||||
help="Automount location",
|
||||
)
|
||||
parser.add_option(
|
||||
"-S",
|
||||
"--no-sssd",
|
||||
dest="sssd",
|
||||
action="store_false",
|
||||
default=True,
|
||||
help="Do not configure the client to use SSSD for automount",
|
||||
)
|
||||
parser.add_option(
|
||||
"--idmap-domain",
|
||||
dest="idmapdomain",
|
||||
default=None,
|
||||
help="nfs domain for idmapd.conf",
|
||||
)
|
||||
parser.add_option(
|
||||
"--debug",
|
||||
dest="debug",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="enable debugging",
|
||||
)
|
||||
parser.add_option(
|
||||
"-U",
|
||||
"--unattended",
|
||||
dest="unattended",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="unattended installation never prompts the user",
|
||||
)
|
||||
parser.add_option(
|
||||
"--uninstall",
|
||||
dest="uninstall",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Unconfigure automount",
|
||||
)
|
||||
|
||||
options, args = parser.parse_args()
|
||||
return options, args
|
||||
|
||||
|
||||
def wait_for_sssd():
|
||||
"""
|
||||
It takes a bit for sssd to get going, lets loop until it is
|
||||
serving data.
|
||||
|
||||
This function returns nothing.
|
||||
"""
|
||||
n = 0
|
||||
found = False
|
||||
time.sleep(1)
|
||||
while n < 10 and not found:
|
||||
try:
|
||||
ipautil.run([paths.GETENT, "passwd", "admin@%s" % api.env.realm])
|
||||
found = True
|
||||
except Exception:
|
||||
time.sleep(1)
|
||||
n = n + 1
|
||||
|
||||
# This should never happen but if it does, may as well warn the user
|
||||
if not found:
|
||||
err_msg = (
|
||||
"Unable to find 'admin' user with "
|
||||
"'getent passwd admin@%s'!" % api.env.realm
|
||||
)
|
||||
logger.debug('%s', err_msg)
|
||||
print(err_msg)
|
||||
print(
|
||||
"This may mean that sssd didn't re-start properly after "
|
||||
"the configuration changes."
|
||||
)
|
||||
|
||||
|
||||
def configure_xml(fstore):
|
||||
authconf = paths.AUTOFS_LDAP_AUTH_CONF
|
||||
fstore.backup_file(authconf)
|
||||
|
||||
try:
|
||||
tree = etree.parse(authconf)
|
||||
except IOError as e:
|
||||
logger.debug('Unable to open file %s', e)
|
||||
logger.debug('Creating new from template')
|
||||
tree = etree.ElementTree(
|
||||
element=etree.Element('autofs_ldap_sasl_conf')
|
||||
)
|
||||
|
||||
element = tree.getroot()
|
||||
if element.tag != 'autofs_ldap_sasl_conf':
|
||||
raise RuntimeError('Invalid XML root in file %s' % authconf)
|
||||
|
||||
element.set('usetls', 'no')
|
||||
element.set('tlsrequired', 'no')
|
||||
element.set('authrequired', 'yes')
|
||||
element.set('authtype', 'GSSAPI')
|
||||
element.set('clientprinc', 'host/%s@%s' % (api.env.host, api.env.realm))
|
||||
|
||||
try:
|
||||
tree.write(authconf, xml_declaration=True, encoding='UTF-8')
|
||||
except IOError as e:
|
||||
print("Unable to write %s: %s" % (authconf, e))
|
||||
else:
|
||||
print("Configured %s" % authconf)
|
||||
|
||||
|
||||
def configure_nsswitch(statestore, options):
|
||||
"""
|
||||
This function was deprecated. Use ipaplatform.tasks.
|
||||
|
||||
Point automount to ldap in nsswitch.conf.
|
||||
This function is for non-SSSD setups only.
|
||||
"""
|
||||
warnings.warn(
|
||||
"Use ipaplatform.tasks.tasks.enable_ldap_automount",
|
||||
DeprecationWarning,
|
||||
stacklevel=2
|
||||
)
|
||||
return tasks.enable_ldap_automount(statestore)
|
||||
|
||||
|
||||
def configure_autofs_sssd(fstore, statestore, autodiscover, options):
|
||||
try:
|
||||
sssdconfig = SSSDConfig.SSSDConfig()
|
||||
sssdconfig.import_config()
|
||||
domains = sssdconfig.list_active_domains()
|
||||
except Exception as e:
|
||||
sys.exit(e)
|
||||
|
||||
try:
|
||||
sssdconfig.new_service('autofs')
|
||||
except SSSDConfig.ServiceAlreadyExists:
|
||||
pass
|
||||
except SSSDConfig.ServiceNotRecognizedError:
|
||||
logger.error("Unable to activate the Autofs service in SSSD config.")
|
||||
logger.info(
|
||||
"Please make sure you have SSSD built with autofs support "
|
||||
"installed."
|
||||
)
|
||||
logger.info(
|
||||
"Configure autofs support manually in /etc/sssd/sssd.conf."
|
||||
)
|
||||
sys.exit("Cannot create the autofs service in sssd.conf")
|
||||
|
||||
sssdconfig.activate_service('autofs')
|
||||
|
||||
domain = None
|
||||
for name in domains:
|
||||
domain = sssdconfig.get_domain(name)
|
||||
try:
|
||||
provider = domain.get_option('id_provider')
|
||||
except SSSDConfig.NoOptionError:
|
||||
continue
|
||||
if provider == "ipa":
|
||||
domain.add_provider('ipa', 'autofs')
|
||||
try:
|
||||
domain.get_option('ipa_automount_location')
|
||||
print('An automount location is already configured')
|
||||
sys.exit(CLIENT_ALREADY_CONFIGURED)
|
||||
except SSSDConfig.NoOptionError:
|
||||
domain.set_option('ipa_automount_location', options.location)
|
||||
break
|
||||
|
||||
if domain is None:
|
||||
sys.exit('SSSD is not configured.')
|
||||
|
||||
sssdconfig.save_domain(domain)
|
||||
sssdconfig.write(paths.SSSD_CONF)
|
||||
statestore.backup_state('autofs', 'sssd', True)
|
||||
|
||||
sssd = services.service('sssd', api)
|
||||
sssd.restart()
|
||||
print("Restarting sssd, waiting for it to become available.")
|
||||
wait_for_sssd()
|
||||
|
||||
|
||||
def configure_autofs(fstore, statestore, autodiscover, server, options):
|
||||
"""
|
||||
fstore: the FileStore to back up files in
|
||||
options.server: the IPA server to use
|
||||
options.location: the Automount location to use
|
||||
"""
|
||||
if not autodiscover:
|
||||
ldap_uri = "ldap://%s" % server
|
||||
else:
|
||||
ldap_uri = "ldap:///%s" % api.env.basedn
|
||||
|
||||
search_base = str(
|
||||
DN(
|
||||
('cn', options.location),
|
||||
api.env.container_automount,
|
||||
api.env.basedn,
|
||||
)
|
||||
)
|
||||
replacevars = {
|
||||
'MAP_OBJECT_CLASS': 'automountMap',
|
||||
'ENTRY_OBJECT_CLASS': 'automount',
|
||||
'MAP_ATTRIBUTE': 'automountMapName',
|
||||
'ENTRY_ATTRIBUTE': 'automountKey',
|
||||
'VALUE_ATTRIBUTE': 'automountInformation',
|
||||
'SEARCH_BASE': search_base,
|
||||
'LDAP_URI': ldap_uri,
|
||||
}
|
||||
|
||||
ipautil.backup_config_and_replace_variables(
|
||||
fstore, paths.SYSCONFIG_AUTOFS, replacevars=replacevars
|
||||
)
|
||||
tasks.restore_context(paths.SYSCONFIG_AUTOFS)
|
||||
statestore.backup_state('autofs', 'sssd', False)
|
||||
|
||||
print("Configured %s" % paths.SYSCONFIG_AUTOFS)
|
||||
|
||||
|
||||
def configure_autofs_common(fstore, statestore, options):
|
||||
autofs = services.knownservices.autofs
|
||||
statestore.backup_state('autofs', 'enabled', autofs.is_enabled())
|
||||
statestore.backup_state('autofs', 'running', autofs.is_running())
|
||||
try:
|
||||
autofs.restart()
|
||||
print("Started %s" % autofs.service_name)
|
||||
except Exception as e:
|
||||
logger.error("%s failed to restart: %s", autofs.service_name, e)
|
||||
try:
|
||||
autofs.enable()
|
||||
except Exception as e:
|
||||
print(
|
||||
"Failed to configure automatic startup of the %s daemon"
|
||||
% (autofs.service_name)
|
||||
)
|
||||
logger.error(
|
||||
"Failed to enable automatic startup of the %s daemon: %s",
|
||||
autofs.service_name,
|
||||
str(e),
|
||||
)
|
||||
|
||||
|
||||
def uninstall(fstore, statestore):
|
||||
RESTORE_FILES = [
|
||||
paths.SYSCONFIG_AUTOFS,
|
||||
paths.AUTOFS_LDAP_AUTH_CONF,
|
||||
paths.SYSCONFIG_NFS,
|
||||
paths.IDMAPD_CONF,
|
||||
]
|
||||
STATES = ['autofs', 'rpcidmapd', 'rpcgssd']
|
||||
|
||||
if not statestore.get_state('autofs', 'sssd'):
|
||||
tasks.disable_ldap_automount(statestore)
|
||||
|
||||
if not any(fstore.has_file(f) for f in RESTORE_FILES) or not any(
|
||||
statestore.has_state(s) for s in STATES
|
||||
):
|
||||
print("IPA automount is not configured on this system")
|
||||
return CLIENT_NOT_CONFIGURED
|
||||
|
||||
print("Restoring configuration")
|
||||
|
||||
for filepath in RESTORE_FILES:
|
||||
if fstore.has_file(filepath):
|
||||
fstore.restore_file(filepath)
|
||||
if statestore.has_state('autofs'):
|
||||
enabled = statestore.restore_state('autofs', 'enabled')
|
||||
running = statestore.restore_state('autofs', 'running')
|
||||
sssd = statestore.restore_state('autofs', 'sssd')
|
||||
autofs = services.knownservices.autofs
|
||||
if not enabled:
|
||||
autofs.disable()
|
||||
if not running:
|
||||
autofs.stop()
|
||||
if sssd:
|
||||
try:
|
||||
sssdconfig = SSSDConfig.SSSDConfig()
|
||||
sssdconfig.import_config()
|
||||
sssdconfig.deactivate_service('autofs')
|
||||
domains = sssdconfig.list_active_domains()
|
||||
for name in domains:
|
||||
domain = sssdconfig.get_domain(name)
|
||||
try:
|
||||
provider = domain.get_option('id_provider')
|
||||
except SSSDConfig.NoOptionError:
|
||||
continue
|
||||
if provider == "ipa":
|
||||
domain.remove_option('ipa_automount_location')
|
||||
sssdconfig.save_domain(domain)
|
||||
domain.remove_provider('autofs')
|
||||
sssdconfig.save_domain(domain)
|
||||
break
|
||||
sssdconfig.write(paths.SSSD_CONF)
|
||||
sssd = services.service('sssd', api)
|
||||
sssd.restart()
|
||||
wait_for_sssd()
|
||||
except Exception as e:
|
||||
print('Unable to restore SSSD configuration: %s' % str(e))
|
||||
logger.debug(
|
||||
'Unable to restore SSSD configuration: %s', str(e)
|
||||
)
|
||||
|
||||
# rpcidmapd and rpcgssd are static units now
|
||||
if statestore.has_state('rpcidmapd'):
|
||||
statestore.delete_state('rpcidmapd', 'enabled')
|
||||
statestore.delete_state('rpcidmapd', 'running')
|
||||
if statestore.has_state('rpcgssd'):
|
||||
statestore.delete_state('rpcgssd', 'enabled')
|
||||
statestore.delete_state('rpcgssd', 'running')
|
||||
|
||||
nfsutils = services.knownservices['nfs-utils']
|
||||
try:
|
||||
nfsutils.restart()
|
||||
except Exception as e:
|
||||
logger.error("Failed to restart nfs client services (%s)", str(e))
|
||||
return 1
|
||||
return 0
|
||||
|
||||
|
||||
def configure_nfs(fstore, statestore, options):
|
||||
"""
|
||||
Configure secure NFS
|
||||
"""
|
||||
# Newer Fedora releases ship /etc/nfs.conf instead of /etc/sysconfig/nfs
|
||||
# and do not require changes there. On these, SECURE_NFS_VAR == None
|
||||
if constants.SECURE_NFS_VAR:
|
||||
replacevars = {constants.SECURE_NFS_VAR: 'yes'}
|
||||
ipautil.backup_config_and_replace_variables(
|
||||
fstore, paths.SYSCONFIG_NFS, replacevars=replacevars
|
||||
)
|
||||
tasks.restore_context(paths.SYSCONFIG_NFS)
|
||||
print("Configured %s" % paths.SYSCONFIG_NFS)
|
||||
|
||||
# Prepare the changes
|
||||
# We need to use IPAChangeConf as simple regexp substitution
|
||||
# does not cut it here
|
||||
conf = ipachangeconf.IPAChangeConf("IPA automount installer")
|
||||
conf.case_insensitive_sections = False
|
||||
conf.setOptionAssignment(" = ")
|
||||
conf.setSectionNameDelimiters(("[", "]"))
|
||||
|
||||
if options.idmapdomain is None:
|
||||
# Set NFSv4 domain to the IPA domain
|
||||
changes = [conf.setOption('Domain', api.env.domain)]
|
||||
elif options.idmapdomain == 'DNS':
|
||||
# Rely on idmapd auto-detection (DNS)
|
||||
changes = [conf.rmOption('Domain')]
|
||||
else:
|
||||
# Set NFSv4 domain to what was provided
|
||||
changes = [conf.setOption('Domain', options.idmapdomain)]
|
||||
|
||||
if changes is not None:
|
||||
section_with_changes = [conf.setSection('General', changes)]
|
||||
# Backup the file and apply the changes
|
||||
fstore.backup_file(paths.IDMAPD_CONF)
|
||||
conf.changeConf(paths.IDMAPD_CONF, section_with_changes)
|
||||
tasks.restore_context(paths.IDMAPD_CONF)
|
||||
print("Configured %s" % paths.IDMAPD_CONF)
|
||||
|
||||
rpcgssd = services.knownservices.rpcgssd
|
||||
try:
|
||||
rpcgssd.restart()
|
||||
except Exception as e:
|
||||
logger.error("Failed to restart rpc-gssd (%s)", str(e))
|
||||
nfsutils = services.knownservices['nfs-utils']
|
||||
try:
|
||||
nfsutils.restart()
|
||||
except Exception as e:
|
||||
logger.error("Failed to restart nfs client services (%s)", str(e))
|
||||
|
||||
|
||||
def configure_automount():
|
||||
try:
|
||||
check_client_configuration()
|
||||
except ScriptError as e:
|
||||
print(e.msg)
|
||||
sys.exit(e.rval)
|
||||
|
||||
fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
|
||||
statestore = sysrestore.StateFile(paths.IPA_CLIENT_SYSRESTORE)
|
||||
|
||||
options, _args = parse_options()
|
||||
|
||||
standard_logging_setup(
|
||||
paths.IPACLIENT_INSTALL_LOG,
|
||||
verbose=False,
|
||||
debug=options.debug,
|
||||
filemode='a',
|
||||
console_format='%(message)s',
|
||||
)
|
||||
|
||||
cfg = dict(
|
||||
context='cli_installer',
|
||||
confdir=paths.ETC_IPA,
|
||||
in_server=False,
|
||||
debug=options.debug,
|
||||
verbose=0,
|
||||
)
|
||||
|
||||
# Bootstrap API early so that env object is available
|
||||
api.bootstrap(**cfg)
|
||||
|
||||
if options.uninstall:
|
||||
return uninstall(fstore, statestore)
|
||||
|
||||
ca_cert_path = None
|
||||
if os.path.exists(paths.IPA_CA_CRT):
|
||||
ca_cert_path = paths.IPA_CA_CRT
|
||||
|
||||
if statestore.has_state('autofs'):
|
||||
print('An automount location is already configured')
|
||||
sys.exit(CLIENT_ALREADY_CONFIGURED)
|
||||
|
||||
autodiscover = False
|
||||
ds = ipadiscovery.IPADiscovery()
|
||||
if not options.server:
|
||||
print("Searching for IPA server...")
|
||||
ret = ds.search(ca_cert_path=ca_cert_path)
|
||||
logger.debug('Executing DNS discovery')
|
||||
if ret == ipadiscovery.NO_LDAP_SERVER:
|
||||
logger.debug('Autodiscovery did not find LDAP server')
|
||||
s = urlsplit(api.env.xmlrpc_uri)
|
||||
server = [s.netloc]
|
||||
logger.debug('Setting server to %s', s.netloc)
|
||||
else:
|
||||
autodiscover = True
|
||||
if not ds.servers:
|
||||
sys.exit(
|
||||
'Autodiscovery was successful but didn\'t return a server'
|
||||
)
|
||||
logger.debug(
|
||||
'Autodiscovery success, possible servers %s',
|
||||
','.join(ds.servers),
|
||||
)
|
||||
server = ds.servers[0]
|
||||
else:
|
||||
server = options.server
|
||||
logger.debug("Verifying that %s is an IPA server", server)
|
||||
ldapret = ds.ipacheckldap(server, api.env.realm, ca_cert_path)
|
||||
if ldapret[0] == ipadiscovery.NO_ACCESS_TO_LDAP:
|
||||
print("Anonymous access to the LDAP server is disabled.")
|
||||
print("Proceeding without strict verification.")
|
||||
print(
|
||||
"Note: This is not an error if anonymous access has been "
|
||||
"explicitly restricted."
|
||||
)
|
||||
elif ldapret[0] == ipadiscovery.NO_TLS_LDAP:
|
||||
logger.warning("Unencrypted access to LDAP is not supported.")
|
||||
elif ldapret[0] != 0:
|
||||
sys.exit('Unable to confirm that %s is an IPA server' % server)
|
||||
|
||||
if not autodiscover:
|
||||
print("IPA server: %s" % server)
|
||||
logger.debug('Using fixed server %s', server)
|
||||
else:
|
||||
print("IPA server: DNS discovery")
|
||||
logger.debug('Configuring to use DNS discovery')
|
||||
|
||||
print("Location: %s" % options.location)
|
||||
logger.debug('Using automount location %s', options.location)
|
||||
|
||||
ccache_dir = tempfile.mkdtemp()
|
||||
ccache_name = os.path.join(ccache_dir, 'ccache')
|
||||
try:
|
||||
try:
|
||||
host_princ = str('host/%s@%s' % (api.env.host, api.env.realm))
|
||||
kinit_keytab(host_princ, paths.KRB5_KEYTAB, ccache_name)
|
||||
os.environ['KRB5CCNAME'] = ccache_name
|
||||
except gssapi.exceptions.GSSError as e:
|
||||
sys.exit("Failed to obtain host TGT: %s" % e)
|
||||
|
||||
# Finalize API when TGT obtained using host keytab exists
|
||||
api.finalize()
|
||||
|
||||
# Now we have a TGT, connect to IPA
|
||||
try:
|
||||
api.Backend.rpcclient.connect()
|
||||
except errors.KerberosError as e:
|
||||
sys.exit('Cannot connect to the server due to ' + str(e))
|
||||
try:
|
||||
# Use the RPC directly so older servers are supported
|
||||
api.Backend.rpcclient.forward(
|
||||
'automountlocation_show',
|
||||
ipautil.fsdecode(options.location),
|
||||
version=u'2.0',
|
||||
)
|
||||
except errors.VersionError as e:
|
||||
sys.exit('This client is incompatible: ' + str(e))
|
||||
except errors.NotFound:
|
||||
sys.exit(
|
||||
"Automount location '%s' does not exist" % options.location
|
||||
)
|
||||
except errors.PublicError as e:
|
||||
sys.exit(
|
||||
"Cannot connect to the server due to generic error: %s"
|
||||
% str(e)
|
||||
)
|
||||
finally:
|
||||
shutil.rmtree(ccache_dir)
|
||||
|
||||
if not options.unattended and not ipautil.user_input(
|
||||
"Continue to configure the system with these values?", False
|
||||
):
|
||||
sys.exit("Installation aborted")
|
||||
|
||||
try:
|
||||
if not options.sssd:
|
||||
tasks.enable_ldap_automount(statestore)
|
||||
configure_nfs(fstore, statestore, options)
|
||||
if options.sssd:
|
||||
configure_autofs_sssd(fstore, statestore, autodiscover, options)
|
||||
else:
|
||||
configure_xml(fstore)
|
||||
configure_autofs(
|
||||
fstore, statestore, autodiscover, server, options
|
||||
)
|
||||
configure_autofs_common(fstore, statestore, options)
|
||||
except Exception as e:
|
||||
logger.debug('Raised exception %s', e)
|
||||
print("Installation failed. Rolling back changes.")
|
||||
uninstall(fstore, statestore)
|
||||
return 1
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
def main():
|
||||
try:
|
||||
if not os.geteuid() == 0:
|
||||
sys.exit("\nMust be run as root\n")
|
||||
configure_automount()
|
||||
except SystemExit as e:
|
||||
sys.exit(e)
|
||||
except RuntimeError as e:
|
||||
sys.exit(e)
|
||||
except (KeyboardInterrupt, EOFError):
|
||||
sys.exit(1)
|
||||
@@ -62,6 +62,7 @@ ClientInstall = cli.install_tool(
|
||||
verbose=True,
|
||||
console_format='%(message)s',
|
||||
uninstall_log_file_name=paths.IPACLIENT_UNINSTALL_LOG,
|
||||
ignore_return_codes=(client.CLIENT_NOT_CONFIGURED,),
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -1,767 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2019 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
# Configure the Samba suite to operate as domain member in IPA domain
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import logging
|
||||
import os
|
||||
import gssapi
|
||||
from urllib.parse import urlsplit
|
||||
from optparse import OptionParser # pylint: disable=deprecated-module
|
||||
from contextlib import contextmanager
|
||||
|
||||
from ipaclient import discovery
|
||||
from ipaclient.install.client import (
|
||||
CLIENT_NOT_CONFIGURED,
|
||||
CLIENT_ALREADY_CONFIGURED,
|
||||
)
|
||||
from ipalib import api, errors
|
||||
from ipalib.install import sysrestore
|
||||
from ipalib.util import check_client_configuration
|
||||
from ipalib.request import context
|
||||
from ipapython import ipautil
|
||||
from ipapython.errors import SetseboolError
|
||||
from ipapython.ipa_log_manager import standard_logging_setup
|
||||
from ipapython.dnsutil import DNSName
|
||||
from ipaplatform.tasks import tasks
|
||||
from ipaplatform.paths import paths
|
||||
from ipaplatform.constants import constants
|
||||
from ipaplatform import services
|
||||
from ipapython.admintool import ScriptError
|
||||
from samba import generate_random_password
|
||||
|
||||
logger = logging.getLogger(os.path.basename(__file__))
|
||||
logger.setLevel(logging.DEBUG)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def use_api_as_principal(principal, keytab):
|
||||
with ipautil.private_ccache() as ccache_file:
|
||||
try:
|
||||
old_principal = getattr(context, "principal", None)
|
||||
name = gssapi.Name(principal, gssapi.NameType.kerberos_principal)
|
||||
store = {"ccache": ccache_file, "client_keytab": keytab}
|
||||
gssapi.Credentials(name=name, usage="initiate", store=store)
|
||||
# Finalize API when TGT obtained using host keytab exists
|
||||
if not api.isdone("finalize"):
|
||||
api.finalize()
|
||||
|
||||
# Now we have a TGT, connect to IPA
|
||||
try:
|
||||
if api.Backend.rpcclient.isconnected():
|
||||
api.Backend.rpcclient.disconnect()
|
||||
api.Backend.rpcclient.connect()
|
||||
|
||||
yield
|
||||
except gssapi.exceptions.GSSError as e:
|
||||
raise Exception(
|
||||
"Unable to bind to IPA server. Error initializing "
|
||||
"principal %s in %s: %s" % (principal, keytab, str(e))
|
||||
)
|
||||
finally:
|
||||
if api.Backend.rpcclient.isconnected():
|
||||
api.Backend.rpcclient.disconnect()
|
||||
setattr(context, "principal", old_principal)
|
||||
|
||||
|
||||
def parse_options():
|
||||
usage = "%prog [options]\n"
|
||||
parser = OptionParser(usage=usage)
|
||||
parser.add_option(
|
||||
"--server",
|
||||
dest="server",
|
||||
help="FQDN of IPA server to connect to",
|
||||
)
|
||||
parser.add_option(
|
||||
"--netbios-name",
|
||||
dest="netbiosname",
|
||||
help="NetBIOS name of this machine",
|
||||
default=None,
|
||||
)
|
||||
parser.add_option(
|
||||
"--no-homes",
|
||||
dest="no_homes",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Do not add [homes] share to the generated Samba configuration",
|
||||
)
|
||||
parser.add_option(
|
||||
"--no-nfs",
|
||||
dest="no_nfs",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Do not allow NFS integration (SELinux booleans)",
|
||||
)
|
||||
parser.add_option(
|
||||
"--force",
|
||||
dest="force",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="force installation by redoing all steps",
|
||||
)
|
||||
parser.add_option(
|
||||
"--debug",
|
||||
dest="debug",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="print debugging information",
|
||||
)
|
||||
parser.add_option(
|
||||
"-U",
|
||||
"--unattended",
|
||||
dest="unattended",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="unattended installation never prompts the user",
|
||||
)
|
||||
parser.add_option(
|
||||
"--uninstall",
|
||||
dest="uninstall",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Revert configuration and remove SMB service",
|
||||
)
|
||||
|
||||
options, args = parser.parse_args()
|
||||
return options, args
|
||||
|
||||
|
||||
domain_information_template = """
|
||||
Domain name: {domain_name}
|
||||
NetBIOS name: {netbios_name}
|
||||
SID: {domain_sid}
|
||||
ID range: {range_id_min} - {range_id_max}
|
||||
"""
|
||||
|
||||
|
||||
def pretty_print_domain_information(info):
|
||||
result = []
|
||||
for domain in info:
|
||||
result.append(domain_information_template.format(**domain))
|
||||
return "\n".join(result)
|
||||
|
||||
|
||||
trust_keymap = {
|
||||
"netbios_name": "ipantflatname",
|
||||
"domain_sid": "ipantsecurityidentifier",
|
||||
"domain_name": "cn",
|
||||
}
|
||||
|
||||
|
||||
trust_keymap_trustdomain = {
|
||||
"netbios_name": "ipantflatname",
|
||||
"domain_sid": "ipanttrusteddomainsid",
|
||||
"domain_name": "cn",
|
||||
}
|
||||
|
||||
|
||||
def retrieve_domain_information(api):
|
||||
# Pull down default domain configuration
|
||||
# IPA master might be missing freeipa-server-trust-ad package
|
||||
# or `ipa-adtrust-install` was never run. In such case return
|
||||
# empty list to report an error
|
||||
try:
|
||||
tc_command = api.Command.trustconfig_show
|
||||
except AttributeError:
|
||||
return []
|
||||
try:
|
||||
result = tc_command()["result"]
|
||||
except errors.PublicError:
|
||||
return []
|
||||
|
||||
l_domain = dict()
|
||||
for key in trust_keymap:
|
||||
l_domain[key] = result.get(trust_keymap[key], [None])[0]
|
||||
|
||||
# Pull down ID range and other details of our domain
|
||||
#
|
||||
# TODO: make clear how to handle multiple ID ranges for ipa-local range
|
||||
# In Samba only one range can belong to the same idmap domain,
|
||||
# otherwise winbindd's _wbint_Sids2UnixIDs function will not be able
|
||||
# to accept that a mapped Unix ID belongs to the specified domain
|
||||
idrange_local = "{realm}_id_range".format(realm=api.env.realm)
|
||||
result = api.Command.idrange_show(idrange_local)["result"]
|
||||
l_domain["range_id_min"] = int(result["ipabaseid"][0])
|
||||
l_domain["range_id_max"] = (
|
||||
int(result["ipabaseid"][0]) + int(result["ipaidrangesize"][0]) - 1
|
||||
)
|
||||
|
||||
domains = [l_domain]
|
||||
|
||||
# Retrieve list of trusted domains, if they exist
|
||||
#
|
||||
# We flatten the whole trust list because it should be non-overlapping
|
||||
result = api.Command.trust_find()["result"]
|
||||
for forest in result:
|
||||
r = api.Command.trustdomain_find(forest["cn"][0], all=True, raw=True)[
|
||||
"result"
|
||||
]
|
||||
# We don't need to process forest root info separately
|
||||
# as trustdomain_find() returns it as well
|
||||
for dom in r:
|
||||
r_dom = dict()
|
||||
for key in trust_keymap:
|
||||
r_dom[key] = dom.get(trust_keymap_trustdomain[key], [None])[0]
|
||||
|
||||
r_idrange_name = "{realm}_id_range".format(
|
||||
realm=r_dom["domain_name"].upper()
|
||||
)
|
||||
|
||||
# TODO: support ipa-ad-trust-posix range as well
|
||||
r_idrange = api.Command.idrange_show(r_idrange_name)["result"]
|
||||
r_dom["range_id_min"] = int(r_idrange["ipabaseid"][0])
|
||||
r_dom["range_id_max"] = (
|
||||
int(r_idrange["ipabaseid"][0]) +
|
||||
int(r_idrange["ipaidrangesize"][0]) - 1
|
||||
)
|
||||
domains.append(r_dom)
|
||||
return domains
|
||||
|
||||
|
||||
smb_conf_template = """
|
||||
[global]
|
||||
# Limit number of forked processes to avoid SMBLoris attack
|
||||
max smbd processes = 1000
|
||||
# Use dedicated Samba keytab. The key there must be synchronized
|
||||
# with Samba tdb databases or nothing will work
|
||||
dedicated keytab file = FILE:${samba_keytab}
|
||||
kerberos method = dedicated keytab
|
||||
# Set up logging per machine and Samba process
|
||||
log file = /var/log/samba/log.%m
|
||||
log level = 1
|
||||
# We force 'member server' role to allow winbind automatically
|
||||
# discover what is supported by the domain controller side
|
||||
server role = member server
|
||||
realm = ${realm}
|
||||
netbios name = ${machine_name}
|
||||
workgroup = ${netbios_name}
|
||||
# Local writable range for IDs not coming from IPA or trusted domains
|
||||
idmap config * : range = 0 - 0
|
||||
idmap config * : backend = tdb
|
||||
"""
|
||||
|
||||
idmap_conf_domain_snippet = """
|
||||
idmap config ${netbios_name} : range = ${range_id_min} - ${range_id_max}
|
||||
idmap config ${netbios_name} : backend = sss
|
||||
"""
|
||||
|
||||
homes_conf_snippet = """
|
||||
# Default homes share
|
||||
[homes]
|
||||
read only = no
|
||||
"""
|
||||
|
||||
|
||||
def configure_smb_conf(fstore, statestore, options, domains):
|
||||
sub_dict = {
|
||||
"samba_keytab": paths.SAMBA_KEYTAB,
|
||||
"realm": api.env.realm,
|
||||
"machine_name": options.netbiosname,
|
||||
}
|
||||
|
||||
# First domain in the list is ours, pull our domain name from there
|
||||
sub_dict["netbios_name"] = domains[0]["netbios_name"]
|
||||
|
||||
# Construct elements of smb.conf by pre-rendering idmap configuration
|
||||
template = [smb_conf_template]
|
||||
for dom in domains:
|
||||
template.extend([ipautil.template_str(idmap_conf_domain_snippet, dom)])
|
||||
|
||||
# Add default homes share so that users can log into Samba
|
||||
if not options.no_homes:
|
||||
template.extend([homes_conf_snippet])
|
||||
|
||||
fstore.backup_file(paths.SMB_CONF)
|
||||
with open(paths.SMB_CONF, "w") as f:
|
||||
f.write(ipautil.template_str("\n".join(template), sub_dict))
|
||||
tasks.restore_context(paths.SMB_CONF)
|
||||
|
||||
|
||||
def generate_smb_machine_account(fstore, statestore, options, domain):
|
||||
# Ideally, we should be using generate_random_machine_password()
|
||||
# from samba but it uses munged UTF-16 which is not decodable
|
||||
# by the code called from 'net changesecretpw -f'. Thus, we'd limit
|
||||
# password to ASCII only.
|
||||
return generate_random_password(128, 255)
|
||||
|
||||
|
||||
def retrieve_service_principal(
|
||||
fstore, statestore, options, domain, principal, password
|
||||
):
|
||||
# Use explicit encryption types. SMB service must have arcfour-hmac
|
||||
# generated to allow domain member to authenticate to the domain controller
|
||||
args = [
|
||||
paths.IPA_GETKEYTAB,
|
||||
"-p",
|
||||
principal,
|
||||
"-k",
|
||||
paths.SAMBA_KEYTAB,
|
||||
"-P",
|
||||
"-e",
|
||||
"aes128-cts-hmac-sha1-96,aes256-cts-hmac-sha1-96,arcfour-hmac",
|
||||
]
|
||||
try:
|
||||
ipautil.run(args, stdin=password + "\n" + password, encoding="utf-8")
|
||||
except ipautil.CalledProcessError as e:
|
||||
logger.error(
|
||||
"Cannot set machine account password at IPA DC. Error: %s",
|
||||
e,
|
||||
)
|
||||
raise
|
||||
|
||||
# Once we fetched the keytab, we also need to set ipaNTHash attribute
|
||||
# Use ipa-pwd-extop plugin to regenerate it from the Kerberos key
|
||||
value = "ipaNTHash=MagicRegen"
|
||||
try:
|
||||
api.Command.service_mod(principal, addattr=value)
|
||||
except errors.PublicError as e:
|
||||
logger.error(
|
||||
"Cannot update %s principal NT hash value due to an error: %s",
|
||||
principal,
|
||||
e,
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
def populate_samba_databases(fstore, statestore, options, domain, password):
    """Seed the local Samba databases for an offline domain join.

    Sets the domain SID, prepares the machine account secrets via
    tdbtool, and finally stores the machine account password with
    ``net changesecretpw -f``. Raises on any failing external command.
    """
    # First, set domain SID in Samba
    args = [paths.NET, "setdomainsid", domain["domain_sid"]]
    try:
        ipautil.run(args)
    except ipautil.CalledProcessError as e:
        logger.error("Cannot set domain SID in Samba. Error: %s", e)
        raise

    # Next, make sure we can set machine account credentials
    # the workaround with tdbtool is temporary until 'net' utility
    # will not provide us a way to perform 'offline join' procedure
    secrets_key = "SECRETS/MACHINE_LAST_CHANGE_TIME/{}".format(
        domain["netbios_name"]
    )
    args = [paths.TDBTOOL, paths.SECRETS_TDB, "store", secrets_key, "2\\00"]
    try:
        ipautil.run(args)
    except ipautil.CalledProcessError as e:
        logger.error(
            "Cannot prepare machine account creds in Samba. Error: %s", e,
        )
        raise

    secrets_key = "SECRETS/MACHINE_PASSWORD/{}".format(domain["netbios_name"])
    args = [paths.TDBTOOL, paths.SECRETS_TDB, "store", secrets_key, "2\\00"]
    try:
        ipautil.run(args)
    except ipautil.CalledProcessError as e:
        logger.error(
            "Cannot prepare machine account creds in Samba. Error: %s", e,
        )
        raise

    # Finally, set actual machine account's password
    args = [paths.NET, "changesecretpw", "-f"]
    try:
        ipautil.run(args, stdin=password, encoding="utf-8")
    except ipautil.CalledProcessError as e:
        logger.error(
            "Cannot set machine account creds in Samba. Error: %s", e,
        )
        raise
|
||||
|
||||
|
||||
def configure_default_groupmap(fstore, statestore, options, domain):
    """Map the BUILTIN\\Guests SID to the Unix group 'nobody'.

    An "already mapped" failure from ``net groupmap add`` is tolerated;
    any other failure is logged and re-raised.
    """
    args = [
        paths.NET,
        "groupmap",
        "add",
        "sid=S-1-5-32-546",
        "unixgroup=nobody",
        "type=builtin",
    ]

    logger.info("Map BUILTIN\\Guests to a group 'nobody'")
    try:
        ipautil.run(args)
    except ipautil.CalledProcessError as e:
        # A pre-existing mapping is not an error for idempotent reruns
        if "already mapped to SID S-1-5-32-546" not in e.stdout:
            logger.error(
                'Cannot map BUILTIN\\Guests to a group "nobody". Error: %s',
                e
            )
            raise
|
||||
|
||||
|
||||
def set_selinux_booleans(booleans, statestore, backup=True):
    """Apply SELinux booleans, optionally recording their prior state.

    When ``backup`` is True, the previous value of each boolean is saved
    into ``statestore`` under the "selinux" module so uninstall can
    restore it. Failures to set booleans are reported but not fatal.
    """
    if backup:
        def _record_state(name, value):
            statestore.backup_state("selinux", name, value)

        backup_func = _record_state
    else:
        backup_func = None

    try:
        tasks.set_selinux_booleans(booleans, backup_func=backup_func)
    except SetseboolError as e:
        print("WARNING: " + str(e))
        logger.info("WARNING: %s", e)
|
||||
|
||||
|
||||
def harden_configuration(fstore, statestore, options, domain):
    """Enable the SELinux booleans needed by the requested Samba shares.

    Which booleans are toggled depends on the --no-homes / --no-nfs
    options; previous values are backed up into ``statestore``.
    """
    booleans = constants.SELINUX_BOOLEAN_SMBSERVICE

    # Add default homes share so that users can log into Samba
    if not options.no_homes:
        set_selinux_booleans(booleans["share_home_dirs"], statestore)

    # Allow Samba to access NFS-shared content
    if not options.no_nfs:
        set_selinux_booleans(booleans["reshare_nfs_with_samba"], statestore)
|
||||
|
||||
|
||||
def uninstall(fstore, statestore, options):
    """Revert the Samba domain member configuration on this host.

    Stops and disables smb/winbind, restores SELinux booleans, removes
    the credential cache, smb.conf, Samba tdb databases, the cifs/...
    keytab entries and finally the service principal on the IPA master.
    """
    # Shut down Samba services and disable them
    smb = services.service("smb", api)
    winbind = services.service("winbind", api)
    for svc in (smb, winbind):
        if svc.is_running():
            svc.stop()
        svc.disable()

    # Restore the state of affected selinux booleans
    boolean_states = {}
    # BUG FIX: SELINUX_BOOLEAN_SMBSERVICE maps a use case name to a dict
    # of booleans (see harden_configuration). Iterating the mapping
    # directly yielded the use case *name* strings, so the inner loop
    # walked their characters. Iterate the per-use-case dicts instead.
    for usecase in constants.SELINUX_BOOLEAN_SMBSERVICE.values():
        for name in usecase:
            boolean_states[name] = statestore.restore_state("selinux", name)

    if boolean_states:
        set_selinux_booleans(boolean_states, statestore, backup=False)

    # Remove samba's credentials cache
    ipautil.remove_ccache(ccache_path=paths.KRB5CC_SAMBA)

    # Remove samba's configuration file
    if fstore.has_file(paths.SMB_CONF):
        ipautil.remove_file(paths.SMB_CONF)
        fstore.restore_file(paths.SMB_CONF)

    # Remove samba's persistent and temporary tdb files.
    # BUG FIX: os.listdir() returns base names, so removing the bare
    # name only worked from inside SAMBA_DIR; join with the directory.
    tdb_files = [
        os.path.join(paths.SAMBA_DIR, tdb_file)
        for tdb_file in os.listdir(paths.SAMBA_DIR)
        if tdb_file.endswith(".tdb")
    ]
    for tdb_file in tdb_files:
        ipautil.remove_file(tdb_file)

    # Remove our keys from samba's keytab
    if os.path.exists(paths.SAMBA_KEYTAB):
        try:
            ipautil.run(
                [
                    paths.IPA_RMKEYTAB,
                    "--principal",
                    api.env.smb_princ,
                    "-k",
                    paths.SAMBA_KEYTAB,
                ]
            )
        except ipautil.CalledProcessError as e:
            # return code 5 == principal not found in the keytab, which
            # is acceptable during uninstall
            if e.returncode != 5:
                logger.critical("Failed to remove old key for %s",
                                api.env.smb_princ)

    # Remove the cifs/... service principal on the IPA master
    with use_api_as_principal(api.env.host_princ, paths.KRB5_KEYTAB):
        try:
            api.Command.service_del(api.env.smb_princ)
        except errors.VersionError as e:
            print("This client is incompatible: " + str(e))
        except errors.NotFound:
            logger.debug("No SMB service principal exists, OK to proceed")
        except errors.PublicError as e:
            logger.error(
                "Cannot connect to the server due to "
                "a generic error: %s", e,
            )
|
||||
|
||||
|
||||
def run():
    """Entry point: install or uninstall the Samba domain member setup.

    Returns a process exit code (0 on success). Handles both
    --uninstall and the multi-step install flow (server discovery,
    SMB principal creation, smb.conf generation, database population,
    group mapping and SELinux hardening), recording progress in the
    statestore so reruns are idempotent.
    """
    try:
        check_client_configuration()
    except ScriptError as e:
        print(e.msg)
        return e.rval

    fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
    statestore = sysrestore.StateFile(paths.IPA_CLIENT_SYSRESTORE)

    options, _args = parse_options()

    logfile = paths.IPACLIENTSAMBA_INSTALL_LOG
    if options.uninstall:
        logfile = paths.IPACLIENTSAMBA_UNINSTALL_LOG

    standard_logging_setup(
        logfile,
        verbose=False,
        debug=options.debug,
        filemode="a",
        console_format="%(message)s",
    )

    cfg = dict(
        context="cli_installer",
        confdir=paths.ETC_IPA,
        in_server=False,
        debug=options.debug,
        verbose=0,
    )

    # Bootstrap API early so that env object is available
    api.bootstrap(**cfg)

    local_config = dict(
        host_princ=str("host/%s@%s" % (api.env.host, api.env.realm)),
        smb_princ=str("cifs/%s@%s" % (api.env.host, api.env.realm)),
    )

    # Until api.finalize() is called, we can add our own configuration
    api.env._merge(**local_config)

    if options.uninstall:
        if statestore.has_state("domain_member"):
            uninstall(fstore, statestore, options)
            try:
                # Clear every state key recorded by the install steps
                keys = (
                    "configured", "hardening", "groupmap", "tdb",
                    "service.principal", "smb.conf"
                )
                for key in keys:
                    statestore.delete_state("domain_member", key)
            except Exception as e:
                print(
                    "Error: Failed to remove the domain_member statestores: "
                    "%s" % e
                )
                return 1
            else:
                print(
                    "Samba configuration is reverted. "
                    "However, Samba databases were fully cleaned and "
                    "old configuration file will not be usable anymore."
                )
        else:
            print("Samba domain member is not configured yet")
        return 0

    ca_cert_path = None
    if os.path.exists(paths.IPA_CA_CRT):
        ca_cert_path = paths.IPA_CA_CRT

    if statestore.has_state("domain_member") and not options.force:
        print("Samba domain member is already configured")
        return CLIENT_ALREADY_CONFIGURED

    if not os.path.exists(paths.SMBD):
        print("Samba suite is not installed")
        return CLIENT_NOT_CONFIGURED

    autodiscover = False
    ds = discovery.IPADiscovery()
    if not options.server:
        # No server given: try DNS autodiscovery, then fall back to the
        # server named in the client's xmlrpc_uri
        print("Searching for IPA server...")
        ret = ds.search(ca_cert_path=ca_cert_path)
        logger.debug("Executing DNS discovery")
        if ret == discovery.NO_LDAP_SERVER:
            logger.debug("Autodiscovery did not find LDAP server")
            s = urlsplit(api.env.xmlrpc_uri)
            server = [s.netloc]
            logger.debug("Setting server to %s", s.netloc)
        else:
            autodiscover = True
            if not ds.servers:
                print(
                    "Autodiscovery was successful but didn't return a server"
                )
                return 1
            logger.debug(
                "Autodiscovery success, possible servers %s",
                ",".join(ds.servers),
            )
            server = ds.servers[0]
    else:
        # Explicit server: verify it really is an IPA LDAP server
        server = options.server
        logger.debug("Verifying that %s is an IPA server", server)
        ldapret = ds.ipacheckldap(server, api.env.realm, ca_cert_path)
        if ldapret[0] == discovery.NO_ACCESS_TO_LDAP:
            print("Anonymous access to the LDAP server is disabled.")
            print("Proceeding without strict verification.")
            print(
                "Note: This is not an error if anonymous access has been "
                "explicitly restricted."
            )
        elif ldapret[0] == discovery.NO_TLS_LDAP:
            logger.warning("Unencrypted access to LDAP is not supported.")
        elif ldapret[0] != 0:
            print("Unable to confirm that %s is an IPA server" % server)
            return 1

    if not autodiscover:
        print("IPA server: %s" % server)
        logger.debug("Using fixed server %s", server)
    else:
        print("IPA server: DNS discovery")
        logger.info("Configured to use DNS discovery")

    if api.env.host == server:
        logger.error(
            "Cannot run on IPA master. "
            "Cannot configure Samba as a domain member on a domain "
            "controller. Please use ipa-adtrust-install for that!"
        )
        return 1

    if not options.netbiosname:
        # Default NetBIOS name: first DNS label of the host name
        options.netbiosname = DNSName.from_text(api.env.host)[0].decode()
    options.netbiosname = options.netbiosname.upper()

    with use_api_as_principal(api.env.host_princ, paths.KRB5_KEYTAB):
        try:
            # Try to access 'service_add_smb' command, if it throws
            # AttributeError exception, the IPA server doesn't support
            # setting up Samba as a domain member.
            service_add_smb = api.Command.service_add_smb

            # Now try to see if SMB principal already exists
            api.Command.service_show(api.env.smb_princ)

            # If no exception was raised, the object exists.
            # We cannot continue because we would break existing configuration
            print(
                "WARNING: SMB service principal %s already exists. "
                "Please remove it before proceeding." % (api.env.smb_princ)
            )
            if not options.force:
                return 1
            # For --force, we should then delete cifs/.. service object
            api.Command.service_del(api.env.smb_princ)
        except AttributeError:
            logger.error(
                "Chosen IPA master %s does not have support to "
                "set up Samba domain members", server,
            )
            return 1
        except errors.VersionError as e:
            print("This client is incompatible: " + str(e))
            return 1
        except errors.NotFound:
            logger.debug("No SMB service principal exists, OK to proceed")
        except errors.PublicError as e:
            logger.error(
                "Cannot connect to the server due to "
                "a generic error: %s", e,
            )
            return 1

        # At this point we have proper setup:
        # - we connected to IPA API end-point as a host principal
        # - no cifs/... principal exists so we can create it
        print("Chosen IPA master: %s" % server)
        print("SMB principal to be created: %s" % api.env.smb_princ)
        print("NetBIOS name to be used: %s" % options.netbiosname)
        logger.info("Chosen IPA master: %s", server)
        logger.info("SMB principal to be created: %s", api.env.smb_princ)
        logger.info("NetBIOS name to be used: %s", options.netbiosname)

        # 1. Pull down ID range and other details of known domains
        domains = retrieve_domain_information(api)
        if len(domains) == 0:
            # logger.error() produces both log file and stderr output
            logger.error("No configured trust controller detected "
                         "on IPA masters. Use ipa-adtrust-install on an IPA "
                         "master to configure trust controller role.")
            return 1

        str_info = pretty_print_domain_information(domains)
        logger.info("Discovered domains to use:\n%s", str_info)
        print("Discovered domains to use:\n%s" % str_info)

        if not options.unattended and not ipautil.user_input(
            "Continue to configure the system with these values?", False
        ):
            print("Installation aborted")
            return 1

        # 2. Create SMB service principal, if we are here, the command exists
        if (
            not statestore.get_state("domain_member", "service.principal") or
            options.force
        ):
            service_add_smb(api.env.host, options.netbiosname)
            statestore.backup_state(
                "domain_member", "service.principal", "configured"
            )

        # 3. Generate machine account password for reuse
        password = generate_smb_machine_account(
            fstore, statestore, options, domains[0]
        )

        # 4. Now that we have all domains retrieved, we can generate smb.conf
        if (
            not statestore.get_state("domain_member", "smb.conf") or
            options.force
        ):
            configure_smb_conf(fstore, statestore, options, domains)
            statestore.backup_state("domain_member", "smb.conf", "configured")

        # 5. Create SMB service
        if statestore.get_state("domain_member",
                                "service.principal") == "configured":
            retrieve_service_principal(
                fstore, statestore, options, domains[0],
                api.env.smb_princ, password
            )
            statestore.backup_state(
                "domain_member", "service.principal", "configured"
            )

        # 6. Configure databases to contain proper details
        if not statestore.get_state("domain_member", "tdb") or options.force:
            populate_samba_databases(
                fstore, statestore, options, domains[0], password
            )
            statestore.backup_state("domain_member", "tdb", "configured")

        # 7. Configure default group mapping
        if (
            not statestore.get_state("domain_member", "groupmap") or
            options.force
        ):
            configure_default_groupmap(fstore, statestore, options, domains[0])
            statestore.backup_state("domain_member", "groupmap", "configured")

        # 8. Enable SELinux policies
        if (
            not statestore.get_state("domain_member", "hardening") or
            options.force
        ):
            harden_configuration(fstore, statestore, options, domains[0])
            statestore.backup_state("domain_member", "hardening", "configured")

        # 9. Finally, store the state of upgrade
        statestore.backup_state("domain_member", "configured", True)

    # Suggest service start only after validating smb.conf
    print(
        "Samba domain member is configured. "
        "Please check configuration at %s and "
        "start smb and winbind services" % paths.SMB_CONF
    )
    logger.info(
        "Samba domain member is configured. "
        "Please check configuration at %s and "
        "start smb and winbind services",
        paths.SMB_CONF,
    )

    return 0
|
||||
@@ -1,827 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
"""This tool prepares then sends email notifications to users
|
||||
whose passwords are expiring in the near future.
|
||||
"""
|
||||
|
||||
|
||||
from __future__ import absolute_import, print_function
|
||||
|
||||
import ast
|
||||
import grp
|
||||
import json
|
||||
import os
|
||||
import pwd
|
||||
import logging
|
||||
import smtplib
|
||||
import time
|
||||
|
||||
from collections import deque
|
||||
from datetime import datetime, timedelta
|
||||
from email.utils import formataddr, formatdate
|
||||
from email.mime.multipart import MIMEMultipart
|
||||
from email.mime.text import MIMEText
|
||||
from email.header import Header
|
||||
from email.utils import make_msgid
|
||||
from socket import error as socketerror
|
||||
|
||||
from ipaplatform.paths import paths
|
||||
from ipalib import api, errors
|
||||
from ipalib.facts import is_ipa_client_configured
|
||||
from ipapython import admintool, ipaldap
|
||||
from ipapython.dn import DN
|
||||
|
||||
from jinja2 import Environment, FileSystemLoader, TemplateSyntaxError
|
||||
|
||||
|
||||
# Location of the EPN configuration file merged into api.env
EPN_CONF = "/etc/ipa/epn.conf"
# Defaults for every supported epn.conf option (see _read_configuration)
EPN_CONFIG = {
    "smtp_server": "localhost",
    "smtp_port": 25,
    "smtp_user": None,
    "smtp_password": None,
    "smtp_timeout": 60,
    # one of: none, starttls, ssl (validated in _validate_configuration)
    "smtp_security": "none",
    "smtp_admin": "root@localhost",
    # delay between messages in milliseconds (see _send_emails)
    "smtp_delay": None,
    "mail_from": None,
    # comma-separated days-to-expiration at which to notify
    "notify_ttls": "28,14,7,3,1",
    "msg_charset": "utf8",
    "msg_subtype": "plain",
    "msg_subject": "Your password will expire soon.",
}

logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def drop_privileges(new_username="daemon", new_groupname="daemon"):
    """Drop privileges, defaults to daemon:daemon.

    No-op when not running as root. Failures are logged, never raised.
    """
    try:
        if os.getuid() != 0:
            return

        os.setgroups([])
        # BUG FIX: the lookups were swapped — setgid() was given the
        # user's pw_uid and setuid() the group's gr_gid. Use the group
        # database for the gid and the passwd database for the uid,
        # dropping the group first while we are still root.
        os.setgid(grp.getgrnam(new_groupname).gr_gid)
        os.setuid(pwd.getpwnam(new_username).pw_uid)

        # Sanity check: we must no longer be root
        if os.getuid() == 0:
            raise Exception()

        logger.debug(
            "Dropped privileges to user=%s, group=%s",
            new_username,
            new_groupname,
        )

    except Exception as e:
        logger.error(
            "Failed to drop privileges to %s, %s: %s",
            new_username,
            new_groupname,
            e,
        )
|
||||
|
||||
|
||||
class EPNUserList:
    """Maintains a list of users whose passwords are expiring.

    Provides add(), check(), pop(), and json_print().
    From the outside, the list is considered always sorted:
    * displaying the list results in a sorted JSON representation thereof
    * pop() returns the "most urgent" item from the list.
    Internal implementation notes:
    * Uses a deque instead of a list for efficiency reasons
    * all add()-style methods MUST set _sorted to False.
    * all print() and pop-like methods MUST call _sort() first.
    """

    def __init__(self):
        # Deque stays unsorted until a reader forces a sort
        self._sorted = False
        self._expiring_password_user_dq = deque()

    def __bool__(self):
        """Truthy iff at least one user is queued."""
        return len(self._expiring_password_user_dq) > 0

    def __len__(self):
        """Return len(self)."""
        return len(self._expiring_password_user_dq)

    def get_ldap_attr(self, entry, attr):
        """Get a single value from a multi-valued attr in a safe way"""
        values = entry.get(attr, [""])
        return str(values.pop(0))

    def add(self, entry):
        """Parse an LDAP user entry (uid, cn, givenname, sn,
        krbpasswordexpiration, mail) and queue it for notification.
        Entries without a mail address are logged and skipped.
        """
        try:
            self._sorted = False
            if entry.get("mail") is None:
                logger.error("IPA-EPN: No mail address defined for: %s",
                             entry.dn)
                return
            record = dict(
                uid=self.get_ldap_attr(entry, "uid"),
                cn=self.get_ldap_attr(entry, "cn"),
                givenname=self.get_ldap_attr(entry, "givenname"),
                sn=self.get_ldap_attr(entry, "sn"),
                krbpasswordexpiration=(
                    self.get_ldap_attr(entry, "krbpasswordexpiration")
                ),
                mail=str(entry.get("mail")),
            )
            self._expiring_password_user_dq.append(record)
        except IndexError as e:
            logger.info("IPA-EPN: Could not parse entry: %s", e)

    def pop(self):
        """Return the "most urgent" user to notify (popleft), or False
        when the queue is empty.
        """
        self._sort()
        try:
            return self._expiring_password_user_dq.popleft()
        except IndexError:
            return False

    def check(self):
        self.json_print(really_print=False)

    def json_print(self, really_print=True):
        """Dump the queue to JSON and verify it re-encodes to UTF-8.
        If really_print, print the result.
        """
        try:
            self._sort()
            serialized = json.dumps(
                list(self._expiring_password_user_dq),
                indent=4,
                ensure_ascii=False,
            )
            serialized.encode("utf8")
            if really_print:
                print(serialized)
        except Exception as e:
            logger.error("IPA-EPN: unexpected error: %s", e)

    def _sort(self):
        # Sort lazily, only when a reader needs ordered data
        if self._sorted:
            return
        if isinstance(self._expiring_password_user_dq, deque):
            ordered = sorted(
                self._expiring_password_user_dq,
                key=lambda item: item["krbpasswordexpiration"],
            )
            self._expiring_password_user_dq = deque(ordered)
        self._sorted = True
|
||||
|
||||
|
||||
class EPN(admintool.AdminTool):
    """Expiring Password Notifications (EPN) admin tool.

    Finds users whose passwords expire within the configured TTL
    windows and emails them a rendered notification template.
    """

    command_name = "IPA-EPN"
    log_file_name = paths.IPAEPN_LOG

    usage = "%prog [options]"
    description = "Expiring Password Notifications (EPN)"
|
||||
|
||||
    def __init__(self, options, args):
        super(EPN, self).__init__(options, args)
        self._conn = None  # LDAP connection, set by _get_connection()
        self._expiring_password_user_list = EPNUserList()
        self._ldap_data = []  # raw entries from _fetch_data_from_ldap()
        self._date_ranges = []  # (start, end) tuples, one per notify TTL
        self._mailer = None  # MailUserAgent, created in run() unless dry-run
        self.env = None  # Jinja2 Environment, set in _parse_configuration()
        self.default_email_domain = None  # from IPA config, for mail_from
|
||||
|
||||
    @classmethod
    def add_options(cls, parser):
        """Register EPN-specific command line options."""
        super(EPN, cls).add_options(parser, debug_option=True)
        parser.add_option(
            "--from-nbdays",
            dest="from_nbdays",
            action="store",
            default=None,
            help="minimal number of days",
        )
        parser.add_option(
            "--to-nbdays",
            dest="to_nbdays",
            action="store",
            default=None,
            help="maximal number of days",
        )
        parser.add_option(
            "--dry-run",
            dest="dry_run",
            action="store_true",
            default=False,
            help="Dry run mode. JSON ouput only.",
        )
        parser.add_option(
            "--mail-test",
            dest="mailtest",
            action="store_true",
            default=False,
            help="Send a test e-mail",
        )
|
||||
|
||||
    def validate_options(self):
        """Validate option combinations; exit via option_parser.error().

        Notes: --to-nbdays implies --dry-run; --from-nbdays requires
        --to-nbdays; --mail-test and --dry-run are mutually exclusive.
        """
        super(EPN, self).validate_options(needs_root=True)
        if self.options.to_nbdays is not None:
            try:
                if int(self.options.to_nbdays) < 0:
                    raise RuntimeError('Input is negative.')
            except Exception as e:
                self.option_parser.error(
                    "--to-nbdays must be a positive integer. "
                    "{error}".format(error=e)
                )
            # A custom upper bound implies a dry run
            self.options.dry_run = True
        if self.options.from_nbdays is not None:
            try:
                if int(self.options.from_nbdays) < 0:
                    raise RuntimeError('Input is negative.')
            except Exception as e:
                self.option_parser.error(
                    "--from-nbdays must be a positive integer. "
                    "{error}".format(error=e)
                )
        if self.options.from_nbdays is not None and \
                self.options.to_nbdays is not None:
            if int(self.options.from_nbdays) >= int(self.options.to_nbdays):
                self.option_parser.error(
                    "--from-nbdays must be smaller than --to-nbdays."
                )
        if self.options.from_nbdays is not None and \
                self.options.to_nbdays is None:
            self.option_parser.error(
                "You cannot specify --from-nbdays without --to-nbdays"
            )
        if self.options.mailtest and self.options.dry_run:
            self.option_parser.error(
                "You cannot specify --mail-test and --dry-run together"
            )
|
||||
|
||||
    def setup_logging(self, log_file_mode="a"):
        # NOTE: the log_file_mode argument is intentionally ignored;
        # the log file is always opened in append mode.
        super(EPN, self).setup_logging(log_file_mode="a")
|
||||
|
||||
    def run(self):
        """Main flow: configure, query LDAP, then print or send mail.

        Requires a configured IPA client. Privileges are dropped after
        the Kerberos/LDAP setup and before processing user data.
        """
        super(EPN, self).run()

        if not is_ipa_client_configured():
            logger.error("IPA client is not configured on this system.")
            raise admintool.ScriptError()

        self._get_krb5_ticket()
        self._read_configuration()
        self._validate_configuration()
        self._parse_configuration()
        self._get_connection()
        self._read_ipa_configuration()
        drop_privileges()
        if self.options.mailtest:
            # Fabricate one sample entry instead of querying LDAP
            self._gentestdata()
        else:
            if self.options.to_nbdays:
                self._build_cli_date_ranges()
            for date_range in self._date_ranges:
                self._fetch_data_from_ldap(date_range)
                self._parse_ldap_data()
        if self.options.dry_run:
            self._pretty_print_data()
        else:
            self._mailer = MailUserAgent(
                security_protocol=api.env.smtp_security,
                smtp_hostname=api.env.smtp_server,
                smtp_port=api.env.smtp_port,
                smtp_timeout=api.env.smtp_timeout,
                smtp_username=api.env.smtp_user,
                smtp_password=api.env.smtp_password,
                x_mailer=self.command_name,
                msg_subtype=api.env.msg_subtype,
                msg_charset=api.env.msg_charset,
            )
            self._send_emails()
|
||||
|
||||
def _get_date_range_from_nbdays(self, nbdays_end, nbdays_start=None):
|
||||
"""Detects current time and returns a date range, given a number
|
||||
of days in the future.
|
||||
If only nbdays_end is specified, the range is 1d long.
|
||||
"""
|
||||
now = datetime.utcnow()
|
||||
today_at_midnight = datetime.combine(now, datetime.min.time())
|
||||
range_end = today_at_midnight + timedelta(days=nbdays_end)
|
||||
if nbdays_start is not None:
|
||||
range_start = today_at_midnight + timedelta(days=nbdays_start)
|
||||
else:
|
||||
range_start = range_end - timedelta(days=1)
|
||||
|
||||
logger.debug(
|
||||
"IPA-EPN: Current date: %s \n"
|
||||
"IPA-EPN: Date & time, today at midnight: %s \n"
|
||||
"IPA-EPN: Date range start: %s \n"
|
||||
"IPA-EPN: Date range end: %s \n",
|
||||
now,
|
||||
today_at_midnight,
|
||||
range_start,
|
||||
range_end,
|
||||
)
|
||||
return (range_start, range_end)
|
||||
|
||||
def _datetime_to_generalized_time(self, dt):
|
||||
"""Convert datetime to LDAP_GENERALIZED_TIME_FORMAT
|
||||
Note: Consider moving into ipalib.
|
||||
"""
|
||||
dt = dt.timetuple()
|
||||
generalized_time_str = str(dt.tm_year) + "".join(
|
||||
"0" * (2 - len(str(item))) + str(item)
|
||||
for item in (
|
||||
dt.tm_mon,
|
||||
dt.tm_mday,
|
||||
dt.tm_hour,
|
||||
dt.tm_min,
|
||||
dt.tm_sec,
|
||||
)
|
||||
)
|
||||
return generalized_time_str + "Z"
|
||||
|
||||
def _get_krb5_ticket(self):
|
||||
"""Setup the environment to obtain a krb5 ticket for us using the
|
||||
system keytab.
|
||||
Uses CCACHE = MEMORY (limited to the current process).
|
||||
"""
|
||||
os.environ.setdefault("KRB5_CLIENT_KTNAME", "/etc/krb5.keytab")
|
||||
os.environ["KRB5CCNAME"] = "MEMORY:"
|
||||
|
||||
    def _read_configuration(self):
        """Merge in the EPN configuration from /etc/ipa/epn.conf"""
        base_config = dict(
            context="epn", confdir=paths.ETC_IPA, in_server=False,
        )
        api.bootstrap(**base_config)
        # Supply defaults for any option missing from epn.conf
        api.env._merge(**EPN_CONFIG)

        if not api.isdone("finalize"):
            api.finalize()
|
||||
|
||||
    def _validate_configuration(self):
        """Examine the user-provided configuration.

        Raises RuntimeError for invalid smtp_security, inconsistent
        smtp credentials, or unparsable notify_ttls / smtp_delay.
        """
        if api.env.smtp_security.lower() not in ("none", "starttls", "ssl"):
            raise RuntimeError(
                "smtp_security must be one of: " "none, starttls or ssl"
            )
        if api.env.smtp_user is not None and api.env.smtp_password is None:
            raise RuntimeError("smtp_user set and smtp_password is not")
        if api.env.notify_ttls is None:
            raise RuntimeError("notify_ttls must be set in %s" % EPN_CONF)
        try:
            # notify_ttls must be a comma-separated list of integers
            [int(k) for k in str(api.env.notify_ttls).split(',')]
        except ValueError as e:
            raise RuntimeError('Failed to parse notify_ttls: \'%s\': %s' %
                               (api.env.notify_ttls, e))
        if api.env.smtp_delay:
            try:
                float(api.env.smtp_delay)
            except ValueError as e:
                raise RuntimeError('smtp_delay is misformatted: %s' % e)
            if float(api.env.smtp_delay) < 0:
                raise RuntimeError('smtp_delay cannot be less than zero')
|
||||
|
||||
    def _parse_configuration(self):
        """Build per-TTL date ranges and the Jinja2 template environment.
        """
        daylist = [int(k) for k in str(api.env.notify_ttls).split(',')]
        daylist.sort()

        # One 1-day range per configured TTL; day + 1 makes the range
        # end cover the whole day the password expires on
        for day in daylist:
            self._date_ranges.append(
                self._get_date_range_from_nbdays(
                    nbdays_start=None, nbdays_end=day + 1
                )
            )

        # Message templates live under <confdir>/epn
        loader = FileSystemLoader(os.path.join(api.env.confdir, 'epn'))
        self.env = Environment(loader=loader)
|
||||
|
||||
    def _read_ipa_configuration(self):
        """Get the IPA configuration"""
        api.Backend.rpcclient.connect()
        result = api.Command.config_show()['result']
        # Used as the fallback domain for the mail_from address
        self.default_email_domain = result.get('ipadefaultemaildomain',
                                               [None])[0]
        api.Backend.rpcclient.disconnect()
|
||||
|
||||
    def _get_connection(self):
        """Create a connection to LDAP and bind to it.

        Tries LDAPI with external bind first, then falls back to
        LDAP over the network with a GSSAPI bind. Failures are logged;
        self._conn may remain a client that never bound.
        """
        if self._conn is not None:
            return self._conn

        try:
            # LDAPI
            self._conn = ipaldap.LDAPClient.from_realm(api.env.realm)
            self._conn.external_bind()
        except Exception:
            try:
                # LDAP + GSSAPI
                self._conn = ipaldap.LDAPClient.from_hostname_secure(
                    api.env.server
                )
                self._conn.gssapi_bind()
            except Exception as e:
                logger.error(
                    "Unable to bind to LDAP server %s: %s",
                    self._conn.ldap_uri,
                    e,
                )

        return self._conn
|
||||
|
||||
def _fetch_data_from_ldap(self, date_range):
|
||||
"""Run a LDAP query to fetch a list of user entries whose passwords
|
||||
would expire in the near future. Store in self._ldap_data.
|
||||
"""
|
||||
|
||||
if self._conn is None:
|
||||
logger.error(
|
||||
"IPA-EPN: Connection to LDAP not established. Exiting."
|
||||
)
|
||||
|
||||
search_base = DN(api.env.container_user, api.env.basedn)
|
||||
attrs_list = ["uid", "krbpasswordexpiration", "mail", "cn",
|
||||
"givenname", "surname"]
|
||||
|
||||
search_filter = (
|
||||
"(&(!(nsaccountlock=TRUE)) \
|
||||
(krbpasswordexpiration<=%s) \
|
||||
(krbpasswordexpiration>=%s))"
|
||||
% (
|
||||
self._datetime_to_generalized_time(date_range[1]),
|
||||
self._datetime_to_generalized_time(date_range[0]),
|
||||
)
|
||||
)
|
||||
|
||||
try:
|
||||
self._ldap_data = self._conn.get_entries(
|
||||
search_base,
|
||||
filter=search_filter,
|
||||
attrs_list=attrs_list,
|
||||
scope=self._conn.SCOPE_SUBTREE,
|
||||
)
|
||||
except errors.EmptyResult:
|
||||
logger.debug("Empty Result.")
|
||||
finally:
|
||||
logger.debug("%d entries found", len(self._ldap_data))
|
||||
|
||||
    def _parse_ldap_data(self):
        """Fill out self._expiring_password_user_list from data from ldap.

        Clears self._ldap_data afterwards so each date range is parsed
        exactly once.
        """
        if self._ldap_data:
            for entry in self._ldap_data:
                self._expiring_password_user_list.add(entry)
            # Validate json.
            try:
                self._pretty_print_data(really_print=False)
            except Exception as e:
                logger.error("IPA-EPN: Could not create JSON: %s", e)
            finally:
                self._ldap_data = []
|
||||
|
||||
def _pretty_print_data(self, really_print=True):
|
||||
"""Dump self._expiring_password_user_list to JSON.
|
||||
"""
|
||||
self._expiring_password_user_list.json_print(
|
||||
really_print=really_print
|
||||
)
|
||||
|
||||
def _send_emails(self):
|
||||
if self._mailer is None:
|
||||
logger.error("IPA-EPN: mailer was not configured.")
|
||||
return
|
||||
else:
|
||||
try:
|
||||
template = self.env.get_template("expire_msg.template")
|
||||
except TemplateSyntaxError as e:
|
||||
raise RuntimeError("Parsing template %s failed: %s" %
|
||||
(e.filename, e))
|
||||
if api.env.mail_from:
|
||||
mail_from = api.env.mail_from
|
||||
else:
|
||||
mail_from = "noreply@%s" % self.default_email_domain
|
||||
while self._expiring_password_user_list:
|
||||
entry = self._expiring_password_user_list.pop()
|
||||
body = template.render(
|
||||
uid=entry["uid"],
|
||||
first=entry["givenname"],
|
||||
last=entry["sn"],
|
||||
fullname=entry["cn"],
|
||||
expiration=entry["krbpasswordexpiration"],
|
||||
)
|
||||
self._mailer.send_message(
|
||||
mail_subject=api.env.msg_subject,
|
||||
mail_body=body,
|
||||
subscribers=ast.literal_eval(entry["mail"]),
|
||||
mail_from=mail_from,
|
||||
)
|
||||
now = datetime.utcnow()
|
||||
expdate = datetime.strptime(
|
||||
entry["krbpasswordexpiration"],
|
||||
'%Y-%m-%d %H:%M:%S')
|
||||
logger.debug(
|
||||
"Notified %s (%s). Password expiring in %d days at %s.",
|
||||
entry["mail"], entry["uid"], (expdate - now).days,
|
||||
expdate)
|
||||
if api.env.smtp_delay:
|
||||
time.sleep(float(api.env.smtp_delay) / 1000)
|
||||
self._mailer.cleanup()
|
||||
|
||||
def _gentestdata(self):
|
||||
"""Generate a sample user to process through the template.
|
||||
"""
|
||||
expdate = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
|
||||
entry = dict(
|
||||
uid=["SAUSER"],
|
||||
cn=["SAMPLE USER"],
|
||||
givenname=["SAMPLE"],
|
||||
sn=["USER"],
|
||||
krbpasswordexpiration=[expdate],
|
||||
mail=[api.env.smtp_admin],
|
||||
)
|
||||
self._expiring_password_user_list.add(entry)
|
||||
|
||||
def _build_cli_date_ranges(self):
|
||||
"""When self.options.to_nbdays is set, override the date ranges read
|
||||
from the configuration file and build the date ranges from the CLI
|
||||
options.
|
||||
"""
|
||||
self._date_ranges = []
|
||||
logger.debug("IPA-EPN: Ignoring configuration file ranges.")
|
||||
if self.options.from_nbdays is not None:
|
||||
self._date_ranges.append(
|
||||
self._get_date_range_from_nbdays(
|
||||
nbdays_start=int(self.options.from_nbdays),
|
||||
nbdays_end=int(self.options.to_nbdays),
|
||||
)
|
||||
)
|
||||
elif self.options.to_nbdays is not None:
|
||||
self._date_ranges.append(
|
||||
self._get_date_range_from_nbdays(
|
||||
nbdays_start=None, nbdays_end=int(self.options.to_nbdays)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class MTAClient:
|
||||
"""MTA Client class. Originally done for EPN.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
security_protocol="none",
|
||||
smtp_hostname="localhost",
|
||||
smtp_port=25,
|
||||
smtp_timeout=60,
|
||||
smtp_username=None,
|
||||
smtp_password=None,
|
||||
):
|
||||
# We only support "none" (cleartext) for now.
|
||||
# Future values: "ssl", "starttls"
|
||||
self._security_protocol = security_protocol
|
||||
self._smtp_hostname = smtp_hostname
|
||||
self._smtp_port = smtp_port
|
||||
self._smtp_timeout = smtp_timeout
|
||||
self._username = smtp_username
|
||||
self._password = smtp_password
|
||||
|
||||
# This should not be touched
|
||||
self._conn = None
|
||||
|
||||
if (
|
||||
self._security_protocol == "none"
|
||||
and "localhost" not in self._smtp_hostname
|
||||
):
|
||||
logger.error(
|
||||
"IPA-EPN: using cleartext for non-localhost SMTPd "
|
||||
"is not supported."
|
||||
)
|
||||
|
||||
self._connect()
|
||||
|
||||
def cleanup(self):
|
||||
self._disconnect()
|
||||
|
||||
def send_message(self, message_str=None, subscribers=None):
|
||||
result = None
|
||||
try:
|
||||
result = self._conn.sendmail(
|
||||
api.env.smtp_admin, subscribers, message_str,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.info("IPA-EPN: Failed to send mail: %s", e)
|
||||
finally:
|
||||
if result:
|
||||
for key in result:
|
||||
logger.info(
|
||||
"IPA-EPN: Failed to send mail to '%s': %s %s",
|
||||
key,
|
||||
result[key][0],
|
||||
result[key][1],
|
||||
)
|
||||
logger.info(
|
||||
"IPA-EPN: Failed to send mail to at least one recipient"
|
||||
)
|
||||
|
||||
def _connect(self):
|
||||
try:
|
||||
if self._security_protocol.lower() in ["none", "starttls"]:
|
||||
self._conn = smtplib.SMTP(
|
||||
host=self._smtp_hostname,
|
||||
port=self._smtp_port,
|
||||
timeout=self._smtp_timeout,
|
||||
)
|
||||
else:
|
||||
self._conn = smtplib.SMTP_SSL(
|
||||
host=self._smtp_hostname,
|
||||
port=self._smtp_port,
|
||||
timeout=self._smtp_timeout,
|
||||
)
|
||||
except (socketerror, smtplib.SMTPException) as e:
|
||||
msg = \
|
||||
"IPA-EPN: Could not connect to the configured SMTP server: " \
|
||||
"{host}:{port}: {error}".format(
|
||||
host=self._smtp_hostname,
|
||||
port=self._smtp_port,
|
||||
error=e
|
||||
)
|
||||
raise admintool.ScriptError(msg)
|
||||
|
||||
try:
|
||||
self._conn.ehlo()
|
||||
except smtplib.SMTPException as e:
|
||||
logger.error(
|
||||
"IPA-EPN: EHLO failed for host %s:%s: %s",
|
||||
self._smtp_hostname,
|
||||
self._smtp_port,
|
||||
e,
|
||||
)
|
||||
|
||||
if (
|
||||
self._conn.has_extn("STARTTLS")
|
||||
and self._security_protocol.lower() == "starttls"
|
||||
):
|
||||
try:
|
||||
self._conn.starttls()
|
||||
self._conn.ehlo()
|
||||
except smtplib.SMTPException as e:
|
||||
logger.error(
|
||||
"IPA-EPN: Unable to create an encrypted session to "
|
||||
"%s:%s: %s",
|
||||
self._smtp_hostname,
|
||||
self._smtp_port,
|
||||
e,
|
||||
)
|
||||
|
||||
if self._username and self._password:
|
||||
if self._conn.has_extn("AUTH"):
|
||||
try:
|
||||
self._conn.login(self._username, self._password)
|
||||
if self._security_protocol == "none":
|
||||
logger.warning(
|
||||
"IPA-EPN: Username and Password "
|
||||
"were sent in the clear."
|
||||
)
|
||||
except smtplib.SMTPAuthenticationError:
|
||||
raise RuntimeError(
|
||||
"IPA-EPN: Authentication to %s:%s failed, "
|
||||
"please check your username and/or password:" %
|
||||
(self._smtp_hostname,
|
||||
self._smtp_port,)
|
||||
)
|
||||
except smtplib.SMTPException as e:
|
||||
raise RuntimeError(
|
||||
"IPA-EPN: SMTP Error at %s:%s:%s" %
|
||||
(self._smtp_hostname,
|
||||
self._smtp_port,
|
||||
e,)
|
||||
)
|
||||
else:
|
||||
err_str = (
|
||||
"IPA-EPN: Server at %s:%s "
|
||||
"does not support authentication." %
|
||||
(self._smtp_hostname,
|
||||
self._smtp_port,)
|
||||
)
|
||||
logger.error(err_str)
|
||||
|
||||
def _disconnect(self):
|
||||
self._conn.quit()
|
||||
|
||||
|
||||
class MailUserAgent:
|
||||
"""The MUA class for EPN.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
security_protocol="none",
|
||||
smtp_hostname="localhost",
|
||||
smtp_port=25,
|
||||
smtp_timeout=60,
|
||||
smtp_username=None,
|
||||
smtp_password=None,
|
||||
x_mailer=None,
|
||||
msg_subtype="plain",
|
||||
msg_charset="utf8",
|
||||
):
|
||||
|
||||
self._x_mailer = x_mailer
|
||||
self._subject = None
|
||||
self._body = None
|
||||
self._subscribers = None
|
||||
|
||||
self._subtype = msg_subtype
|
||||
self._charset = msg_charset
|
||||
|
||||
self._msg = None
|
||||
self._message_str = None
|
||||
|
||||
self._mta_client = MTAClient(
|
||||
security_protocol=security_protocol,
|
||||
smtp_hostname=smtp_hostname,
|
||||
smtp_port=smtp_port,
|
||||
smtp_timeout=smtp_timeout,
|
||||
smtp_username=smtp_username,
|
||||
smtp_password=smtp_password,
|
||||
)
|
||||
|
||||
def cleanup(self):
|
||||
self._mta_client.cleanup()
|
||||
|
||||
def send_message(
|
||||
self, mail_subject=None, mail_body=None, subscribers=None,
|
||||
mail_from=None
|
||||
):
|
||||
"""Given mail_subject, mail_body, and subscribers, composes
|
||||
the message and sends it.
|
||||
"""
|
||||
if None in [mail_subject, mail_body, subscribers, mail_from]:
|
||||
logger.error("IPA-EPN: Tried to send an empty message.")
|
||||
return False
|
||||
self._compose_message(
|
||||
mail_subject=mail_subject,
|
||||
mail_body=mail_body,
|
||||
subscribers=subscribers,
|
||||
mail_from=mail_from,
|
||||
)
|
||||
self._mta_client.send_message(
|
||||
message_str=self._message_str, subscribers=subscribers
|
||||
)
|
||||
return True
|
||||
|
||||
def _compose_message(
|
||||
self, mail_subject, mail_body, subscribers, mail_from
|
||||
):
|
||||
"""The composer creates a MIME multipart message.
|
||||
"""
|
||||
|
||||
self._subject = mail_subject
|
||||
self._body = mail_body
|
||||
self._subscribers = subscribers
|
||||
|
||||
self._msg = MIMEMultipart(_charset=self._charset)
|
||||
self._msg["From"] = formataddr(("IPA-EPN", mail_from))
|
||||
self._msg["To"] = ", ".join(self._subscribers)
|
||||
self._msg["Date"] = formatdate(localtime=True)
|
||||
self._msg["Subject"] = Header(self._subject, self._charset)
|
||||
self._msg["Message-Id"] = make_msgid()
|
||||
self._msg.preamble = "Multipart message"
|
||||
if "X-Mailer" not in self._msg and self._x_mailer:
|
||||
self._msg.add_header("X-Mailer", self._x_mailer)
|
||||
self._msg.attach(
|
||||
MIMEText(
|
||||
self._body + "\n\n",
|
||||
_subtype=self._subtype,
|
||||
_charset=self._charset,
|
||||
)
|
||||
)
|
||||
self._message_str = self._msg.as_string()
|
||||
@@ -18,18 +18,566 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
import warnings
|
||||
from ipapython.ipachangeconf import IPAChangeConf as realIPAChangeConf
|
||||
import fcntl
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
|
||||
import six
|
||||
|
||||
if six.PY3:
|
||||
unicode = str
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
def openLocked(filename, perms):
|
||||
fd = -1
|
||||
try:
|
||||
fd = os.open(filename, os.O_RDWR | os.O_CREAT, perms)
|
||||
|
||||
fcntl.lockf(fd, fcntl.LOCK_EX)
|
||||
except OSError as e:
|
||||
if fd != -1:
|
||||
try:
|
||||
os.close(fd)
|
||||
except OSError:
|
||||
pass
|
||||
raise IOError(e.errno, e.strerror)
|
||||
return os.fdopen(fd, "r+")
|
||||
|
||||
|
||||
class IPAChangeConf(realIPAChangeConf):
|
||||
"""Advertise the old name"""
|
||||
|
||||
#TODO: add subsection as a concept
|
||||
# (ex. REALM.NAME = { foo = x bar = y } )
|
||||
#TODO: put section delimiters as separating element of the list
|
||||
# so that we can process multiple sections in one go
|
||||
#TODO: add a comment all but provided options as a section option
|
||||
class IPAChangeConf(object):
|
||||
def __init__(self, name):
|
||||
"""something"""
|
||||
warnings.warn(
|
||||
"Use 'ipapython.ipachangeconf.IPAChangeConfg'",
|
||||
DeprecationWarning,
|
||||
stacklevel=2
|
||||
)
|
||||
super(IPAChangeConf, self).__init__(name)
|
||||
self.progname = name
|
||||
self.indent = ("", "", "")
|
||||
self.assign = (" = ", "=")
|
||||
self.dassign = self.assign[0]
|
||||
self.comment = ("#",)
|
||||
self.dcomment = self.comment[0]
|
||||
self.eol = ("\n",)
|
||||
self.deol = self.eol[0]
|
||||
self.sectnamdel = ("[", "]")
|
||||
self.subsectdel = ("{", "}")
|
||||
self.case_insensitive_sections = True
|
||||
|
||||
def setProgName(self, name):
|
||||
self.progname = name
|
||||
|
||||
def setIndent(self, indent):
|
||||
if type(indent) is tuple:
|
||||
self.indent = indent
|
||||
elif type(indent) is str:
|
||||
self.indent = (indent, )
|
||||
else:
|
||||
raise ValueError('Indent must be a list of strings')
|
||||
|
||||
def setOptionAssignment(self, assign):
|
||||
if type(assign) is tuple:
|
||||
self.assign = assign
|
||||
else:
|
||||
self.assign = (assign, )
|
||||
self.dassign = self.assign[0]
|
||||
|
||||
def setCommentPrefix(self, comment):
|
||||
if type(comment) is tuple:
|
||||
self.comment = comment
|
||||
else:
|
||||
self.comment = (comment, )
|
||||
self.dcomment = self.comment[0]
|
||||
|
||||
def setEndLine(self, eol):
|
||||
if type(eol) is tuple:
|
||||
self.eol = eol
|
||||
else:
|
||||
self.eol = (eol, )
|
||||
self.deol = self.eol[0]
|
||||
|
||||
def setSectionNameDelimiters(self, delims):
|
||||
self.sectnamdel = delims
|
||||
|
||||
def setSubSectionDelimiters(self, delims):
|
||||
self.subsectdel = delims
|
||||
|
||||
def matchComment(self, line):
|
||||
for v in self.comment:
|
||||
if line.lstrip().startswith(v):
|
||||
return line.lstrip()[len(v):]
|
||||
return False
|
||||
|
||||
def matchEmpty(self, line):
|
||||
if line.strip() == "":
|
||||
return True
|
||||
return False
|
||||
|
||||
def matchSection(self, line):
|
||||
cl = "".join(line.strip().split())
|
||||
cl = cl.lower() if self.case_insensitive_sections else cl
|
||||
|
||||
if len(self.sectnamdel) != 2:
|
||||
return False
|
||||
if not cl.startswith(self.sectnamdel[0]):
|
||||
return False
|
||||
if not cl.endswith(self.sectnamdel[1]):
|
||||
return False
|
||||
return cl[len(self.sectnamdel[0]):-len(self.sectnamdel[1])]
|
||||
|
||||
def matchSubSection(self, line):
|
||||
if self.matchComment(line):
|
||||
return False
|
||||
|
||||
parts = line.split(self.dassign, 1)
|
||||
if len(parts) < 2:
|
||||
return False
|
||||
|
||||
if parts[1].strip() == self.subsectdel[0]:
|
||||
return parts[0].strip()
|
||||
|
||||
return False
|
||||
|
||||
def matchSubSectionEnd(self, line):
|
||||
if self.matchComment(line):
|
||||
return False
|
||||
|
||||
if line.strip() == self.subsectdel[1]:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def getSectionLine(self, section):
|
||||
if len(self.sectnamdel) != 2:
|
||||
return section
|
||||
return self._dump_line(self.sectnamdel[0],
|
||||
section,
|
||||
self.sectnamdel[1],
|
||||
self.deol)
|
||||
|
||||
def _dump_line(self, *args):
|
||||
return u"".join(unicode(x) for x in args)
|
||||
|
||||
def dump(self, options, level=0):
|
||||
output = []
|
||||
if level >= len(self.indent):
|
||||
level = len(self.indent) - 1
|
||||
|
||||
for o in options:
|
||||
if o['type'] == "section":
|
||||
output.append(self._dump_line(self.sectnamdel[0],
|
||||
o['name'],
|
||||
self.sectnamdel[1]))
|
||||
output.append(self.dump(o['value'], (level + 1)))
|
||||
continue
|
||||
if o['type'] == "subsection":
|
||||
output.append(self._dump_line(self.indent[level],
|
||||
o['name'],
|
||||
self.dassign,
|
||||
self.subsectdel[0]))
|
||||
output.append(self.dump(o['value'], (level + 1)))
|
||||
output.append(self._dump_line(self.indent[level],
|
||||
self.subsectdel[1]))
|
||||
continue
|
||||
if o['type'] == "option":
|
||||
delim = o.get('delim', self.dassign)
|
||||
if delim not in self.assign:
|
||||
raise ValueError('Unknown delim "%s" must be one of "%s"' % (delim, " ".join([d for d in self.assign])))
|
||||
output.append(self._dump_line(self.indent[level],
|
||||
o['name'],
|
||||
delim,
|
||||
o['value']))
|
||||
continue
|
||||
if o['type'] == "comment":
|
||||
output.append(self._dump_line(self.dcomment, o['value']))
|
||||
continue
|
||||
if o['type'] == "empty":
|
||||
output.append('')
|
||||
continue
|
||||
raise SyntaxError('Unknown type: [%s]' % o['type'])
|
||||
|
||||
# append an empty string to the output so that we add eol to the end
|
||||
# of the file contents in a single join()
|
||||
output.append('')
|
||||
return self.deol.join(output)
|
||||
|
||||
def parseLine(self, line):
|
||||
|
||||
if self.matchEmpty(line):
|
||||
return {'name': 'empty', 'type': 'empty'}
|
||||
|
||||
value = self.matchComment(line)
|
||||
if value:
|
||||
return {'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': value.rstrip()} # pylint: disable=E1103
|
||||
|
||||
o = dict()
|
||||
parts = line.split(self.dassign, 1)
|
||||
if len(parts) < 2:
|
||||
# The default assign didn't match, try the non-default
|
||||
for d in self.assign[1:]:
|
||||
parts = line.split(d, 1)
|
||||
if len(parts) >= 2:
|
||||
o['delim'] = d
|
||||
break
|
||||
|
||||
if 'delim' not in o:
|
||||
raise SyntaxError('Syntax Error: Unknown line format')
|
||||
|
||||
o.update({'name':parts[0].strip(), 'type':'option', 'value':parts[1].rstrip()})
|
||||
return o
|
||||
|
||||
def findOpts(self, opts, type, name, exclude_sections=False):
|
||||
|
||||
num = 0
|
||||
for o in opts:
|
||||
if o['type'] == type and o['name'] == name:
|
||||
return (num, o)
|
||||
if exclude_sections and (o['type'] == "section" or
|
||||
o['type'] == "subsection"):
|
||||
return (num, None)
|
||||
num += 1
|
||||
return (num, None)
|
||||
|
||||
def commentOpts(self, inopts, level=0):
|
||||
|
||||
opts = []
|
||||
|
||||
if level >= len(self.indent):
|
||||
level = len(self.indent) - 1
|
||||
|
||||
for o in inopts:
|
||||
if o['type'] == 'section':
|
||||
no = self.commentOpts(o['value'], (level + 1))
|
||||
val = self._dump_line(self.dcomment,
|
||||
self.sectnamdel[0],
|
||||
o['name'],
|
||||
self.sectnamdel[1])
|
||||
opts.append({'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': val})
|
||||
for n in no:
|
||||
opts.append(n)
|
||||
continue
|
||||
if o['type'] == 'subsection':
|
||||
no = self.commentOpts(o['value'], (level + 1))
|
||||
val = self._dump_line(self.indent[level],
|
||||
o['name'],
|
||||
self.dassign,
|
||||
self.subsectdel[0])
|
||||
opts.append({'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': val})
|
||||
opts.extend(no)
|
||||
val = self._dump_line(self.indent[level], self.subsectdel[1])
|
||||
opts.append({'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': val})
|
||||
continue
|
||||
if o['type'] == 'option':
|
||||
delim = o.get('delim', self.dassign)
|
||||
if delim not in self.assign:
|
||||
val = self._dump_line(self.indent[level],
|
||||
o['name'],
|
||||
delim,
|
||||
o['value'])
|
||||
opts.append({'name':'comment', 'type':'comment', 'value':val})
|
||||
continue
|
||||
if o['type'] == 'comment':
|
||||
opts.append(o)
|
||||
continue
|
||||
if o['type'] == 'empty':
|
||||
opts.append({'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': ''})
|
||||
continue
|
||||
raise SyntaxError('Unknown type: [%s]' % o['type'])
|
||||
|
||||
return opts
|
||||
|
||||
def mergeOld(self, oldopts, newopts):
|
||||
|
||||
opts = []
|
||||
|
||||
for o in oldopts:
|
||||
if o['type'] == "section" or o['type'] == "subsection":
|
||||
_num, no = self.findOpts(newopts, o['type'], o['name'])
|
||||
if not no:
|
||||
opts.append(o)
|
||||
continue
|
||||
if no['action'] == "set":
|
||||
mo = self.mergeOld(o['value'], no['value'])
|
||||
opts.append({'name': o['name'],
|
||||
'type': o['type'],
|
||||
'value': mo})
|
||||
continue
|
||||
if no['action'] == "comment":
|
||||
co = self.commentOpts(o['value'])
|
||||
for c in co:
|
||||
opts.append(c)
|
||||
continue
|
||||
if no['action'] == "remove":
|
||||
continue
|
||||
raise SyntaxError('Unknown action: [%s]' % no['action'])
|
||||
|
||||
if o['type'] == "comment" or o['type'] == "empty":
|
||||
opts.append(o)
|
||||
continue
|
||||
|
||||
if o['type'] == "option":
|
||||
_num, no = self.findOpts(newopts, 'option', o['name'], True)
|
||||
if not no:
|
||||
opts.append(o)
|
||||
continue
|
||||
if no['action'] == 'comment' or no['action'] == 'remove':
|
||||
if (no['value'] is not None and
|
||||
o['value'] is not no['value']):
|
||||
opts.append(o)
|
||||
continue
|
||||
if no['action'] == 'comment':
|
||||
value = self._dump_line(self.dcomment,
|
||||
o['name'],
|
||||
self.dassign,
|
||||
o['value'])
|
||||
opts.append({'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': value})
|
||||
continue
|
||||
if no['action'] == 'set':
|
||||
opts.append(no)
|
||||
continue
|
||||
if no['action'] == 'addifnotset':
|
||||
opts.append({
|
||||
'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': self._dump_line(
|
||||
' ', no['name'], ' modified by IPA'
|
||||
),
|
||||
})
|
||||
opts.append({'name': 'comment', 'type': 'comment',
|
||||
'value': self._dump_line(no['name'],
|
||||
self.dassign,
|
||||
no['value'],
|
||||
)})
|
||||
opts.append(o)
|
||||
continue
|
||||
raise SyntaxError('Unknown action: [%s]' % no['action'])
|
||||
|
||||
raise SyntaxError('Unknown type: [%s]' % o['type'])
|
||||
|
||||
return opts
|
||||
|
||||
def mergeNew(self, opts, newopts):
|
||||
|
||||
cline = 0
|
||||
|
||||
for no in newopts:
|
||||
|
||||
if no['type'] == "section" or no['type'] == "subsection":
|
||||
(num, o) = self.findOpts(opts, no['type'], no['name'])
|
||||
if not o:
|
||||
if no['action'] == 'set':
|
||||
opts.append(no)
|
||||
continue
|
||||
if no['action'] == "set":
|
||||
self.mergeNew(o['value'], no['value'])
|
||||
continue
|
||||
cline = num + 1
|
||||
continue
|
||||
|
||||
if no['type'] == "option":
|
||||
(num, o) = self.findOpts(opts, no['type'], no['name'], True)
|
||||
if not o:
|
||||
if no['action'] == 'set' or no['action'] == 'addifnotset':
|
||||
opts.append(no)
|
||||
continue
|
||||
cline = num + 1
|
||||
continue
|
||||
|
||||
if no['type'] == "comment" or no['type'] == "empty":
|
||||
opts.insert(cline, no)
|
||||
cline += 1
|
||||
continue
|
||||
|
||||
raise SyntaxError('Unknown type: [%s]' % no['type'])
|
||||
|
||||
def merge(self, oldopts, newopts):
|
||||
"""
|
||||
Uses a two pass strategy:
|
||||
First we create a new opts tree from oldopts removing/commenting
|
||||
the options as indicated by the contents of newopts
|
||||
Second we fill in the new opts tree with options as indicated
|
||||
in the newopts tree (this is becaus eentire (sub)sections may
|
||||
in the newopts tree (this is becaus entire (sub)sections may
|
||||
exist in the newopts that do not exist in oldopts)
|
||||
"""
|
||||
opts = self.mergeOld(oldopts, newopts)
|
||||
self.mergeNew(opts, newopts)
|
||||
return opts
|
||||
|
||||
#TODO: Make parse() recursive?
|
||||
def parse(self, f):
|
||||
|
||||
opts = []
|
||||
sectopts = []
|
||||
section = None
|
||||
subsectopts = []
|
||||
subsection = None
|
||||
curopts = opts
|
||||
fatheropts = opts
|
||||
|
||||
# Read in the old file.
|
||||
for line in f:
|
||||
|
||||
# It's a section start.
|
||||
value = self.matchSection(line)
|
||||
if value:
|
||||
if section is not None:
|
||||
opts.append({'name': section,
|
||||
'type': 'section',
|
||||
'value': sectopts})
|
||||
sectopts = []
|
||||
curopts = sectopts
|
||||
fatheropts = sectopts
|
||||
section = value
|
||||
continue
|
||||
|
||||
# It's a subsection start.
|
||||
value = self.matchSubSection(line)
|
||||
if value:
|
||||
if subsection is not None:
|
||||
raise SyntaxError('nested subsections are not '
|
||||
'supported yet')
|
||||
subsectopts = []
|
||||
curopts = subsectopts
|
||||
subsection = value
|
||||
continue
|
||||
|
||||
value = self.matchSubSectionEnd(line)
|
||||
if value:
|
||||
if subsection is None:
|
||||
raise SyntaxError('Unmatched end subsection terminator '
|
||||
'found')
|
||||
fatheropts.append({'name': subsection,
|
||||
'type': 'subsection',
|
||||
'value': subsectopts})
|
||||
subsection = None
|
||||
curopts = fatheropts
|
||||
continue
|
||||
|
||||
# Copy anything else as is.
|
||||
try:
|
||||
curopts.append(self.parseLine(line))
|
||||
except SyntaxError as e:
|
||||
raise SyntaxError('{error} in file {fname}: [{line}]'.format(
|
||||
error=e, fname=f.name, line=line.rstrip()))
|
||||
|
||||
#Add last section if any
|
||||
if len(sectopts) is not 0:
|
||||
opts.append({'name': section,
|
||||
'type': 'section',
|
||||
'value': sectopts})
|
||||
|
||||
return opts
|
||||
|
||||
def changeConf(self, file, newopts):
|
||||
"""
|
||||
Write settings to configuration file
|
||||
:param file: path to the file
|
||||
:param options: set of dictionaries in the form:
|
||||
{'name': 'foo', 'value': 'bar', 'action': 'set/comment'}
|
||||
:param section: section name like 'global'
|
||||
"""
|
||||
output = ""
|
||||
f = None
|
||||
try:
|
||||
# Do not catch an unexisting file error
|
||||
# we want to fail in that case
|
||||
shutil.copy2(file, (file + ".ipabkp"))
|
||||
|
||||
f = openLocked(file, 0o644)
|
||||
|
||||
oldopts = self.parse(f)
|
||||
|
||||
options = self.merge(oldopts, newopts)
|
||||
|
||||
output = self.dump(options)
|
||||
|
||||
# Write it out and close it.
|
||||
f.seek(0)
|
||||
f.truncate(0)
|
||||
f.write(output)
|
||||
finally:
|
||||
try:
|
||||
if f:
|
||||
f.close()
|
||||
except IOError:
|
||||
pass
|
||||
logger.debug("Updating configuration file %s", file)
|
||||
logger.debug(output)
|
||||
return True
|
||||
|
||||
def newConf(self, file, options, file_perms=0o644):
|
||||
""""
|
||||
Write settings to a new file, backup the old
|
||||
:param file: path to the file
|
||||
:param options: a set of dictionaries in the form:
|
||||
{'name': 'foo', 'value': 'bar', 'action': 'set/comment'}
|
||||
:param file_perms: number defining the new file's permissions
|
||||
"""
|
||||
output = ""
|
||||
f = None
|
||||
try:
|
||||
try:
|
||||
shutil.copy2(file, (file + ".ipabkp"))
|
||||
except IOError as err:
|
||||
if err.errno == 2:
|
||||
# The orign file did not exist
|
||||
pass
|
||||
|
||||
f = openLocked(file, file_perms)
|
||||
|
||||
# Trunkate
|
||||
f.seek(0)
|
||||
f.truncate(0)
|
||||
|
||||
output = self.dump(options)
|
||||
|
||||
f.write(output)
|
||||
finally:
|
||||
try:
|
||||
if f:
|
||||
f.close()
|
||||
except IOError:
|
||||
pass
|
||||
logger.debug("Writing configuration file %s", file)
|
||||
logger.debug(output)
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def setOption(name, value):
|
||||
return {'name': name,
|
||||
'type': 'option',
|
||||
'action': 'set',
|
||||
'value': value}
|
||||
|
||||
@staticmethod
|
||||
def rmOption(name):
|
||||
return {'name': name,
|
||||
'type': 'option',
|
||||
'action': 'remove',
|
||||
'value': None}
|
||||
|
||||
@staticmethod
|
||||
def setSection(name, options):
|
||||
return {'name': name,
|
||||
'type': 'section',
|
||||
'action': 'set',
|
||||
'value': options}
|
||||
|
||||
@staticmethod
|
||||
def emptyLine():
|
||||
return {'name': 'empty',
|
||||
'type': 'empty'}
|
||||
|
||||
@@ -1,23 +1,569 @@
|
||||
# Authors: Simo Sorce <ssorce@redhat.com>
|
||||
#
|
||||
# Copyright (C) 2019 FreeIPA Contributors see COPYING for license
|
||||
# Copyright (C) 2007 Red Hat
|
||||
# see file 'COPYING' for use and warranty information
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
import warnings
|
||||
from __future__ import absolute_import
|
||||
|
||||
from ipaclient.discovery import (
|
||||
NOT_FQDN, NO_LDAP_SERVER, REALM_NOT_FOUND, NOT_IPA_SERVER,
|
||||
NO_ACCESS_TO_LDAP, NO_TLS_LDAP, BAD_HOST_CONFIG,
|
||||
UNKNOWN_ERROR, IPA_BASEDN_INFO, error_names, get_ipa_basedn,
|
||||
IPADiscovery
|
||||
)
|
||||
import logging
|
||||
import socket
|
||||
|
||||
__all__ = (
|
||||
'NOT_FQDN', 'NO_LDAP_SERVER', 'REALM_NOT_FOUND',
|
||||
'NOT_IPA_SERVER', 'NO_ACCESS_TO_LDAP', 'NO_TLS_LDAP',
|
||||
'BAD_HOST_CONFIG', 'UNKNOWN_ERROR', 'IPA_BASEDN_INFO',
|
||||
'error_names', 'get_ipa_basedn', 'IPADiscovery')
|
||||
import six
|
||||
|
||||
warnings.warn(
|
||||
"ipaclient.install.ipadiscovery is deprecated, use ipaclient.discovery",
|
||||
DeprecationWarning
|
||||
)
|
||||
from dns import resolver, rdatatype
|
||||
from dns.exception import DNSException
|
||||
from ipalib import errors
|
||||
from ipapython.dnsutil import query_srv
|
||||
from ipapython import ipaldap
|
||||
from ipaplatform.paths import paths
|
||||
from ipapython.ipautil import valid_ip, realm_to_suffix
|
||||
from ipapython.dn import DN
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
NOT_FQDN = -1
|
||||
NO_LDAP_SERVER = -2
|
||||
REALM_NOT_FOUND = -3
|
||||
NOT_IPA_SERVER = -4
|
||||
NO_ACCESS_TO_LDAP = -5
|
||||
NO_TLS_LDAP = -6
|
||||
BAD_HOST_CONFIG = -10
|
||||
UNKNOWN_ERROR = -15
|
||||
|
||||
IPA_BASEDN_INFO = 'ipa v2.0'
|
||||
|
||||
error_names = {
|
||||
0: 'Success',
|
||||
NOT_FQDN: 'NOT_FQDN',
|
||||
NO_LDAP_SERVER: 'NO_LDAP_SERVER',
|
||||
REALM_NOT_FOUND: 'REALM_NOT_FOUND',
|
||||
NOT_IPA_SERVER: 'NOT_IPA_SERVER',
|
||||
NO_ACCESS_TO_LDAP: 'NO_ACCESS_TO_LDAP',
|
||||
NO_TLS_LDAP: 'NO_TLS_LDAP',
|
||||
BAD_HOST_CONFIG: 'BAD_HOST_CONFIG',
|
||||
UNKNOWN_ERROR: 'UNKNOWN_ERROR',
|
||||
}
|
||||
|
||||
def get_ipa_basedn(conn):
|
||||
"""
|
||||
Get base DN of IPA suffix in given LDAP server.
|
||||
|
||||
None is returned if the suffix is not found
|
||||
|
||||
:param conn: Bound LDAPClient that will be used for searching
|
||||
"""
|
||||
entry = conn.get_entry(
|
||||
DN(), attrs_list=['defaultnamingcontext', 'namingcontexts'])
|
||||
|
||||
contexts = [c.decode('utf-8') for c in entry.raw['namingcontexts']]
|
||||
if 'defaultnamingcontext' in entry:
|
||||
# If there is a defaultNamingContext examine that one first
|
||||
[default] = entry.raw['defaultnamingcontext']
|
||||
default = default.decode('utf-8')
|
||||
if default in contexts:
|
||||
contexts.remove(default)
|
||||
contexts.insert(0, default)
|
||||
for context in contexts:
|
||||
logger.debug("Check if naming context '%s' is for IPA", context)
|
||||
try:
|
||||
[entry] = conn.get_entries(
|
||||
DN(context), conn.SCOPE_BASE, "(info=IPA*)")
|
||||
except errors.NotFound:
|
||||
logger.debug("LDAP server did not return info attribute to "
|
||||
"check for IPA version")
|
||||
continue
|
||||
[info] = entry.raw['info']
|
||||
info = info.decode('utf-8').lower()
|
||||
if info != IPA_BASEDN_INFO:
|
||||
logger.debug("Detected IPA server version (%s) did not match the "
|
||||
"client (%s)",
|
||||
info, IPA_BASEDN_INFO)
|
||||
continue
|
||||
logger.debug("Naming context '%s' is a valid IPA context", context)
|
||||
return DN(context)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class IPADiscovery(object):
    """Discover IPA servers for a client using DNS and LDAP.

    Typical usage is a single call to :meth:`search`; the discovered
    values are then available through the ``get*()`` accessors.  Each
    discovered value has a matching ``*_source`` attribute recording how
    it was obtained (for diagnostics/logging).
    """

    def __init__(self):
        # Discovered values; populated by search()/ipacheckldap().
        self.realm = None
        self.domain = None
        self.server = None
        self.servers = []
        self.basedn = None
        # Fix: initialize kdc/kdc_source so getKDCName() cannot raise
        # AttributeError when called before search() has run.
        self.kdc = None
        self.kdc_source = None

        # Human-readable provenance of each discovered value.
        self.realm_source = None
        self.domain_source = None
        self.server_source = None
        self.basedn_source = None

    def __get_resolver_domains(self):
        """
        Read /etc/resolv.conf and return all the domains found in domain and
        search.

        Returns a list of (domain, info) pairs. The info contains a reason why
        the domain is returned.
        """
        domains = []
        domain = None
        try:
            # Use a context manager so the file handle is closed even if
            # reading fails partway through.
            with open(paths.RESOLV_CONF, 'r') as fp:
                lines = fp.readlines()

            for line in lines:
                if line.lower().startswith('domain'):
                    domain = (line.split()[-1],
                              'local domain from /etc/resolv.conf')
                elif line.lower().startswith('search'):
                    domains += [(d, 'search domain from /etc/resolv.conf') for
                                d in line.split()[1:]]
        except Exception:
            # Best-effort: a missing or unreadable resolv.conf simply
            # yields no extra domains to try.
            pass
        if domain:
            # The "domain" directive takes precedence over "search" entries.
            domains = [domain] + domains
        return domains

    def getServerName(self):
        """Return the discovered IPA server host name (or None)."""
        return self.server

    def getDomainName(self):
        """Return the discovered IPA domain name (or None)."""
        return self.domain

    def getRealmName(self):
        """Return the discovered Kerberos realm (or None)."""
        return self.realm

    def getKDCName(self):
        """Return the discovered KDC address(es) as a string (or None)."""
        return self.kdc

    def getBaseDN(self):
        """Return the discovered LDAP base DN (or None)."""
        return self.basedn

    def check_domain(self, domain, tried, reason):
        """
        Given a domain search it for SRV records, breaking it down to search
        all subdomains too.

        Returns a tuple (servers, domain) or (None, None) if a SRV record
        isn't found. servers is a list of servers found. domain is a string.

        :param tried: A set of domains that were tried already
        :param reason: Reason this domain is searched (included in the log)
        """
        servers = None
        logger.debug('Start searching for LDAP SRV record in "%s" (%s) '
                     'and its sub-domains', domain, reason)
        while not servers:
            if domain in tried:
                logger.debug("Already searched %s; skipping", domain)
                break
            tried.add(domain)

            servers = self.ipadns_search_srv(domain, '_ldap._tcp', 389,
                                             break_on_first=False)
            if servers:
                return (servers, domain)
            else:
                # Strip the leftmost label and retry with the parent domain.
                p = domain.find(".")
                if p == -1:
                    # No LDAP server found and the last component of the
                    # domain has already been tested.
                    return (None, None)
                domain = domain[p + 1:]
        return (None, None)

    def search(self, domain="", servers="", realm=None, hostname=None, ca_cert_path=None):
        """
        Use DNS discovery to identify valid IPA servers.

        servers may contain an optional list of servers which will be used
        instead of discovering available LDAP SRV records.

        Returns a constant representing the overall search result.
        """
        logger.debug("[IPA Discovery]")
        logger.debug(
            'Starting IPA discovery with domain=%s, servers=%s, hostname=%s',
            domain, servers, hostname)

        self.server = None
        autodiscovered = False

        if not servers:

            if not domain:  # domain not provided, do full DNS discovery

                # get the local host name
                if not hostname:
                    hostname = socket.getfqdn()
                    logger.debug('Hostname: %s', hostname)
                if not hostname:
                    return BAD_HOST_CONFIG

                if valid_ip(hostname):
                    return NOT_FQDN

                # first, check for an LDAP server for the local domain
                p = hostname.find(".")
                if p == -1:  # no domain name
                    return NOT_FQDN
                domain = hostname[p + 1:]

                # Get the list of domains from /etc/resolv.conf, we'll search
                # them all. We search the domain of our hostname first though.
                # This is to avoid the situation where domain isn't set in
                # /etc/resolv.conf and the search list has the hostname domain
                # not first. We could end up with the wrong SRV record.
                domains = self.__get_resolver_domains()
                domains = [(domain, 'domain of the hostname')] + domains
                tried = set()
                for domain, reason in domains:
                    servers, domain = self.check_domain(domain, tried, reason)
                    if servers:
                        autodiscovered = True
                        self.domain = domain
                        self.server_source = self.domain_source = (
                            'Discovered LDAP SRV records from %s (%s)' %
                            (domain, reason))
                        break
                if not self.domain:  # no ldap server found
                    logger.debug('No LDAP server found')
                    return NO_LDAP_SERVER
            else:
                logger.debug("Search for LDAP SRV record in %s", domain)
                servers = self.ipadns_search_srv(domain, '_ldap._tcp', 389,
                                                 break_on_first=False)
                if servers:
                    autodiscovered = True
                    self.domain = domain
                    self.server_source = self.domain_source = (
                        'Discovered LDAP SRV records from %s' % domain)
                else:
                    self.server = None
                    logger.debug('No LDAP server found')
                    return NO_LDAP_SERVER

        else:
            # Caller supplied explicit servers; skip SRV discovery entirely.
            logger.debug("Server and domain forced")
            self.domain = domain
            self.domain_source = self.server_source = 'Forced'

        # search for kerberos
        logger.debug("[Kerberos realm search]")
        if realm:
            logger.debug("Kerberos realm forced")
            self.realm = realm
            self.realm_source = 'Forced'
        else:
            realm = self.ipadnssearchkrbrealm()
            self.realm = realm
            self.realm_source = (
                'Discovered Kerberos DNS records from %s' % self.domain)

        if not servers and not realm:
            return REALM_NOT_FOUND

        if autodiscovered:
            self.kdc = self.ipadnssearchkrbkdc()
            self.kdc_source = (
                'Discovered Kerberos DNS records from %s' % self.domain)
        else:
            self.kdc = ', '.join(servers)
            self.kdc_source = "Kerberos DNS record discovery bypassed"

        # We may have received multiple servers corresponding to the domain
        # Iterate through all of those to check if it is IPA LDAP server
        ldapret = [NOT_IPA_SERVER]
        ldapaccess = True
        logger.debug("[LDAP server check]")
        valid_servers = []
        for server in servers:
            logger.debug('Verifying that %s (realm %s) is an IPA server',
                         server, self.realm)
            # check ldap now
            ldapret = self.ipacheckldap(server, self.realm,
                                        ca_cert_path=ca_cert_path)

            if ldapret[0] == 0:
                self.server = ldapret[1]
                self.realm = ldapret[2]
                self.server_source = self.realm_source = (
                    'Discovered from LDAP DNS records in %s' % self.server)
                valid_servers.append(server)
                # verified, we actually talked to the remote server and it
                # is definitely an IPA server
                if autodiscovered:
                    # No need to keep verifying servers if we discovered them
                    # via DNS
                    break
            elif ldapret[0] == NO_ACCESS_TO_LDAP or ldapret[0] == NO_TLS_LDAP:
                ldapaccess = False
                valid_servers.append(server)
                # we may set verified_servers below, we don't have it yet
                if autodiscovered:
                    # No need to keep verifying servers if we discovered them
                    # via DNS
                    break
            elif ldapret[0] == NOT_IPA_SERVER:
                logger.warning(
                    'Skip %s: not an IPA server', server)
            elif ldapret[0] == NO_LDAP_SERVER:
                logger.warning(
                    'Skip %s: LDAP server is not responding, unable to verify '
                    'if this is an IPA server', server)
            else:
                logger.warning(
                    'Skip %s: cannot verify if this is an IPA server', server)

        # If one of LDAP servers checked rejects access (maybe anonymous
        # bind is disabled), assume realm and basedn generated off domain.
        # Note that in case ldapret[0] == 0 and ldapaccess == False (one of
        # servers didn't provide access but another one succeeded), self.realm
        # will be set already to a proper value above, self.basedn will be
        # initialized during the LDAP check itself and we'll skip these two
        # checks.
        if not ldapaccess and self.realm is None:
            # Assume realm is the same as domain.upper()
            self.realm = self.domain.upper()
            self.realm_source = 'Assumed same as domain'
            logger.debug(
                "Assuming realm is the same as domain: %s", self.realm)

        if not ldapaccess and self.basedn is None:
            # Generate suffix from realm
            self.basedn = realm_to_suffix(self.realm)
            self.basedn_source = 'Generated from Kerberos realm'
            logger.debug("Generated basedn from realm: %s", self.basedn)

        logger.debug(
            "Discovery result: %s; server=%s, domain=%s, kdc=%s, basedn=%s",
            error_names.get(ldapret[0], ldapret[0]),
            self.server, self.domain, self.kdc, self.basedn)

        logger.debug("Validated servers: %s", ','.join(valid_servers))
        self.servers = valid_servers

        # If we have any servers left then override the last return value
        # to indicate success.
        # NOTE(review): this intentionally (per upstream) takes servers[0],
        # not valid_servers[0]; if the first candidate was skipped the two
        # can differ — confirm before changing.
        if valid_servers:
            self.server = servers[0]
            ldapret[0] = 0

        return ldapret[0]

    def ipacheckldap(self, thost, trealm, ca_cert_path=None):
        """
        Given a host and kerberos realm verify that it is an IPA LDAP
        server hosting the realm.

        Returns a list [errno, host, realm] or an empty list on error.
        Errno is an error number:
            0 means all ok
            1 means we could not check the info in LDAP (may happen when
              anonymous binds are disabled)
            2 means the server is certainly not an IPA server
        """

        lrealms = []

        # now verify the server is really an IPA server
        try:
            ldap_uri = ipaldap.get_ldap_uri(thost)
            # Only attempt STARTTLS when we have a CA certificate to
            # validate the server with.
            start_tls = False
            if ca_cert_path:
                start_tls = True
            logger.debug("Init LDAP connection to: %s", ldap_uri)
            lh = ipaldap.LDAPClient(
                ldap_uri, cacert=ca_cert_path, start_tls=start_tls,
                no_schema=True, decode_attrs=False)
            try:
                lh.simple_bind(DN(), '')

                # get IPA base DN
                logger.debug("Search LDAP server for IPA base DN")
                basedn = get_ipa_basedn(lh)
            except errors.ACIError:
                logger.debug("LDAP Error: Anonymous access not allowed")
                return [NO_ACCESS_TO_LDAP]
            except errors.DatabaseError as err:
                logger.error("Error checking LDAP: %s", err.strerror)
                # We should only get UNWILLING_TO_PERFORM if the remote LDAP
                # server has minssf > 0 and we have attempted a non-TLS conn.
                if ca_cert_path is None:
                    logger.debug(
                        "Cannot connect to LDAP server. Check that minssf is "
                        "not enabled")
                    return [NO_TLS_LDAP]
                else:
                    return [UNKNOWN_ERROR]

            if basedn is None:
                logger.debug("The server is not an IPA server")
                return [NOT_IPA_SERVER]

            self.basedn = basedn
            self.basedn_source = 'From IPA server %s' % lh.ldap_uri

            # search and return known realms
            logger.debug(
                "Search for (objectClass=krbRealmContainer) in %s (sub)",
                self.basedn)
            try:
                lret = lh.get_entries(
                    DN(('cn', 'kerberos'), self.basedn),
                    lh.SCOPE_SUBTREE, "(objectClass=krbRealmContainer)")
            except errors.NotFound:
                # something very wrong
                return [REALM_NOT_FOUND]

            for lres in lret:
                logger.debug("Found: %s", lres.dn)
                [cn] = lres.raw['cn']
                if six.PY3:
                    cn = cn.decode('utf-8')
                lrealms.append(cn)

            if trealm:
                for r in lrealms:
                    if trealm == r:
                        return [0, thost, trealm]
                # must match or something is very wrong
                logger.debug("Realm %s does not match any realm in LDAP "
                             "database", trealm)
                return [REALM_NOT_FOUND]
            else:
                if len(lrealms) != 1:
                    # which one? we can't attach to a multi-realm server
                    # without DNS working
                    logger.debug("Multiple realms found, cannot decide "
                                 "which realm is the right without "
                                 "working DNS")
                    return [REALM_NOT_FOUND]
                else:
                    return [0, thost, lrealms[0]]

            # we shouldn't get here
            assert False, "Unknown error in ipadiscovery"

        except errors.DatabaseTimeout:
            logger.debug("LDAP Error: timeout")
            return [NO_LDAP_SERVER]
        except errors.NetworkError as err:
            logger.debug("LDAP Error: %s", err.strerror)
            return [NO_LDAP_SERVER]
        except errors.ACIError:
            logger.debug("LDAP Error: Anonymous access not allowed")
            return [NO_ACCESS_TO_LDAP]
        except errors.DatabaseError as err:
            logger.debug("Error checking LDAP: %s", err.strerror)
            return [UNKNOWN_ERROR]
        except Exception as err:
            # Catch-all boundary: log and report a generic failure rather
            # than letting discovery crash the installer.
            logger.debug("Error checking LDAP: %s", err)

            return [UNKNOWN_ERROR]

    def ipadns_search_srv(self, domain, srv_record_name, default_port,
                          break_on_first=True):
        """
        Search for SRV records in given domain. When no record is found,
        an empty list is returned.

        :param domain: Search domain name
        :param srv_record_name: SRV record name, e.g. "_ldap._tcp"
        :param default_port: When default_port is not None, it is being
                checked with the port in SRV record and if they don't
                match, the port from SRV record is appended to
                found hostname in this format: "hostname:port"
        :param break_on_first: break on the first find and return just one
                entry
        """
        servers = []

        qname = '%s.%s' % (srv_record_name, domain)

        logger.debug("Search DNS for SRV record of %s", qname)

        try:
            answers = query_srv(qname)
        except DNSException as e:
            logger.debug("DNS record not found: %s", e.__class__.__name__)
            answers = []

        for answer in answers:
            logger.debug("DNS record found: %s", answer)
            # SRV targets are absolute names; drop the trailing root dot.
            server = str(answer.target).rstrip(".")
            if not server:
                logger.debug("Cannot parse the hostname from SRV record: %s",
                             answer)
                continue
            if default_port is not None and answer.port != default_port:
                server = "%s:%s" % (server, str(answer.port))
            servers.append(server)
            if break_on_first:
                break

        return servers

    def ipadnssearchkrbrealm(self, domain=None):
        """
        :param domain: Domain to be searched in
        :returns: string of a realm found in a TXT record
                  None if no realm was found
        """
        if not domain:
            domain = self.domain
        # now, check for a Kerberos realm the local host or domain is in
        qname = "_kerberos." + domain

        logger.debug("Search DNS for TXT record of %s", qname)

        try:
            answers = resolver.query(qname, rdatatype.TXT)
        except DNSException as e:
            logger.debug("DNS record not found: %s", e.__class__.__name__)
            answers = []

        realm = None
        for answer in answers:
            logger.debug("DNS record found: %s", answer)
            if answer.strings:
                try:
                    realm = answer.strings[0].decode('utf-8')
                except UnicodeDecodeError as e:
                    logger.debug(
                        'A TXT record cannot be decoded as UTF-8: %s', e)
                    continue
                if realm:
                    return realm
        return None

    def ipadnssearchkrbkdc(self, domain=None):
        """Search DNS for Kerberos KDC SRV records.

        :param domain: Domain to be searched in; defaults to self.domain
        :returns: comma-separated string of KDC addresses, or None when
                  no SRV record was found
        """
        kdc = None

        if not domain:
            domain = self.domain

        kdc = self.ipadns_search_srv(domain, '_kerberos._udp', 88,
                                     break_on_first=False)

        if kdc:
            kdc = ','.join(kdc)
        else:
            logger.debug("SRV record for KDC not found! Domain: %s", domain)
            kdc = None

        return kdc
|
||||
|
||||
@@ -28,7 +28,6 @@ from ipapython import ipautil
|
||||
from ipaplatform.tasks import tasks
|
||||
from ipaplatform import services
|
||||
from ipaplatform.paths import paths
|
||||
from ipapython.ipautil import user_input
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -40,36 +39,6 @@ def __backup_config(path, fstore=None):
|
||||
shutil.copy(path, "%s.ipasave" % (path))
|
||||
|
||||
|
||||
def get_time_source():
    """
    While in interactive installation user has to specify NTP server or pool
    to be used in chrony configuration. This method asks user input on these
    values in case that they were not specified before installation start.

    :returns: tuple (ntp_servers, ntp_pool) where ntp_servers is a list of
        server address strings (possibly empty) and ntp_pool is a pool
        address string (possibly empty)
    """
    ntp_servers = []
    ntp_pool = ""

    # Consistency fix: use the directly imported user_input() throughout
    # instead of mixing ipautil.user_input() with the bare user_input().
    if user_input("Do you want to configure chrony "
                  "with NTP server or pool address?", False):
        servers = user_input("Enter NTP source server addresses separated by "
                             "comma, or press Enter to skip", allow_empty=True)
        if servers:  # if user input is not '' (empty)
            logger.debug("User provided NTP server(s):")
            # cut possible multiple servers separated by comma into list
            for server in servers.split(","):
                # users tend to separate servers by ", " so strip() whitespaces
                server = server.strip()
                ntp_servers.append(server)
                logger.debug("\t%s", server)

        ntp_pool = user_input("Enter a NTP source pool address, "
                              "or press Enter to skip", allow_empty=True)
        if ntp_pool:  # if user input is not '' (empty)
            logger.debug("User provided NTP pool:\t%s", ntp_pool)

    return ntp_servers, ntp_pool
|
||||
|
||||
|
||||
def sync_chrony():
|
||||
"""
|
||||
This method enables chronyd service on boot and restarts it to reload
|
||||
|
||||
Reference in New Issue
Block a user