Imported Debian patch 4.0.5-6~numeezy
committed by Mario Fetka
parent c44de33144
commit 10dfc9587b
16  ipa-client/ipaclient/Makefile.am  Normal file
@@ -0,0 +1,16 @@
NULL =

appdir = $(pythondir)/ipaclient
app_PYTHON = \
    __init__.py \
    ipachangeconf.py \
    ipadiscovery.py \
    ntpconf.py \
    $(NULL)

EXTRA_DIST = \
    $(NULL)

MAINTAINERCLEANFILES = \
    *~ \
    Makefile.in
539  ipa-client/ipaclient/Makefile.in  Normal file
@@ -0,0 +1,539 @@
|
||||
# Makefile.in generated by automake 1.14.1 from Makefile.am.
|
||||
# @configure_input@
|
||||
|
||||
# Copyright (C) 1994-2013 Free Software Foundation, Inc.
|
||||
|
||||
# This Makefile.in is free software; the Free Software Foundation
|
||||
# gives unlimited permission to copy and/or distribute it,
|
||||
# with or without modifications, as long as this notice is preserved.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
|
||||
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
|
||||
# PARTICULAR PURPOSE.
|
||||
|
||||
@SET_MAKE@
|
||||
VPATH = @srcdir@
|
||||
am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
|
||||
am__make_running_with_option = \
|
||||
case $${target_option-} in \
|
||||
?) ;; \
|
||||
*) echo "am__make_running_with_option: internal error: invalid" \
|
||||
"target option '$${target_option-}' specified" >&2; \
|
||||
exit 1;; \
|
||||
esac; \
|
||||
has_opt=no; \
|
||||
sane_makeflags=$$MAKEFLAGS; \
|
||||
if $(am__is_gnu_make); then \
|
||||
sane_makeflags=$$MFLAGS; \
|
||||
else \
|
||||
case $$MAKEFLAGS in \
|
||||
*\\[\ \ ]*) \
|
||||
bs=\\; \
|
||||
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
|
||||
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
|
||||
esac; \
|
||||
fi; \
|
||||
skip_next=no; \
|
||||
strip_trailopt () \
|
||||
{ \
|
||||
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
|
||||
}; \
|
||||
for flg in $$sane_makeflags; do \
|
||||
test $$skip_next = yes && { skip_next=no; continue; }; \
|
||||
case $$flg in \
|
||||
*=*|--*) continue;; \
|
||||
-*I) strip_trailopt 'I'; skip_next=yes;; \
|
||||
-*I?*) strip_trailopt 'I';; \
|
||||
-*O) strip_trailopt 'O'; skip_next=yes;; \
|
||||
-*O?*) strip_trailopt 'O';; \
|
||||
-*l) strip_trailopt 'l'; skip_next=yes;; \
|
||||
-*l?*) strip_trailopt 'l';; \
|
||||
-[dEDm]) skip_next=yes;; \
|
||||
-[JT]) skip_next=yes;; \
|
||||
esac; \
|
||||
case $$flg in \
|
||||
*$$target_option*) has_opt=yes; break;; \
|
||||
esac; \
|
||||
done; \
|
||||
test $$has_opt = yes
|
||||
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
|
||||
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
|
||||
pkgdatadir = $(datadir)/@PACKAGE@
|
||||
pkgincludedir = $(includedir)/@PACKAGE@
|
||||
pkglibdir = $(libdir)/@PACKAGE@
|
||||
pkglibexecdir = $(libexecdir)/@PACKAGE@
|
||||
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
|
||||
install_sh_DATA = $(install_sh) -c -m 644
|
||||
install_sh_PROGRAM = $(install_sh) -c
|
||||
install_sh_SCRIPT = $(install_sh) -c
|
||||
INSTALL_HEADER = $(INSTALL_DATA)
|
||||
transform = $(program_transform_name)
|
||||
NORMAL_INSTALL = :
|
||||
PRE_INSTALL = :
|
||||
POST_INSTALL = :
|
||||
NORMAL_UNINSTALL = :
|
||||
PRE_UNINSTALL = :
|
||||
POST_UNINSTALL = :
|
||||
build_triplet = @build@
|
||||
host_triplet = @host@
|
||||
subdir = ipaclient
|
||||
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
|
||||
$(app_PYTHON) $(top_srcdir)/py-compile
|
||||
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
|
||||
am__aclocal_m4_deps = $(top_srcdir)/version.m4 \
|
||||
$(top_srcdir)/configure.ac
|
||||
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
|
||||
$(ACLOCAL_M4)
|
||||
mkinstalldirs = $(install_sh) -d
|
||||
CONFIG_HEADER = $(top_builddir)/config.h
|
||||
CONFIG_CLEAN_FILES =
|
||||
CONFIG_CLEAN_VPATH_FILES =
|
||||
AM_V_P = $(am__v_P_@AM_V@)
|
||||
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
|
||||
am__v_P_0 = false
|
||||
am__v_P_1 = :
|
||||
AM_V_GEN = $(am__v_GEN_@AM_V@)
|
||||
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
|
||||
am__v_GEN_0 = @echo " GEN " $@;
|
||||
am__v_GEN_1 =
|
||||
AM_V_at = $(am__v_at_@AM_V@)
|
||||
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
|
||||
am__v_at_0 = @
|
||||
am__v_at_1 =
|
||||
SOURCES =
|
||||
DIST_SOURCES =
|
||||
am__can_run_installinfo = \
|
||||
case $$AM_UPDATE_INFO_DIR in \
|
||||
n|no|NO) false;; \
|
||||
*) (install-info --version) >/dev/null 2>&1;; \
|
||||
esac
|
||||
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
|
||||
am__vpath_adj = case $$p in \
|
||||
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
|
||||
*) f=$$p;; \
|
||||
esac;
|
||||
am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
|
||||
am__install_max = 40
|
||||
am__nobase_strip_setup = \
|
||||
srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
|
||||
am__nobase_strip = \
|
||||
for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
|
||||
am__nobase_list = $(am__nobase_strip_setup); \
|
||||
for p in $$list; do echo "$$p $$p"; done | \
|
||||
sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
|
||||
$(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
|
||||
if (++n[$$2] == $(am__install_max)) \
|
||||
{ print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
|
||||
END { for (dir in files) print dir, files[dir] }'
|
||||
am__base_list = \
|
||||
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
|
||||
sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
|
||||
am__uninstall_files_from_dir = { \
|
||||
test -z "$$files" \
|
||||
|| { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
|
||||
|| { echo " ( cd '$$dir' && rm -f" $$files ")"; \
|
||||
$(am__cd) "$$dir" && rm -f $$files; }; \
|
||||
}
|
||||
am__py_compile = PYTHON=$(PYTHON) $(SHELL) $(py_compile)
|
||||
am__installdirs = "$(DESTDIR)$(appdir)"
|
||||
am__pep3147_tweak = \
|
||||
sed -e 's|\.py$$||' -e 's|[^/]*$$|__pycache__/&.*.py|'
|
||||
py_compile = $(top_srcdir)/py-compile
|
||||
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
|
||||
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
|
||||
ACLOCAL = @ACLOCAL@
|
||||
AMTAR = @AMTAR@
|
||||
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
|
||||
AR = @AR@
|
||||
AUTOCONF = @AUTOCONF@
|
||||
AUTOHEADER = @AUTOHEADER@
|
||||
AUTOMAKE = @AUTOMAKE@
|
||||
AWK = @AWK@
|
||||
CC = @CC@
|
||||
CCDEPMODE = @CCDEPMODE@
|
||||
CFLAGS = @CFLAGS@
|
||||
CPP = @CPP@
|
||||
CPPFLAGS = @CPPFLAGS@
|
||||
CURL_LIBS = @CURL_LIBS@
|
||||
CYGPATH_W = @CYGPATH_W@
|
||||
DEFS = @DEFS@
|
||||
DEPDIR = @DEPDIR@
|
||||
DLLTOOL = @DLLTOOL@
|
||||
DSYMUTIL = @DSYMUTIL@
|
||||
DUMPBIN = @DUMPBIN@
|
||||
ECHO_C = @ECHO_C@
|
||||
ECHO_N = @ECHO_N@
|
||||
ECHO_T = @ECHO_T@
|
||||
EGREP = @EGREP@
|
||||
EXEEXT = @EXEEXT@
|
||||
FGREP = @FGREP@
|
||||
GREP = @GREP@
|
||||
INSTALL = @INSTALL@
|
||||
INSTALL_DATA = @INSTALL_DATA@
|
||||
INSTALL_PROGRAM = @INSTALL_PROGRAM@
|
||||
INSTALL_SCRIPT = @INSTALL_SCRIPT@
|
||||
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
|
||||
IPA_DATA_DIR = @IPA_DATA_DIR@
|
||||
KRB5_LIBS = @KRB5_LIBS@
|
||||
LD = @LD@
|
||||
LDFLAGS = @LDFLAGS@
|
||||
LIBINTL_LIBS = @LIBINTL_LIBS@
|
||||
LIBOBJS = @LIBOBJS@
|
||||
LIBS = @LIBS@
|
||||
LIBTOOL = @LIBTOOL@
|
||||
LIPO = @LIPO@
|
||||
LN_S = @LN_S@
|
||||
LTLIBOBJS = @LTLIBOBJS@
|
||||
MAINT = @MAINT@
|
||||
MAKEINFO = @MAKEINFO@
|
||||
MANIFEST_TOOL = @MANIFEST_TOOL@
|
||||
MKDIR_P = @MKDIR_P@
|
||||
NM = @NM@
|
||||
NMEDIT = @NMEDIT@
|
||||
OBJDUMP = @OBJDUMP@
|
||||
OBJEXT = @OBJEXT@
|
||||
OPENLDAP_LIBS = @OPENLDAP_LIBS@
|
||||
OTOOL = @OTOOL@
|
||||
OTOOL64 = @OTOOL64@
|
||||
PACKAGE = @PACKAGE@
|
||||
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
|
||||
PACKAGE_NAME = @PACKAGE_NAME@
|
||||
PACKAGE_STRING = @PACKAGE_STRING@
|
||||
PACKAGE_TARNAME = @PACKAGE_TARNAME@
|
||||
PACKAGE_URL = @PACKAGE_URL@
|
||||
PACKAGE_VERSION = @PACKAGE_VERSION@
|
||||
PATH_SEPARATOR = @PATH_SEPARATOR@
|
||||
POPT_LIBS = @POPT_LIBS@
|
||||
PYTHON = @PYTHON@
|
||||
PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@
|
||||
PYTHON_PLATFORM = @PYTHON_PLATFORM@
|
||||
PYTHON_PREFIX = @PYTHON_PREFIX@
|
||||
PYTHON_VERSION = @PYTHON_VERSION@
|
||||
RANLIB = @RANLIB@
|
||||
SASL_LIBS = @SASL_LIBS@
|
||||
SED = @SED@
|
||||
SET_MAKE = @SET_MAKE@
|
||||
SHELL = @SHELL@
|
||||
STRIP = @STRIP@
|
||||
VERSION = @VERSION@
|
||||
XMLRPC_LIBS = @XMLRPC_LIBS@
|
||||
abs_builddir = @abs_builddir@
|
||||
abs_srcdir = @abs_srcdir@
|
||||
abs_top_builddir = @abs_top_builddir@
|
||||
abs_top_srcdir = @abs_top_srcdir@
|
||||
ac_ct_AR = @ac_ct_AR@
|
||||
ac_ct_CC = @ac_ct_CC@
|
||||
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
|
||||
am__include = @am__include@
|
||||
am__leading_dot = @am__leading_dot@
|
||||
am__quote = @am__quote@
|
||||
am__tar = @am__tar@
|
||||
am__untar = @am__untar@
|
||||
bindir = @bindir@
|
||||
build = @build@
|
||||
build_alias = @build_alias@
|
||||
build_cpu = @build_cpu@
|
||||
build_os = @build_os@
|
||||
build_vendor = @build_vendor@
|
||||
builddir = @builddir@
|
||||
datadir = @datadir@
|
||||
datarootdir = @datarootdir@
|
||||
docdir = @docdir@
|
||||
dvidir = @dvidir@
|
||||
exec_prefix = @exec_prefix@
|
||||
host = @host@
|
||||
host_alias = @host_alias@
|
||||
host_cpu = @host_cpu@
|
||||
host_os = @host_os@
|
||||
host_vendor = @host_vendor@
|
||||
htmldir = @htmldir@
|
||||
includedir = @includedir@
|
||||
infodir = @infodir@
|
||||
install_sh = @install_sh@
|
||||
libdir = @libdir@
|
||||
libexecdir = @libexecdir@
|
||||
localedir = @localedir@
|
||||
localstatedir = @localstatedir@
|
||||
mandir = @mandir@
|
||||
mkdir_p = @mkdir_p@
|
||||
oldincludedir = @oldincludedir@
|
||||
pdfdir = @pdfdir@
|
||||
pkgpyexecdir = @pkgpyexecdir@
|
||||
pkgpythondir = @pkgpythondir@
|
||||
prefix = @prefix@
|
||||
program_transform_name = @program_transform_name@
|
||||
psdir = @psdir@
|
||||
pyexecdir = @pyexecdir@
|
||||
pythondir = @pythondir@
|
||||
sbindir = @sbindir@
|
||||
sharedstatedir = @sharedstatedir@
|
||||
srcdir = @srcdir@
|
||||
sysconfdir = @sysconfdir@
|
||||
target_alias = @target_alias@
|
||||
top_build_prefix = @top_build_prefix@
|
||||
top_builddir = @top_builddir@
|
||||
top_srcdir = @top_srcdir@
|
||||
NULL =
|
||||
appdir = $(pythondir)/ipaclient
|
||||
app_PYTHON = \
|
||||
__init__.py \
|
||||
ipachangeconf.py \
|
||||
ipadiscovery.py \
|
||||
ntpconf.py \
|
||||
$(NULL)
|
||||
|
||||
EXTRA_DIST = \
|
||||
$(NULL)
|
||||
|
||||
MAINTAINERCLEANFILES = \
|
||||
*~ \
|
||||
Makefile.in
|
||||
|
||||
all: all-am
|
||||
|
||||
.SUFFIXES:
|
||||
$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
|
||||
@for dep in $?; do \
|
||||
case '$(am__configure_deps)' in \
|
||||
*$$dep*) \
|
||||
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
|
||||
&& { if test -f $@; then exit 0; else break; fi; }; \
|
||||
exit 1;; \
|
||||
esac; \
|
||||
done; \
|
||||
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign ipaclient/Makefile'; \
|
||||
$(am__cd) $(top_srcdir) && \
|
||||
$(AUTOMAKE) --foreign ipaclient/Makefile
|
||||
.PRECIOUS: Makefile
|
||||
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
|
||||
@case '$?' in \
|
||||
*config.status*) \
|
||||
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
|
||||
*) \
|
||||
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
|
||||
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
|
||||
esac;
|
||||
|
||||
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
|
||||
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
|
||||
|
||||
$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
|
||||
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
|
||||
$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
|
||||
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
|
||||
$(am__aclocal_m4_deps):
|
||||
|
||||
mostlyclean-libtool:
|
||||
-rm -f *.lo
|
||||
|
||||
clean-libtool:
|
||||
-rm -rf .libs _libs
|
||||
install-appPYTHON: $(app_PYTHON)
|
||||
@$(NORMAL_INSTALL)
|
||||
@list='$(app_PYTHON)'; dlist=; list2=; test -n "$(appdir)" || list=; \
|
||||
if test -n "$$list"; then \
|
||||
echo " $(MKDIR_P) '$(DESTDIR)$(appdir)'"; \
|
||||
$(MKDIR_P) "$(DESTDIR)$(appdir)" || exit 1; \
|
||||
fi; \
|
||||
for p in $$list; do \
|
||||
if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \
|
||||
if test -f $$b$$p; then \
|
||||
$(am__strip_dir) \
|
||||
dlist="$$dlist $$f"; \
|
||||
list2="$$list2 $$b$$p"; \
|
||||
else :; fi; \
|
||||
done; \
|
||||
for file in $$list2; do echo $$file; done | $(am__base_list) | \
|
||||
while read files; do \
|
||||
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(appdir)'"; \
|
||||
$(INSTALL_DATA) $$files "$(DESTDIR)$(appdir)" || exit $$?; \
|
||||
done || exit $$?; \
|
||||
if test -n "$$dlist"; then \
|
||||
$(am__py_compile) --destdir "$(DESTDIR)" \
|
||||
--basedir "$(appdir)" $$dlist; \
|
||||
else :; fi
|
||||
|
||||
uninstall-appPYTHON:
|
||||
@$(NORMAL_UNINSTALL)
|
||||
@list='$(app_PYTHON)'; test -n "$(appdir)" || list=; \
|
||||
py_files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
|
||||
test -n "$$py_files" || exit 0; \
|
||||
dir='$(DESTDIR)$(appdir)'; \
|
||||
pyc_files=`echo "$$py_files" | sed 's|$$|c|'`; \
|
||||
pyo_files=`echo "$$py_files" | sed 's|$$|o|'`; \
|
||||
py_files_pep3147=`echo "$$py_files" | $(am__pep3147_tweak)`; \
|
||||
echo "$$py_files_pep3147";\
|
||||
pyc_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|c|'`; \
|
||||
pyo_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|o|'`; \
|
||||
st=0; \
|
||||
for files in \
|
||||
"$$py_files" \
|
||||
"$$pyc_files" \
|
||||
"$$pyo_files" \
|
||||
"$$pyc_files_pep3147" \
|
||||
"$$pyo_files_pep3147" \
|
||||
; do \
|
||||
$(am__uninstall_files_from_dir) || st=$$?; \
|
||||
done; \
|
||||
exit $$st
|
||||
tags TAGS:
|
||||
|
||||
ctags CTAGS:
|
||||
|
||||
cscope cscopelist:
|
||||
|
||||
|
||||
distdir: $(DISTFILES)
|
||||
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
|
||||
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
|
||||
list='$(DISTFILES)'; \
|
||||
dist_files=`for file in $$list; do echo $$file; done | \
|
||||
sed -e "s|^$$srcdirstrip/||;t" \
|
||||
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
|
||||
case $$dist_files in \
|
||||
*/*) $(MKDIR_P) `echo "$$dist_files" | \
|
||||
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
|
||||
sort -u` ;; \
|
||||
esac; \
|
||||
for file in $$dist_files; do \
|
||||
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
|
||||
if test -d $$d/$$file; then \
|
||||
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
|
||||
if test -d "$(distdir)/$$file"; then \
|
||||
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
|
||||
fi; \
|
||||
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
|
||||
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
|
||||
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
|
||||
fi; \
|
||||
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
|
||||
else \
|
||||
test -f "$(distdir)/$$file" \
|
||||
|| cp -p $$d/$$file "$(distdir)/$$file" \
|
||||
|| exit 1; \
|
||||
fi; \
|
||||
done
|
||||
check-am: all-am
|
||||
check: check-am
|
||||
all-am: Makefile
|
||||
installdirs:
|
||||
for dir in "$(DESTDIR)$(appdir)"; do \
|
||||
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
|
||||
done
|
||||
install: install-am
|
||||
install-exec: install-exec-am
|
||||
install-data: install-data-am
|
||||
uninstall: uninstall-am
|
||||
|
||||
install-am: all-am
|
||||
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
|
||||
|
||||
installcheck: installcheck-am
|
||||
install-strip:
|
||||
if test -z '$(STRIP)'; then \
|
||||
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
|
||||
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
|
||||
install; \
|
||||
else \
|
||||
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
|
||||
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
|
||||
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
|
||||
fi
|
||||
mostlyclean-generic:
|
||||
|
||||
clean-generic:
|
||||
|
||||
distclean-generic:
|
||||
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
|
||||
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
|
||||
|
||||
maintainer-clean-generic:
|
||||
@echo "This command is intended for maintainers to use"
|
||||
@echo "it deletes files that may require special tools to rebuild."
|
||||
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
|
||||
clean: clean-am
|
||||
|
||||
clean-am: clean-generic clean-libtool mostlyclean-am
|
||||
|
||||
distclean: distclean-am
|
||||
-rm -f Makefile
|
||||
distclean-am: clean-am distclean-generic
|
||||
|
||||
dvi: dvi-am
|
||||
|
||||
dvi-am:
|
||||
|
||||
html: html-am
|
||||
|
||||
html-am:
|
||||
|
||||
info: info-am
|
||||
|
||||
info-am:
|
||||
|
||||
install-data-am: install-appPYTHON
|
||||
|
||||
install-dvi: install-dvi-am
|
||||
|
||||
install-dvi-am:
|
||||
|
||||
install-exec-am:
|
||||
|
||||
install-html: install-html-am
|
||||
|
||||
install-html-am:
|
||||
|
||||
install-info: install-info-am
|
||||
|
||||
install-info-am:
|
||||
|
||||
install-man:
|
||||
|
||||
install-pdf: install-pdf-am
|
||||
|
||||
install-pdf-am:
|
||||
|
||||
install-ps: install-ps-am
|
||||
|
||||
install-ps-am:
|
||||
|
||||
installcheck-am:
|
||||
|
||||
maintainer-clean: maintainer-clean-am
|
||||
-rm -f Makefile
|
||||
maintainer-clean-am: distclean-am maintainer-clean-generic
|
||||
|
||||
mostlyclean: mostlyclean-am
|
||||
|
||||
mostlyclean-am: mostlyclean-generic mostlyclean-libtool
|
||||
|
||||
pdf: pdf-am
|
||||
|
||||
pdf-am:
|
||||
|
||||
ps: ps-am
|
||||
|
||||
ps-am:
|
||||
|
||||
uninstall-am: uninstall-appPYTHON
|
||||
|
||||
.MAKE: install-am install-strip
|
||||
|
||||
.PHONY: all all-am check check-am clean clean-generic clean-libtool \
|
||||
cscopelist-am ctags-am distclean distclean-generic \
|
||||
distclean-libtool distdir dvi dvi-am html html-am info info-am \
|
||||
install install-am install-appPYTHON install-data \
|
||||
install-data-am install-dvi install-dvi-am install-exec \
|
||||
install-exec-am install-html install-html-am install-info \
|
||||
install-info-am install-man install-pdf install-pdf-am \
|
||||
install-ps install-ps-am install-strip installcheck \
|
||||
installcheck-am installdirs maintainer-clean \
|
||||
maintainer-clean-generic mostlyclean mostlyclean-generic \
|
||||
mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \
|
||||
uninstall-am uninstall-appPYTHON
|
||||
|
||||
|
||||
# Tell versions [3.59,3.63) of GNU make to not export all variables.
|
||||
# Otherwise a system limit (for SysV at least) may be exceeded.
|
||||
.NOEXPORT:
|
||||
18  ipa-client/ipaclient/__init__.py  Normal file
@@ -0,0 +1,18 @@
# Authors: Simo Sorce <ssorce@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
535  ipa-client/ipaclient/ipachangeconf.py  Normal file
@@ -0,0 +1,535 @@
|
||||
#
|
||||
# ipachangeconf - configuration file manipulation classes and functions
|
||||
# partially based on authconfig code
|
||||
# Copyright (c) 1999-2007 Red Hat, Inc.
|
||||
# Author: Simo Sorce <ssorce@redhat.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
import fcntl
|
||||
import os
|
||||
import string
|
||||
import time
|
||||
import shutil
|
||||
|
||||
|
||||
def openLocked(filename, perms):
|
||||
fd = -1
|
||||
try:
|
||||
fd = os.open(filename, os.O_RDWR | os.O_CREAT, perms)
|
||||
|
||||
fcntl.lockf(fd, fcntl.LOCK_EX)
|
||||
except OSError, (errno, strerr):
|
||||
if fd != -1:
|
||||
try:
|
||||
os.close(fd)
|
||||
except OSError:
|
||||
pass
|
||||
raise IOError(errno, strerr)
|
||||
return os.fdopen(fd, "r+")
|
||||
|
||||
|
||||
#TODO: add subsection as a concept
|
||||
# (ex. REALM.NAME = { foo = x bar = y } )
|
||||
#TODO: put section delimiters as separating element of the list
|
||||
# so that we can process multiple sections in one go
|
||||
#TODO: add a comment all but provided options as a section option
|
||||
class IPAChangeConf:
|
||||
|
||||
def __init__(self, name):
|
||||
self.progname = name
|
||||
self.indent = ("", "", "")
|
||||
self.assign = (" = ", "=")
|
||||
self.dassign = self.assign[0]
|
||||
self.comment = ("#",)
|
||||
self.dcomment = self.comment[0]
|
||||
self.eol = ("\n",)
|
||||
self.deol = self.eol[0]
|
||||
self.sectnamdel = ("[", "]")
|
||||
self.subsectdel = ("{", "}")
|
||||
|
||||
def setProgName(self, name):
|
||||
self.progname = name
|
||||
|
||||
def setIndent(self, indent):
|
||||
if type(indent) is tuple:
|
||||
self.indent = indent
|
||||
elif type(indent) is str:
|
||||
self.indent = (indent, )
|
||||
else:
|
||||
raise ValueError('Indent must be a list of strings')
|
||||
|
||||
def setOptionAssignment(self, assign):
|
||||
if type(assign) is tuple:
|
||||
self.assign = assign
|
||||
else:
|
||||
self.assign = (assign, )
|
||||
self.dassign = self.assign[0]
|
||||
|
||||
def setCommentPrefix(self, comment):
|
||||
if type(comment) is tuple:
|
||||
self.comment = comment
|
||||
else:
|
||||
self.comment = (comment, )
|
||||
self.dcomment = self.comment[0]
|
||||
|
||||
def setEndLine(self, eol):
|
||||
if type(eol) is tuple:
|
||||
self.eol = eol
|
||||
else:
|
||||
self.eol = (eol, )
|
||||
self.deol = self.eol[0]
|
||||
|
||||
def setSectionNameDelimiters(self, delims):
|
||||
self.sectnamdel = delims
|
||||
|
||||
def setSubSectionDelimiters(self, delims):
|
||||
self.subsectdel = delims
|
||||
|
||||
def matchComment(self, line):
|
||||
for v in self.comment:
|
||||
if line.lstrip().startswith(v):
|
||||
return line.lstrip()[len(v):]
|
||||
return False
|
||||
|
||||
def matchEmpty(self, line):
|
||||
if line.strip() == "":
|
||||
return True
|
||||
return False
|
||||
|
||||
def matchSection(self, line):
|
||||
cl = "".join(line.strip().split()).lower()
|
||||
if len(self.sectnamdel) != 2:
|
||||
return False
|
||||
if not cl.startswith(self.sectnamdel[0]):
|
||||
return False
|
||||
if not cl.endswith(self.sectnamdel[1]):
|
||||
return False
|
||||
return cl[len(self.sectnamdel[0]):-len(self.sectnamdel[1])]
|
||||
|
||||
def matchSubSection(self, line):
|
||||
if self.matchComment(line):
|
||||
return False
|
||||
|
||||
parts = line.split(self.dassign, 1)
|
||||
if len(parts) < 2:
|
||||
return False
|
||||
|
||||
if parts[1].strip() == self.subsectdel[0]:
|
||||
return parts[0].strip()
|
||||
|
||||
return False
|
||||
|
||||
def matchSubSectionEnd(self, line):
|
||||
if self.matchComment(line):
|
||||
return False
|
||||
|
||||
if line.strip() == self.subsectdel[1]:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def getSectionLine(self, section):
|
||||
if len(self.sectnamdel) != 2:
|
||||
return section
|
||||
return self._dump_line(self.sectnamdel[0],
|
||||
section,
|
||||
self.sectnamdel[1],
|
||||
self.deol)
|
||||
|
||||
def _dump_line(self, *args):
|
||||
return u"".join(unicode(x) for x in args)
|
||||
|
||||
def dump(self, options, level=0):
|
||||
output = []
|
||||
if level >= len(self.indent):
|
||||
level = len(self.indent) - 1
|
||||
|
||||
for o in options:
|
||||
if o['type'] == "section":
|
||||
output.append(self._dump_line(self.sectnamdel[0],
|
||||
o['name'],
|
||||
self.sectnamdel[1]))
|
||||
output.append(self.dump(o['value'], (level + 1)))
|
||||
continue
|
||||
if o['type'] == "subsection":
|
||||
output.append(self._dump_line(self.indent[level],
|
||||
o['name'],
|
||||
self.dassign,
|
||||
self.subsectdel[0]))
|
||||
output.append(self.dump(o['value'], (level + 1)))
|
||||
output.append(self._dump_line(self.indent[level],
|
||||
self.subsectdel[1]))
|
||||
continue
|
||||
if o['type'] == "option":
|
||||
delim = o.get('delim', self.dassign)
|
||||
if delim not in self.assign:
|
||||
raise ValueError('Unknown delim "%s" must be one of "%s"' % (delim, " ".join([d for d in self.assign])))
|
||||
output.append(self._dump_line(self.indent[level],
|
||||
o['name'],
|
||||
delim,
|
||||
o['value']))
|
||||
continue
|
||||
if o['type'] == "comment":
|
||||
output.append(self._dump_line(self.dcomment, o['value']))
|
||||
continue
|
||||
if o['type'] == "empty":
|
||||
output.append('')
|
||||
continue
|
||||
raise SyntaxError('Unknown type: [%s]' % o['type'])
|
||||
|
||||
return self.deol.join(output)
|
||||
|
||||
def parseLine(self, line):
|
||||
|
||||
if self.matchEmpty(line):
|
||||
return {'name': 'empty', 'type': 'empty'}
|
||||
|
||||
value = self.matchComment(line)
|
||||
if value:
|
||||
return {'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': value.rstrip()} # pylint: disable=E1103
|
||||
|
||||
o = dict()
|
||||
parts = line.split(self.dassign, 1)
|
||||
if len(parts) < 2:
|
||||
# The default assign didn't match, try the non-default
|
||||
for d in self.assign[1:]:
|
||||
parts = line.split(d, 1)
|
||||
if len(parts) >= 2:
|
||||
o['delim'] = d
|
||||
break
|
||||
|
||||
if 'delim' not in o:
|
||||
raise SyntaxError('Syntax Error: Unknown line format')
|
||||
|
||||
o.update({'name':parts[0].strip(), 'type':'option', 'value':parts[1].rstrip()})
|
||||
return o
|
||||
|
||||
def findOpts(self, opts, type, name, exclude_sections=False):
|
||||
|
||||
num = 0
|
||||
for o in opts:
|
||||
if o['type'] == type and o['name'] == name:
|
||||
return (num, o)
|
||||
if exclude_sections and (o['type'] == "section" or
|
||||
o['type'] == "subsection"):
|
||||
return (num, None)
|
||||
num += 1
|
||||
return (num, None)
|
||||
|
||||
def commentOpts(self, inopts, level=0):
|
||||
|
||||
opts = []
|
||||
|
||||
if level >= len(self.indent):
|
||||
level = len(self.indent) - 1
|
||||
|
||||
for o in inopts:
|
||||
if o['type'] == 'section':
|
||||
no = self.commentOpts(o['value'], (level + 1))
|
||||
val = self._dump_line(self.dcomment,
|
||||
self.sectnamdel[0],
|
||||
o['name'],
|
||||
self.sectnamdel[1])
|
||||
opts.append({'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': val})
|
||||
for n in no:
|
||||
opts.append(n)
|
||||
continue
|
||||
if o['type'] == 'subsection':
|
||||
no = self.commentOpts(o['value'], (level + 1))
|
||||
val = self._dump_line(self.indent[level],
|
||||
o['name'],
|
||||
self.dassign,
|
||||
self.subsectdel[0])
|
||||
opts.append({'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': val})
|
||||
opts.extend(no)
|
||||
val = self._dump_line(self.indent[level], self.subsectdel[1])
|
||||
opts.append({'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': val})
|
||||
continue
|
||||
if o['type'] == 'option':
|
||||
delim = o.get('delim', self.dassign)
|
||||
if delim not in self.assign:
|
||||
val = self._dump_line(self.indent[level],
|
||||
o['name'],
|
||||
delim,
|
||||
o['value'])
|
||||
opts.append({'name':'comment', 'type':'comment', 'value':val})
|
||||
continue
|
||||
if o['type'] == 'comment':
|
||||
opts.append(o)
|
||||
continue
|
||||
if o['type'] == 'empty':
|
||||
opts.append({'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': ''})
|
||||
continue
|
||||
raise SyntaxError('Unknown type: [%s]' % o['type'])
|
||||
|
||||
return opts
|
||||
|
||||
def mergeOld(self, oldopts, newopts):
|
||||
|
||||
opts = []
|
||||
|
||||
for o in oldopts:
|
||||
if o['type'] == "section" or o['type'] == "subsection":
|
||||
(num, no) = self.findOpts(newopts, o['type'], o['name'])
|
||||
if not no:
|
||||
opts.append(o)
|
||||
continue
|
||||
if no['action'] == "set":
|
||||
mo = self.mergeOld(o['value'], no['value'])
|
||||
opts.append({'name': o['name'],
|
||||
'type': o['type'],
|
||||
'value': mo})
|
||||
continue
|
||||
if no['action'] == "comment":
|
||||
co = self.commentOpts(o['value'])
|
||||
for c in co:
|
||||
opts.append(c)
|
||||
continue
|
||||
if no['action'] == "remove":
|
||||
continue
|
||||
raise SyntaxError('Unknown action: [%s]' % no['action'])
|
||||
|
||||
if o['type'] == "comment" or o['type'] == "empty":
|
||||
opts.append(o)
|
||||
continue
|
||||
|
||||
if o['type'] == "option":
|
||||
(num, no) = self.findOpts(newopts, 'option', o['name'], True)
|
||||
if not no:
|
||||
opts.append(o)
|
||||
continue
|
||||
if no['action'] == 'comment' or no['action'] == 'remove':
|
||||
if (no['value'] is not None and
|
||||
o['value'] is not no['value']):
|
||||
opts.append(o)
|
||||
continue
|
||||
if no['action'] == 'comment':
|
||||
value = self._dump_line(self.dcomment,
|
||||
o['name'],
|
||||
self.dassign,
|
||||
o['value'])
|
||||
opts.append({'name': 'comment',
|
||||
'type': 'comment',
|
||||
'value': value})
|
||||
continue
|
||||
if no['action'] == 'set':
|
||||
opts.append(no)
|
||||
continue
|
||||
if no['action'] == 'addifnotset':
|
||||
opts.append({'name': 'comment', 'type': 'comment',
|
||||
'value': self._dump_line(no['name'],
|
||||
self.dassign,
|
||||
no['value'],
|
||||
u' # modified by IPA'
|
||||
)})
|
||||
opts.append(o)
|
||||
continue
|
||||
raise SyntaxError('Unknown action: [%s]' % no['action'])
|
||||
|
||||
raise SyntaxError('Unknown type: [%s]' % o['type'])
|
||||
|
||||
return opts
|
||||
|
||||
def mergeNew(self, opts, newopts):
|
||||
|
||||
cline = 0
|
||||
|
||||
for no in newopts:
|
||||
|
||||
if no['type'] == "section" or no['type'] == "subsection":
|
||||
(num, o) = self.findOpts(opts, no['type'], no['name'])
|
||||
if not o:
|
||||
if no['action'] == 'set':
|
||||
opts.append(no)
|
||||
continue
|
||||
if no['action'] == "set":
|
||||
self.mergeNew(o['value'], no['value'])
|
||||
continue
|
||||
cline = num + 1
|
||||
continue
|
||||
|
||||
if no['type'] == "option":
|
||||
(num, o) = self.findOpts(opts, no['type'], no['name'], True)
|
||||
if not o:
|
||||
if no['action'] == 'set' or no['action'] == 'addifnotset':
|
||||
opts.append(no)
|
||||
continue
|
||||
cline = num + 1
|
||||
continue
|
||||
|
||||
if no['type'] == "comment" or no['type'] == "empty":
|
||||
opts.insert(cline, no)
|
||||
cline += 1
|
||||
continue
|
||||
|
||||
raise SyntaxError('Unknown type: [%s]' % no['type'])
|
||||
|
||||
def merge(self, oldopts, newopts):
|
||||
|
||||
#Use a two pass strategy
|
||||
#First we create a new opts tree from oldopts removing/commenting
|
||||
# the options as indicated by the contents of newopts
|
||||
#Second we fill in the new opts tree with options as indicated
|
||||
# in the newopts tree (this is because entire (sub)sections may
|
||||
# exist in the newopts that do not exist in oldopts)
|
||||
|
||||
opts = self.mergeOld(oldopts, newopts)
|
||||
self.mergeNew(opts, newopts)
|
||||
return opts
|
||||
|
||||
#TODO: Make parse() recursive?
|
||||
def parse(self, f):
|
||||
|
||||
opts = []
|
||||
sectopts = []
|
||||
section = None
|
||||
subsectopts = []
|
||||
subsection = None
|
||||
curopts = opts
|
||||
fatheropts = opts
|
||||
|
||||
# Read in the old file.
|
||||
for line in f:
|
||||
|
||||
# It's a section start.
|
||||
value = self.matchSection(line)
|
||||
if value:
|
||||
if section is not None:
|
||||
opts.append({'name': section,
|
||||
'type': 'section',
|
||||
'value': sectopts})
|
||||
sectopts = []
|
||||
curopts = sectopts
|
||||
fatheropts = sectopts
|
||||
section = value
|
||||
continue
|
||||
|
||||
# It's a subsection start.
|
||||
value = self.matchSubSection(line)
|
||||
if value:
|
||||
if subsection is not None:
|
||||
raise SyntaxError('nested subsections are not '
|
||||
'supported yet')
|
||||
subsectopts = []
|
||||
curopts = subsectopts
|
||||
subsection = value
|
||||
continue
|
||||
|
||||
value = self.matchSubSectionEnd(line)
|
||||
if value:
|
||||
if subsection is None:
|
||||
raise SyntaxError('Unmatched end subsection terminator '
|
||||
'found')
|
||||
fatheropts.append({'name': subsection,
|
||||
'type': 'subsection',
|
||||
'value': subsectopts})
|
||||
subsection = None
|
||||
curopts = fatheropts
|
||||
continue
|
||||
|
||||
# Copy anything else as is.
|
||||
curopts.append(self.parseLine(line))
|
||||
|
||||
#Add last section if any
|
||||
if len(sectopts) != 0:
|
||||
opts.append({'name': section,
|
||||
'type': 'section',
|
||||
'value': sectopts})
|
||||
|
||||
return opts
|
||||
|
||||
# Write settings to configuration file
|
||||
# file is a path
|
||||
# options is a set of dictionaries in the form:
|
||||
# [{'name': 'foo', 'value': 'bar', 'action': 'set/comment'}]
|
||||
# section is a section name like 'global'
|
||||
def changeConf(self, file, newopts):
|
||||
autosection = False
|
||||
savedsection = None
|
||||
done = False
|
||||
output = ""
|
||||
f = None
|
||||
try:
|
||||
# Do not catch a nonexistent file error
|
||||
# we want to fail in that case
|
||||
shutil.copy2(file, (file + ".ipabkp"))
|
||||
|
||||
f = openLocked(file, 0644)
|
||||
|
||||
oldopts = self.parse(f)
|
||||
|
||||
options = self.merge(oldopts, newopts)
|
||||
|
||||
output = self.dump(options)
|
||||
|
||||
# Write it out and close it.
|
||||
f.seek(0)
|
||||
f.truncate(0)
|
||||
f.write(output)
|
||||
finally:
|
||||
try:
|
||||
if f:
|
||||
f.close()
|
||||
except IOError:
|
||||
pass
|
||||
return True
|
||||
|
||||
# Write settings to new file, backup old
|
||||
# file is a path
|
||||
# options is a set of dictionaries in the form:
|
||||
# [{'name': 'foo', 'value': 'bar', 'action': 'set/comment'}]
|
||||
# section is a section name like 'global'
|
||||
def newConf(self, file, options):
|
||||
autosection = False
|
||||
savedsection = None
|
||||
done = False
|
||||
output = ""
|
||||
f = None
|
||||
try:
|
||||
try:
|
||||
shutil.copy2(file, (file + ".ipabkp"))
|
||||
except IOError, err:
|
||||
if err.errno == 2:
|
||||
# The original file did not exist
|
||||
pass
|
||||
|
||||
f = openLocked(file, 0644)
|
||||
|
||||
# Truncate
|
||||
f.seek(0)
|
||||
f.truncate(0)
|
||||
|
||||
output = self.dump(options)
|
||||
|
||||
f.write(output)
|
||||
finally:
|
||||
try:
|
||||
if f:
|
||||
f.close()
|
||||
except IOError:
|
||||
pass
|
||||
return True
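For context, a minimal usage sketch (not part of the Debian patch) of how an installer-style script might drive IPAChangeConf, using the option-dictionary format described in the changeConf()/newConf() comments above. The tool name, target path, and realm are invented for illustration, and the snippet assumes the same Python 2-era environment the module targets.

from ipaclient.ipachangeconf import IPAChangeConf

conf = IPAChangeConf("example-tool")

# Option dictionaries carry a 'type' (section/option/comment/empty); the
# 'action' key (set/comment/remove/addifnotset) is only consulted when the
# structure is merged into an existing file via changeConf().
opts = [
    {'name': 'comment', 'type': 'comment', 'value': ' managed by example-tool'},
    {'name': 'empty', 'type': 'empty'},
    {'name': 'libdefaults', 'type': 'section', 'action': 'set', 'value': [
        {'name': 'default_realm', 'type': 'option', 'action': 'set',
         'value': 'EXAMPLE.COM'},
    ]},
]

# newConf() locks the target, truncates it and writes the dumped options,
# keeping any previous contents in /tmp/example.conf.ipabkp.
conf.newConf("/tmp/example.conf", opts)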
501  ipa-client/ipaclient/ipadiscovery.py  Normal file
@@ -0,0 +1,501 @@
|
||||
# Authors: Simo Sorce <ssorce@redhat.com>
|
||||
#
|
||||
# Copyright (C) 2007 Red Hat
|
||||
# see file 'COPYING' for use and warranty information
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
import socket
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
from ipapython.ipa_log_manager import root_logger
|
||||
from dns import resolver, rdatatype
|
||||
from dns.exception import DNSException
|
||||
from ipalib import errors
|
||||
from ipapython import ipaldap
|
||||
from ipaplatform.paths import paths
|
||||
from ipapython.ipautil import valid_ip, get_ipa_basedn, realm_to_suffix
|
||||
from ipapython.dn import DN
|
||||
|
||||
NOT_FQDN = -1
|
||||
NO_LDAP_SERVER = -2
|
||||
REALM_NOT_FOUND = -3
|
||||
NOT_IPA_SERVER = -4
|
||||
NO_ACCESS_TO_LDAP = -5
|
||||
NO_TLS_LDAP = -6
|
||||
BAD_HOST_CONFIG = -10
|
||||
UNKNOWN_ERROR = -15
|
||||
|
||||
error_names = {
|
||||
0: 'Success',
|
||||
NOT_FQDN: 'NOT_FQDN',
|
||||
NO_LDAP_SERVER: 'NO_LDAP_SERVER',
|
||||
REALM_NOT_FOUND: 'REALM_NOT_FOUND',
|
||||
NOT_IPA_SERVER: 'NOT_IPA_SERVER',
|
||||
NO_ACCESS_TO_LDAP: 'NO_ACCESS_TO_LDAP',
|
||||
NO_TLS_LDAP: 'NO_TLS_LDAP',
|
||||
BAD_HOST_CONFIG: 'BAD_HOST_CONFIG',
|
||||
UNKNOWN_ERROR: 'UNKNOWN_ERROR',
|
||||
}
|
||||
|
||||
class IPADiscovery(object):
|
||||
|
||||
def __init__(self):
|
||||
self.realm = None
|
||||
self.domain = None
|
||||
self.server = None
|
||||
self.servers = []
|
||||
self.basedn = None
|
||||
|
||||
self.realm_source = None
|
||||
self.domain_source = None
|
||||
self.server_source = None
|
||||
self.basedn_source = None
|
||||
|
||||
def __get_resolver_domains(self):
|
||||
"""
|
||||
Read /etc/resolv.conf and return all the domains found in domain and
|
||||
search.
|
||||
|
||||
Returns a list of (domain, info) pairs. The info contains a reason why
|
||||
the domain is returned.
|
||||
"""
|
||||
domains = []
|
||||
domain = None
|
||||
try:
|
||||
fp = open(paths.RESOLV_CONF, 'r')
|
||||
lines = fp.readlines()
|
||||
fp.close()
|
||||
|
||||
for line in lines:
|
||||
if line.lower().startswith('domain'):
|
||||
domain = (line.split()[-1],
|
||||
'local domain from /etc/resolv.conf')
|
||||
elif line.lower().startswith('search'):
|
||||
domains += [(d, 'search domain from /etc/resolv.conf') for
|
||||
d in line.split()[1:]]
|
||||
except:
|
||||
pass
|
||||
if domain:
|
||||
domains = [domain] + domains
|
||||
return domains
|
||||
|
||||
def getServerName(self):
|
||||
return self.server
|
||||
|
||||
def getDomainName(self):
|
||||
return self.domain
|
||||
|
||||
def getRealmName(self):
|
||||
return self.realm
|
||||
|
||||
def getKDCName(self):
|
||||
return self.kdc
|
||||
|
||||
def getBaseDN(self):
|
||||
return self.basedn
|
||||
|
||||
def check_domain(self, domain, tried, reason):
|
||||
"""
|
||||
Given a domain search it for SRV records, breaking it down to search
|
||||
all subdomains too.
|
||||
|
||||
Returns a tuple (servers, domain) or (None,None) if a SRV record
|
||||
isn't found. servers is a list of servers found. domain is a string.
|
||||
|
||||
:param tried: A set of domains that were tried already
|
||||
:param reason: Reason this domain is searched (included in the log)
|
||||
"""
|
||||
servers = None
|
||||
root_logger.debug('Start searching for LDAP SRV record in "%s" (%s) ' +
|
||||
'and its sub-domains', domain, reason)
|
||||
while not servers:
|
||||
if domain in tried:
|
||||
root_logger.debug("Already searched %s; skipping", domain)
|
||||
break
|
||||
tried.add(domain)
|
||||
|
||||
servers = self.ipadns_search_srv(domain, '_ldap._tcp', 389,
|
||||
break_on_first=False)
|
||||
if servers:
|
||||
return (servers, domain)
|
||||
else:
|
||||
p = domain.find(".")
|
||||
if p == -1: #no ldap server found and last component of the domain already tested
|
||||
return (None, None)
|
||||
domain = domain[p+1:]
|
||||
return (None, None)
|
||||
|
||||
def search(self, domain="", servers="", realm=None, hostname=None, ca_cert_path=None):
|
||||
"""
|
||||
Use DNS discovery to identify valid IPA servers.
|
||||
|
||||
servers may contain an optional list of servers which will be used
|
||||
instead of discovering available LDAP SRV records.
|
||||
|
||||
Returns a constant representing the overall search result.
|
||||
"""
|
||||
root_logger.debug("[IPA Discovery]")
|
||||
root_logger.debug(
|
||||
'Starting IPA discovery with domain=%s, servers=%s, hostname=%s',
|
||||
domain, servers, hostname)
|
||||
|
||||
self.server = None
|
||||
autodiscovered = False
|
||||
|
||||
if not servers:
|
||||
|
||||
if not domain: #domain not provided do full DNS discovery
|
||||
|
||||
# get the local host name
|
||||
if not hostname:
|
||||
hostname = socket.getfqdn()
|
||||
root_logger.debug('Hostname: %s', hostname)
|
||||
if not hostname:
|
||||
return BAD_HOST_CONFIG
|
||||
|
||||
if valid_ip(hostname):
|
||||
return NOT_FQDN
|
||||
|
||||
# first, check for an LDAP server for the local domain
|
||||
p = hostname.find(".")
|
||||
if p == -1: #no domain name
|
||||
return NOT_FQDN
|
||||
domain = hostname[p+1:]
|
||||
|
||||
# Get the list of domains from /etc/resolv.conf, we'll search
|
||||
# them all. We search the domain of our hostname first though.
|
||||
# This is to avoid the situation where domain isn't set in
|
||||
# /etc/resolv.conf and the search list has the hostname domain
|
||||
# not first. We could end up with the wrong SRV record.
|
||||
domains = self.__get_resolver_domains()
|
||||
domains = [(domain, 'domain of the hostname')] + domains
|
||||
tried = set()
|
||||
for domain, reason in domains:
|
||||
servers, domain = self.check_domain(domain, tried, reason)
|
||||
if servers:
|
||||
autodiscovered = True
|
||||
self.domain = domain
|
||||
self.server_source = self.domain_source = (
|
||||
'Discovered LDAP SRV records from %s (%s)' %
|
||||
(domain, reason))
|
||||
break
|
||||
if not self.domain: #no ldap server found
|
||||
root_logger.debug('No LDAP server found')
|
||||
return NO_LDAP_SERVER
|
||||
else:
|
||||
root_logger.debug("Search for LDAP SRV record in %s", domain)
|
||||
servers = self.ipadns_search_srv(domain, '_ldap._tcp', 389,
|
||||
break_on_first=False)
|
||||
if servers:
|
||||
autodiscovered = True
|
||||
self.domain = domain
|
||||
self.server_source = self.domain_source = (
|
||||
'Discovered LDAP SRV records from %s' % domain)
|
||||
else:
|
||||
self.server = None
|
||||
root_logger.debug('No LDAP server found')
|
||||
return NO_LDAP_SERVER
|
||||
|
||||
else:
|
||||
|
||||
root_logger.debug("Server and domain forced")
|
||||
self.domain = domain
|
||||
self.domain_source = self.server_source = 'Forced'
|
||||
|
||||
#search for kerberos
|
||||
root_logger.debug("[Kerberos realm search]")
|
||||
if realm:
|
||||
root_logger.debug("Kerberos realm forced")
|
||||
self.realm = realm
|
||||
self.realm_source = 'Forced'
|
||||
else:
|
||||
realm = self.ipadnssearchkrbrealm()
|
||||
self.realm = realm
|
||||
self.realm_source = (
|
||||
'Discovered Kerberos DNS records from %s' % self.domain)
|
||||
|
||||
if not servers and not realm:
|
||||
return REALM_NOT_FOUND
|
||||
|
||||
self.kdc = self.ipadnssearchkrbkdc()
|
||||
self.kdc_source = (
|
||||
'Discovered Kerberos DNS records from %s' % self.domain)
|
||||
|
||||
# We may have received multiple servers corresponding to the domain
|
||||
# Iterate through all of those to check if it is IPA LDAP server
|
||||
ldapret = [NOT_IPA_SERVER]
|
||||
ldapaccess = True
|
||||
root_logger.debug("[LDAP server check]")
|
||||
valid_servers = []
|
||||
for server in servers:
|
||||
root_logger.debug('Verifying that %s (realm %s) is an IPA server',
|
||||
server, self.realm)
|
||||
# check ldap now
|
||||
ldapret = self.ipacheckldap(server, self.realm, ca_cert_path=ca_cert_path)
|
||||
|
||||
if ldapret[0] == 0:
|
||||
self.server = ldapret[1]
|
||||
self.realm = ldapret[2]
|
||||
self.server_source = self.realm_source = (
|
||||
'Discovered from LDAP DNS records in %s' % self.server)
|
||||
valid_servers.append(server)
|
||||
# verified, we actually talked to the remote server and it
|
||||
# is definitely an IPA server
|
||||
if autodiscovered:
|
||||
# No need to keep verifying servers if we discovered them
|
||||
# via DNS
|
||||
break
|
||||
elif ldapret[0] == NO_ACCESS_TO_LDAP or ldapret[0] == NO_TLS_LDAP:
|
||||
ldapaccess = False
|
||||
valid_servers.append(server)
|
||||
# we may set verified_servers below, we don't have it yet
|
||||
if autodiscovered:
|
||||
# No need to keep verifying servers if we discovered them
|
||||
# via DNS
|
||||
break
|
||||
elif ldapret[0] == NOT_IPA_SERVER:
|
||||
root_logger.warn(
|
||||
'Skip %s: not an IPA server', server)
|
||||
elif ldapret[0] == NO_LDAP_SERVER:
|
||||
root_logger.warn(
|
||||
'Skip %s: LDAP server is not responding, unable to verify if '
|
||||
'this is an IPA server', server)
|
||||
else:
|
||||
root_logger.warn(
|
||||
'Skip %s: cannot verify if this is an IPA server', server)
|
||||
|
||||
# If one of LDAP servers checked rejects access (maybe anonymous
|
||||
# bind is disabled), assume realm and basedn generated off domain.
|
||||
# Note that in case ldapret[0] == 0 and ldapaccess == False (one of
|
||||
# servers didn't provide access but another one succeeded), self.realm
|
||||
# will already be set to a proper value above, self.basedn will be
|
||||
# initialized during the LDAP check itself and we'll skip these two checks.
|
||||
if not ldapaccess and self.realm is None:
|
||||
# Assume realm is the same as domain.upper()
|
||||
self.realm = self.domain.upper()
|
||||
self.realm_source = 'Assumed same as domain'
|
||||
root_logger.debug(
|
||||
"Assuming realm is the same as domain: %s", self.realm)
|
||||
|
||||
if not ldapaccess and self.basedn is None:
|
||||
# Generate suffix from realm
|
||||
self.basedn = realm_to_suffix(self.realm)
|
||||
self.basedn_source = 'Generated from Kerberos realm'
|
||||
root_logger.debug("Generated basedn from realm: %s" % self.basedn)
|
||||
|
||||
root_logger.debug(
|
||||
"Discovery result: %s; server=%s, domain=%s, kdc=%s, basedn=%s",
|
||||
error_names.get(ldapret[0], ldapret[0]),
|
||||
self.server, self.domain, self.kdc, self.basedn)
|
||||
|
||||
root_logger.debug("Validated servers: %s" % ','.join(valid_servers))
|
||||
self.servers = valid_servers
|
||||
|
||||
# If we have any servers left then override the last return value
|
||||
# to indicate success.
|
||||
if valid_servers:
|
||||
self.server = servers[0]
|
||||
ldapret[0] = 0
|
||||
|
||||
return ldapret[0]
|
||||
|
||||
def ipacheckldap(self, thost, trealm, ca_cert_path=None):
|
||||
"""
|
||||
Given a host and kerberos realm verify that it is an IPA LDAP
|
||||
server hosting the realm.
|
||||
|
||||
Returns a list [errno, host, realm] or an empty list on error.
|
||||
Errno is an error number:
|
||||
0 means all ok
|
||||
1 means we could not check the info in LDAP (may happen when
|
||||
anonymous binds are disabled)
|
||||
2 means the server is certainly not an IPA server
|
||||
"""
|
||||
|
||||
lrealms = []
|
||||
|
||||
i = 0
|
||||
|
||||
#now verify the server is really an IPA server
|
||||
try:
|
||||
root_logger.debug("Init LDAP connection to: %s", thost)
|
||||
if ca_cert_path:
|
||||
lh = ipaldap.IPAdmin(thost, protocol='ldap',
|
||||
cacert=ca_cert_path, start_tls=True,
|
||||
no_schema=True, decode_attrs=False,
|
||||
demand_cert=True)
|
||||
else:
|
||||
lh = ipaldap.IPAdmin(thost, protocol='ldap',
|
||||
no_schema=True, decode_attrs=False)
|
||||
try:
|
||||
lh.do_simple_bind(DN(), '')
|
||||
|
||||
# get IPA base DN
|
||||
root_logger.debug("Search LDAP server for IPA base DN")
|
||||
basedn = get_ipa_basedn(lh)
|
||||
except errors.ACIError:
|
||||
root_logger.debug("LDAP Error: Anonymous access not allowed")
|
||||
return [NO_ACCESS_TO_LDAP]
|
||||
except errors.DatabaseError, err:
|
||||
root_logger.error("Error checking LDAP: %s" % err.strerror)
|
||||
# We should only get UNWILLING_TO_PERFORM if the remote LDAP
|
||||
# server has minssf > 0 and we have attempted a non-TLS conn.
|
||||
if ca_cert_path is None:
|
||||
root_logger.debug(
|
||||
"Cannot connect to LDAP server. Check that minssf is "
|
||||
"not enabled")
|
||||
return [NO_TLS_LDAP]
|
||||
else:
|
||||
return [UNKNOWN_ERROR]
|
||||
|
||||
if basedn is None:
|
||||
root_logger.debug("The server is not an IPA server")
|
||||
return [NOT_IPA_SERVER]
|
||||
|
||||
self.basedn = basedn
|
||||
self.basedn_source = 'From IPA server %s' % lh.ldap_uri
|
||||
|
||||
#search and return known realms
|
||||
root_logger.debug(
|
||||
"Search for (objectClass=krbRealmContainer) in %s (sub)",
|
||||
self.basedn)
|
||||
try:
|
||||
lret = lh.get_entries(
|
||||
DN(('cn', 'kerberos'), self.basedn),
|
||||
lh.SCOPE_SUBTREE, "(objectClass=krbRealmContainer)")
|
||||
except errors.NotFound:
|
||||
#something very wrong
|
||||
return [REALM_NOT_FOUND]
|
||||
|
||||
for lres in lret:
|
||||
root_logger.debug("Found: %s", lres.dn)
|
||||
lrealms.append(lres.single_value['cn'])
|
||||
|
||||
if trealm:
|
||||
for r in lrealms:
|
||||
if trealm == r:
|
||||
return [0, thost, trealm]
|
||||
# must match or something is very wrong
|
||||
return [REALM_NOT_FOUND]
|
||||
else:
|
||||
if len(lrealms) != 1:
|
||||
#which one? we can't attach to a multi-realm server without DNS working
|
||||
return [REALM_NOT_FOUND]
|
||||
else:
|
||||
return [0, thost, lrealms[0]]
|
||||
|
||||
#we shouldn't get here
|
||||
return [UNKNOWN_ERROR]
|
||||
|
||||
except errors.DatabaseTimeout:
|
||||
root_logger.debug("LDAP Error: timeout")
|
||||
return [NO_LDAP_SERVER]
|
||||
except errors.NetworkError, err:
|
||||
root_logger.debug("LDAP Error: %s" % err.strerror)
|
||||
return [NO_LDAP_SERVER]
|
||||
except errors.ACIError:
|
||||
root_logger.debug("LDAP Error: Anonymous access not allowed")
|
||||
return [NO_ACCESS_TO_LDAP]
|
||||
except errors.DatabaseError, err:
|
||||
root_logger.debug("Error checking LDAP: %s" % err.strerror)
|
||||
return [UNKNOWN_ERROR]
|
||||
except Exception, err:
|
||||
root_logger.debug("Error checking LDAP: %s" % err)
|
||||
|
||||
return [UNKNOWN_ERROR]
|
||||
|
||||
|
||||
def ipadns_search_srv(self, domain, srv_record_name, default_port,
|
||||
break_on_first=True):
|
||||
"""
|
||||
Search for SRV records in given domain. When no record is found,
|
||||
an empty list is returned
|
||||
|
||||
:param domain: Search domain name
|
||||
:param srv_record_name: SRV record name, e.g. "_ldap._tcp"
|
||||
:param default_port: When default_port is not None, it is being
|
||||
checked with the port in SRV record and if they don't
|
||||
match, the port from SRV record is appended to
|
||||
found hostname in this format: "hostname:port"
|
||||
:param break_on_first: break on the first find and return just one
|
||||
entry
|
||||
"""
|
||||
servers = []
|
||||
|
||||
qname = '%s.%s' % (srv_record_name, domain)
|
||||
|
||||
root_logger.debug("Search DNS for SRV record of %s", qname)
|
||||
|
||||
try:
|
||||
answers = resolver.query(qname, rdatatype.SRV)
|
||||
except DNSException, e:
|
||||
root_logger.debug("DNS record not found: %s", e.__class__.__name__)
|
||||
answers = []
|
||||
|
||||
for answer in answers:
|
||||
root_logger.debug("DNS record found: %s", answer)
|
||||
server = str(answer.target).rstrip(".")
|
||||
if not server:
|
||||
root_logger.debug("Cannot parse the hostname from SRV record: %s", answer)
|
||||
continue
|
||||
if default_port is not None and answer.port != default_port:
|
||||
server = "%s:%s" % (server, str(answer.port))
|
||||
servers.append(server)
|
||||
if break_on_first:
|
||||
break
|
||||
|
||||
return servers
|
||||
|
||||
def ipadnssearchkrbrealm(self, domain=None):
|
||||
realm = None
|
||||
if not domain:
|
||||
domain = self.domain
|
||||
# now, check for a Kerberos realm the local host or domain is in
|
||||
qname = "_kerberos." + domain
|
||||
|
||||
root_logger.debug("Search DNS for TXT record of %s", qname)
|
||||
|
||||
try:
|
||||
answers = resolver.query(qname, rdatatype.TXT)
|
||||
except DNSException, e:
|
||||
root_logger.debug("DNS record not found: %s", e.__class__.__name__)
|
||||
answers = []
|
||||
|
||||
for answer in answers:
|
||||
root_logger.debug("DNS record found: %s", answer)
|
||||
if answer.strings:
|
||||
realm = answer.strings[0]
|
||||
if realm:
|
||||
break
|
||||
return realm
|
||||
|
||||
def ipadnssearchkrbkdc(self, domain=None):
|
||||
kdc = None
|
||||
|
||||
if not domain:
|
||||
domain = self.domain
|
||||
|
||||
kdc = self.ipadns_search_srv(domain, '_kerberos._udp', 88,
|
||||
break_on_first=False)
|
||||
|
||||
if kdc:
|
||||
kdc = ','.join(kdc)
|
||||
else:
|
||||
root_logger.debug("SRV record for KDC not found! Domain: %s" % domain)
|
||||
kdc = None
|
||||
|
||||
return kdc
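For context, a short hypothetical driver (not part of the patch) mirroring how ipa-client-install-style code calls the discovery class; the domain and hostname values are placeholders, and the error constants and accessor methods are the ones defined above.

from ipaclient import ipadiscovery

ds = ipadiscovery.IPADiscovery()
# search() walks DNS SRV/TXT records unless explicit servers are passed;
# it returns 0 on success or one of the negative constants defined above.
ret = ds.search(domain="example.com", hostname="client.example.com")

if ret == 0:
    print("server: %s" % ds.getServerName())
    print("domain: %s" % ds.getDomainName())
    print("realm:  %s" % ds.getRealmName())
    print("basedn: %s" % ds.getBaseDN())
else:
    print("discovery failed: %s" % ipadiscovery.error_names.get(ret, ret))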
223  ipa-client/ipaclient/ntpconf.py  Normal file
@@ -0,0 +1,223 @@
|
||||
# Authors: Karl MacMillan <kmacmillan@redhat.com>
|
||||
#
|
||||
# Copyright (C) 2007 Red Hat
|
||||
# see file 'COPYING' for use and warranty information
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
from ipapython import ipautil
|
||||
import shutil
|
||||
import os
|
||||
from ipaplatform.tasks import tasks
|
||||
from ipaplatform import services
|
||||
from ipaplatform.paths import paths
|
||||
|
||||
ntp_conf = """# Permit time synchronization with our time source, but do not
|
||||
# permit the source to query or modify the service on this system.
|
||||
restrict default kod nomodify notrap nopeer noquery
|
||||
restrict -6 default kod nomodify notrap nopeer noquery
|
||||
|
||||
# Permit all access over the loopback interface. This could
|
||||
# be tightened as well, but to do so would affect some of
|
||||
# the administrative functions.
|
||||
restrict 127.0.0.1
|
||||
restrict -6 ::1
|
||||
|
||||
# Hosts on local network are less restricted.
|
||||
#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
|
||||
|
||||
# Use public servers from the pool.ntp.org project.
|
||||
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
|
||||
server $SERVER
|
||||
|
||||
#broadcast 192.168.1.255 key 42 # broadcast server
|
||||
#broadcastclient # broadcast client
|
||||
#broadcast 224.0.1.1 key 42 # multicast server
|
||||
#multicastclient 224.0.1.1 # multicast client
|
||||
#manycastserver 239.255.254.254 # manycast server
|
||||
#manycastclient 239.255.254.254 key 42 # manycast client
|
||||
|
||||
# Undisciplined Local Clock. This is a fake driver intended for backup
|
||||
# and when no outside source of synchronized time is available.
|
||||
server 127.127.1.0 # local clock
|
||||
#fudge 127.127.1.0 stratum 10
|
||||
|
||||
# Drift file. Put this in a directory which the daemon can write to.
|
||||
# No symbolic links allowed, either, since the daemon updates the file
|
||||
# by creating a temporary in the same directory and then rename()'ing
|
||||
# it to the file.
|
||||
driftfile /var/lib/ntp/drift
|
||||
|
||||
# Key file containing the keys and key identifiers used when operating
|
||||
# with symmetric key cryptography.
|
||||
keys /etc/ntp/keys
|
||||
|
||||
# Specify the key identifiers which are trusted.
|
||||
#trustedkey 4 8 42
|
||||
|
||||
# Specify the key identifier to use with the ntpdc utility.
|
||||
#requestkey 8
|
||||
|
||||
# Specify the key identifier to use with the ntpq utility.
|
||||
#controlkey 8
|
||||
"""
|
||||
|
||||
ntp_sysconfig = """OPTIONS="-x -p /var/run/ntpd.pid"
|
||||
|
||||
# Set to 'yes' to sync hw clock after successful ntpdate
|
||||
SYNC_HWCLOCK=yes
|
||||
|
||||
# Additional options for ntpdate
|
||||
NTPDATE_OPTIONS=""
|
||||
"""
|
||||
ntp_step_tickers = """# Use IPA-provided NTP server for initial time
|
||||
$SERVER
|
||||
"""
|
||||
def __backup_config(path, fstore = None):
|
||||
if fstore:
|
||||
fstore.backup_file(path)
|
||||
else:
|
||||
shutil.copy(path, "%s.ipasave" % (path))
|
||||
|
||||
def __write_config(path, content):
|
||||
fd = open(path, "w")
|
||||
fd.write(content)
|
||||
fd.close()
|
||||
|
||||
def config_ntp(server_fqdn, fstore = None, sysstore = None):
|
||||
path_step_tickers = paths.NTP_STEP_TICKERS
|
||||
path_ntp_conf = paths.NTP_CONF
|
||||
path_ntp_sysconfig = paths.SYSCONFIG_NTPD
|
||||
sub_dict = { }
|
||||
sub_dict["SERVER"] = server_fqdn
|
||||
|
||||
nc = ipautil.template_str(ntp_conf, sub_dict)
|
||||
config_step_tickers = False
|
||||
|
||||
|
||||
if os.path.exists(path_step_tickers):
|
||||
config_step_tickers = True
|
||||
ns = ipautil.template_str(ntp_step_tickers, sub_dict)
|
||||
__backup_config(path_step_tickers, fstore)
|
||||
__write_config(path_step_tickers, ns)
|
||||
tasks.restore_context(path_step_tickers)
|
||||
|
||||
if sysstore:
|
||||
module = 'ntp'
|
||||
sysstore.backup_state(module, "enabled", services.knownservices.ntpd.is_enabled())
|
||||
if config_step_tickers:
|
||||
sysstore.backup_state(module, "step-tickers", True)
|
||||
|
||||
__backup_config(path_ntp_conf, fstore)
|
||||
__write_config(path_ntp_conf, nc)
|
||||
tasks.restore_context(path_ntp_conf)
|
||||
|
||||
__backup_config(path_ntp_sysconfig, fstore)
|
||||
__write_config(path_ntp_sysconfig, ntp_sysconfig)
|
||||
tasks.restore_context(path_ntp_sysconfig)
|
||||
|
||||
# Set the ntpd to start on boot
|
||||
services.knownservices.ntpd.enable()
|
||||
|
||||
# Restart ntpd
|
||||
services.knownservices.ntpd.restart()
|
||||
|
||||
|
||||
def synconce_ntp(server_fqdn):
|
||||
"""
|
||||
Syncs time with specified server using ntpd.
|
||||
Primarily designed to be used before Kerberos setup
|
||||
to get time following the KDC time
|
||||
|
||||
Returns True if sync was successful
|
||||
"""
|
||||
ntpd = paths.NTPD
|
||||
if not os.path.exists(ntpd):
|
||||
return False
|
||||
|
||||
tmp_ntp_conf = ipautil.write_tmp_file('server %s' % server_fqdn)
|
||||
try:
|
||||
ipautil.run([ntpd, '-qgc', tmp_ntp_conf.name])
|
||||
return True
|
||||
except ipautil.CalledProcessError:
|
||||
return False
|
||||
|
||||
|
||||
class NTPConfigurationError(Exception):
|
||||
pass
|
||||
|
||||
class NTPConflictingService(NTPConfigurationError):
|
||||
def __init__(self, message='', conflicting_service=None):
|
||||
super(NTPConflictingService, self).__init__(self, message)
|
||||
self.conflicting_service = conflicting_service
|
||||
|
||||
def check_timedate_services():
|
||||
"""
|
||||
System may contain conflicting services used for time&date synchronization.
|
||||
As IPA server/client supports only ntpd, make sure that other services are
|
||||
not enabled to prevent conflicts. For example when both chronyd and ntpd
|
||||
are enabled, systemd would always start only chronyd to manage system
|
||||
time&date which would make IPA configuration of ntpd ineffective.
|
||||
|
||||
Reference links:
|
||||
https://fedorahosted.org/freeipa/ticket/2974
|
||||
http://fedoraproject.org/wiki/Features/ChronyDefaultNTP
|
||||
"""
|
||||
for service in services.timedate_services:
|
||||
if service == 'ntpd':
|
||||
continue
|
||||
# Make sure that the service is not enabled
|
||||
instance = services.service(service)
|
||||
if instance.is_enabled() or instance.is_running():
|
||||
raise NTPConflictingService(conflicting_service=instance.service_name)
|
||||
|
||||
def force_ntpd(statestore):
|
||||
"""
|
||||
Force ntpd configuration and disable and stop any other conflicting
|
||||
time&date service
|
||||
"""
|
||||
for service in services.timedate_services:
|
||||
if service == 'ntpd':
|
||||
continue
|
||||
instance = services.service(service)
|
||||
enabled = instance.is_enabled()
|
||||
running = instance.is_running()
|
||||
|
||||
if enabled or running:
|
||||
statestore.backup_state(instance.service_name, 'enabled', enabled)
|
||||
statestore.backup_state(instance.service_name, 'running', running)
|
||||
|
||||
if running:
|
||||
instance.stop()
|
||||
|
||||
if enabled:
|
||||
instance.disable()
|
||||
|
||||
def restore_forced_ntpd(statestore):
|
||||
"""
|
||||
Restore from --force-ntpd installation and enable/start service that were
|
||||
disabled/stopped during installation
|
||||
"""
|
||||
for service in services.timedate_services:
|
||||
if service == 'ntpd':
|
||||
continue
|
||||
if statestore.has_state(service):
|
||||
instance = services.service(service)
|
||||
enabled = statestore.restore_state(instance.service_name, 'enabled')
|
||||
running = statestore.restore_state(instance.service_name, 'running')
|
||||
if enabled:
|
||||
instance.enable()
|
||||
if running:
|
||||
instance.start()
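Finally, a hypothetical calling sketch (not part of the patch) of how the client installer would use this module; the server name and sysrestore path are illustrative, and the FileStore/StateFile helpers are assumed to come from ipapython.sysrestore as elsewhere in ipa-client. config_ntp() rewrites /etc/ntp.conf and restarts ntpd, so it needs root.

from ipaclient import ntpconf
from ipapython import sysrestore

fstore = sysrestore.FileStore('/var/lib/ipa-client/sysrestore')
statestore = sysrestore.StateFile('/var/lib/ipa-client/sysrestore')

# Refuse to continue if chronyd or another time service would fight with ntpd.
try:
    ntpconf.check_timedate_services()
except ntpconf.NTPConflictingService as e:
    raise SystemExit("conflicting time service enabled: %s" % e.conflicting_service)

# Point ntpd at the IPA server, back up the touched files, then do a one-shot sync.
ntpconf.config_ntp("ipa.example.com", fstore=fstore, sysstore=statestore)
ntpconf.synconce_ntp("ipa.example.com")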