Imported Upstream version 4.0.5
This commit is contained in:
@@ -1,193 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
|
||||
import logging
|
||||
|
||||
from ipaserver import p11helper as _ipap11helper
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Mapping: PKCS#11 attribute constant -> LDAP (ipk11*) attribute name.
# Entries left commented out exist in PKCS#11 but are not synchronized
# to LDAP by this module.
attrs_id2name = {
    #_ipap11helper.CKA_ALLOWED_MECHANISMS: 'ipk11allowedmechanisms',
    _ipap11helper.CKA_ALWAYS_AUTHENTICATE: 'ipk11alwaysauthenticate',
    _ipap11helper.CKA_ALWAYS_SENSITIVE: 'ipk11alwayssensitive',
    #_ipap11helper.CKA_CHECK_VALUE: 'ipk11checkvalue',
    _ipap11helper.CKA_COPYABLE: 'ipk11copyable',
    _ipap11helper.CKA_DECRYPT: 'ipk11decrypt',
    _ipap11helper.CKA_DERIVE: 'ipk11derive',
    #_ipap11helper.CKA_DESTROYABLE: 'ipk11destroyable',
    _ipap11helper.CKA_ENCRYPT: 'ipk11encrypt',
    #_ipap11helper.CKA_END_DATE: 'ipk11enddate',
    _ipap11helper.CKA_EXTRACTABLE: 'ipk11extractable',
    _ipap11helper.CKA_ID: 'ipk11id',
    #_ipap11helper.CKA_KEY_GEN_MECHANISM: 'ipk11keygenmechanism',
    _ipap11helper.CKA_KEY_TYPE: 'ipk11keytype',
    _ipap11helper.CKA_LABEL: 'ipk11label',
    _ipap11helper.CKA_LOCAL: 'ipk11local',
    _ipap11helper.CKA_MODIFIABLE: 'ipk11modifiable',
    _ipap11helper.CKA_NEVER_EXTRACTABLE: 'ipk11neverextractable',
    _ipap11helper.CKA_PRIVATE: 'ipk11private',
    #_ipap11helper.CKA_PUBLIC_KEY_INFO: 'ipapublickey',
    #_ipap11helper.CKA_PUBLIC_KEY_INFO: 'ipk11publickeyinfo',
    _ipap11helper.CKA_SENSITIVE: 'ipk11sensitive',
    _ipap11helper.CKA_SIGN: 'ipk11sign',
    _ipap11helper.CKA_SIGN_RECOVER: 'ipk11signrecover',
    #_ipap11helper.CKA_START_DATE: 'ipk11startdate',
    #_ipap11helper.CKA_SUBJECT: 'ipk11subject',
    _ipap11helper.CKA_TRUSTED: 'ipk11trusted',
    _ipap11helper.CKA_UNWRAP: 'ipk11unwrap',
    #_ipap11helper.CKA_UNWRAP_TEMPLATE: 'ipk11unwraptemplate',
    _ipap11helper.CKA_VERIFY: 'ipk11verify',
    _ipap11helper.CKA_VERIFY_RECOVER: 'ipk11verifyrecover',
    _ipap11helper.CKA_WRAP: 'ipk11wrap',
    #_ipap11helper.CKA_WRAP_TEMPLATE: 'ipk11wraptemplate',
    _ipap11helper.CKA_WRAP_WITH_TRUSTED: 'ipk11wrapwithtrusted',
}

# Reverse mapping: LDAP attribute name -> PKCS#11 attribute constant.
attrs_name2id = {v: k for k, v in attrs_id2name.items()}
|
||||
|
||||
# attribute:
# http://www.freeipa.org/page/V4/PKCS11_in_LDAP/Schema#ipk11KeyType
#
# mapping table:
# http://www.freeipa.org/page/V4/PKCS11_in_LDAP/Schema#CK_MECHANISM_TYPE
# Maps the LDAP key-type string to the PKCS#11 key type constant.
keytype_name2id = {
    "rsa": _ipap11helper.KEY_TYPE_RSA,
    "aes": _ipap11helper.KEY_TYPE_AES,
}

# Reverse mapping: PKCS#11 key type constant -> LDAP key-type string.
keytype_id2name = {v: k for k, v in keytype_name2id.items()}
|
||||
|
||||
# Maps the LDAP wrapping-mechanism name to the PKCS#11 mechanism constant.
wrappingmech_name2id = {
    "rsaPkcs": _ipap11helper.MECH_RSA_PKCS,
    "rsaPkcsOaep": _ipap11helper.MECH_RSA_PKCS_OAEP,
    "aesKeyWrap": _ipap11helper.MECH_AES_KEY_WRAP,
    "aesKeyWrapPad": _ipap11helper.MECH_AES_KEY_WRAP_PAD,
}

# Reverse mapping: PKCS#11 mechanism constant -> LDAP mechanism name.
wrappingmech_id2name = {v: k for k, v in wrappingmech_name2id.items()}
|
||||
|
||||
|
||||
# Names of ipk11 LDAP attributes which carry boolean values; used to
# convert the LDAP 'TRUE'/'FALSE' strings to Python booleans on access.
# (Idiom fix: set literal instead of set([...]) — one construction step,
# same contents.)
bool_attr_names = {
    'ipk11alwaysauthenticate',
    'ipk11alwayssensitive',
    'ipk11copyable',
    'ipk11decrypt',
    'ipk11derive',
    'ipk11encrypt',
    'ipk11extractable',
    'ipk11local',
    'ipk11modifiable',
    'ipk11neverextractable',
    'ipk11private',
    'ipk11sensitive',
    'ipk11sign',
    'ipk11signrecover',
    'ipk11trusted',
    'ipk11unwrap',
    'ipk11verify',
    'ipk11verifyrecover',
    'ipk11wrap',
    'ipk11wrapwithtrusted',
}
|
||||
|
||||
# Subset of attrs_id2name which is modifiable after object creation;
# read-only PKCS#11 attributes (e.g. CKA_LOCAL) are deliberately absent.
modifiable_attrs_id2name = {
    _ipap11helper.CKA_DECRYPT: 'ipk11decrypt',
    _ipap11helper.CKA_DERIVE: 'ipk11derive',
    _ipap11helper.CKA_ENCRYPT: 'ipk11encrypt',
    _ipap11helper.CKA_EXTRACTABLE: 'ipk11extractable',
    _ipap11helper.CKA_ID: 'ipk11id',
    _ipap11helper.CKA_LABEL: 'ipk11label',
    _ipap11helper.CKA_SENSITIVE: 'ipk11sensitive',
    _ipap11helper.CKA_SIGN: 'ipk11sign',
    _ipap11helper.CKA_SIGN_RECOVER: 'ipk11signrecover',
    _ipap11helper.CKA_UNWRAP: 'ipk11unwrap',
    _ipap11helper.CKA_VERIFY: 'ipk11verify',
    _ipap11helper.CKA_VERIFY_RECOVER: 'ipk11verifyrecover',
    _ipap11helper.CKA_WRAP: 'ipk11wrap',
}

# Reverse mapping: LDAP attribute name -> PKCS#11 attribute constant.
modifiable_attrs_name2id = {v: k for k, v in modifiable_attrs_id2name.items()}
|
||||
|
||||
|
||||
def sync_pkcs11_metadata(name, source, target):
    """Sync modifiable ipk11 metadata from source object to target object.

    :param name: human-readable identifier used only in log messages
    :param source: mapping with ipk11 attribute values to copy from
    :param target: mapping updated in place
    """
    # iterate over list of modifiable PKCS#11 attributes - this prevents us
    # from attempting to set read-only attributes like CKA_LOCAL
    for attr in modifiable_attrs_name2id:
        if attr in source:
            if source[attr] != target[attr]:
                # BUG FIX: the message reads 'from <old> to <new>' but the
                # arguments were passed as (source, target), i.e. (new, old);
                # log the current target value first, the new source value
                # second.
                logger.debug('%s: Updating attribute %s from "%s" to "%s"',
                             name,
                             attr,
                             repr(target[attr]),
                             repr(source[attr]))
                target[attr] = source[attr]
|
||||
|
||||
def populate_pkcs11_metadata(source, target):
    """Populate all ipk11 metadata attributes in target object from source object."""
    # copy every known PKCS#11 attribute that the source actually carries
    present_attrs = (a for a in attrs_name2id if a in source)
    for attr_name in present_attrs:
        target[attr_name] = source[attr_name]
|
||||
|
||||
def ldap2p11helper_api_params(ldap_key):
    """prepare dict with metadata parameters suitable for key unwrapping"""
    # attributes which are only renamed, values passed through unchanged
    direct_param_map = {
        "ipk11label": "label",
        "ipk11id": "id",
        "ipk11copyable": "cka_copyable",
        "ipk11decrypt": "cka_decrypt",
        "ipk11derive": "cka_derive",
        "ipk11encrypt": "cka_encrypt",
        "ipk11extractable": "cka_extractable",
        "ipk11modifiable": "cka_modifiable",
        "ipk11private": "cka_private",
        "ipk11sensitive": "cka_sensitive",
        "ipk11sign": "cka_sign",
        "ipk11unwrap": "cka_unwrap",
        "ipk11verify": "cka_verify",
        "ipk11wrap": "cka_wrap",
        "ipk11wrapwithtrusted": "cka_wrap_with_trusted",
    }

    unwrap_params = {
        p11h_name: ldap_key[ldap_name]
        for ldap_name, p11h_name in direct_param_map.items()
        if ldap_name in ldap_key
    }

    # attributes whose values must be translated through a mapping table
    indirect_param_map = {
        "ipk11keytype": ("key_type", keytype_name2id),
        "ipawrappingmech": ("wrapping_mech", wrappingmech_name2id),
    }

    for ldap_name, (p11h_name, value_map) in indirect_param_map.items():
        if ldap_name in ldap_key:
            unwrap_params[p11h_name] = value_map[ldap_key[ldap_name]]

    return unwrap_params
|
||||
|
||||
|
||||
class AbstractHSM(object):
    """Base class with key-filtering helpers shared by HSM back-ends."""

    def _filter_replica_keys(self, all_keys):
        """Return the subset of all_keys whose ipk11label marks a replica key."""
        return {
            key_id: key
            for key_id, key in all_keys.items()
            if key['ipk11label'].startswith('dnssec-replica:')
        }

    def _filter_zone_keys(self, all_keys):
        """Return the subset of all_keys which are zone keys.

        A zone key is anything that is neither the master key nor a
        replica key (both are recognized by their ipk11label)."""
        zone_keys = {}
        for key_id, key in all_keys.items():
            label = key['ipk11label']
            if label == u'dnssec-master' or label.startswith('dnssec-replica:'):
                continue
            zone_keys[key_id] = key
        return zone_keys
|
||||
@@ -1,223 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
|
||||
from datetime import datetime
|
||||
import logging
|
||||
|
||||
import dns.name
|
||||
import errno
|
||||
import os
|
||||
import shutil
|
||||
import stat
|
||||
|
||||
import ipalib.constants
|
||||
from ipapython.dn import DN
|
||||
from ipapython import ipautil
|
||||
from ipaplatform.paths import paths
|
||||
|
||||
from ipaserver.dnssec.temp import TemporaryDirectory
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Timestamp format used by BIND utilities (dnssec-keyfromlabel -P/-A/-I/-D/-R)
time_bindfmt = '%Y%m%d%H%M%S'

# this daemon should run under ods:named user:group
# user has to be ods because ODSMgr.py sends signal to ods-enforcerd
FILE_PERM = (stat.S_IRUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IWUSR)
DIR_PERM = (stat.S_IRWXU | stat.S_IRWXG)
|
||||
|
||||
class BINDMgr(object):
    """BIND key manager. It does LDAP->BIND key files synchronization.

    One LDAP object with idnsSecKey object class will produce
    single pair of BIND key files.
    """
    def __init__(self, api):
        self.api = api
        # zone name -> {uuid -> LDAP attributes} of known key metadata
        self.ldap_keys = {}
        # zones touched since the last sync()
        self.modified_zones = set()

    def notify_zone(self, zone):
        """Ask BIND via rndc to re-sign the given zone."""
        cmd = ['rndc', 'sign', zone.to_text()]
        result = ipautil.run(cmd, capture_output=True)
        logger.info('%s', result.output_log)

    def dn2zone_name(self, dn):
        """cn=KSK-20140813162153Z-cede9e182fc4af76c4bddbc19123a565,cn=keys,idnsname=test,cn=dns,dc=ipa,dc=example"""
        # verify that metadata object is under DNS sub-tree
        dn = DN(dn)
        container = DN(self.api.env.container_dns, self.api.env.basedn)
        idx = dn.rfind(container)
        assert idx != -1, 'Metadata object %s is not inside %s' % (dn, container)
        assert len(dn[idx - 1]) == 1, 'Multi-valued RDN as zone name is not supported'
        return dns.name.from_text(dn[idx - 1]['idnsname'])

    def time_ldap2bindfmt(self, str_val):
        """Convert LDAP generalized-time string to BIND timestamp format."""
        dt = datetime.strptime(str_val, ipalib.constants.LDAP_GENERALIZED_TIME_FORMAT)
        return dt.strftime(time_bindfmt)

    def dates2params(self, ldap_attrs):
        """Convert LDAP timestamps to list of parameters suitable
        for dnssec-keyfromlabel utility"""
        attr2param = {'idnsseckeypublish': '-P',
                      'idnsseckeyactivate': '-A',
                      'idnsseckeyinactive': '-I',
                      'idnsseckeydelete': '-D'}

        params = []
        for attr, param in attr2param.items():
            params.append(param)
            if attr in ldap_attrs:
                assert len(ldap_attrs[attr]) == 1, 'Timestamp %s is expected to be single-valued' % attr
                params.append(self.time_ldap2bindfmt(ldap_attrs[attr][0]))
            else:
                # dnssec-keyfromlabel expects explicit 'none' for unset dates
                params.append('none')

        return params

    def ldap_event(self, op, uuid, attrs):
        """Record single LDAP event - key addition, deletion or modification.

        Change is only recorded to memory.
        self.sync() has to be called to synchronize change to BIND."""
        assert op == 'add' or op == 'del' or op == 'mod'
        zone = self.dn2zone_name(attrs['dn'])
        self.modified_zones.add(zone)
        zone_keys = self.ldap_keys.setdefault(zone, {})
        if op == 'add':
            logger.info('Key metadata %s added to zone %s',
                        attrs['dn'], zone)
            zone_keys[uuid] = attrs

        elif op == 'del':
            logger.info('Key metadata %s deleted from zone %s',
                        attrs['dn'], zone)
            zone_keys.pop(uuid)

        elif op == 'mod':
            logger.info('Key metadata %s updated in zone %s',
                        attrs['dn'], zone)
            zone_keys[uuid] = attrs

    def install_key(self, zone, uuid, attrs, workdir):
        """Run dnssec-keyfromlabel on given LDAP object.
        :returns: base file name of output files, e.g. Kaaa.test.+008+19719"""
        logger.info('attrs: %s', attrs)
        assert attrs.get('idnsseckeyzone', ['FALSE'])[0] == 'TRUE', \
            'object %s is not a DNS zone key' % attrs['dn']

        uri = "%s;pin-source=%s" % (attrs['idnsSecKeyRef'][0], paths.DNSSEC_SOFTHSM_PIN)
        cmd = [paths.DNSSEC_KEYFROMLABEL, '-K', workdir, '-a', attrs['idnsSecAlgorithm'][0], '-l', uri]
        cmd += self.dates2params(attrs)
        if attrs.get('idnsSecKeySep', ['FALSE'])[0].upper() == 'TRUE':
            cmd += ['-f', 'KSK']
        if attrs.get('idnsSecKeyRevoke', ['FALSE'])[0].upper() == 'TRUE':
            cmd += ['-R', datetime.now().strftime(time_bindfmt)]
        cmd.append(zone.to_text())

        # keys has to be readable by ODS & named
        result = ipautil.run(cmd, capture_output=True)
        basename = result.output.strip()
        private_fn = "%s/%s.private" % (workdir, basename)
        os.chmod(private_fn, FILE_PERM)
        # this is useful mainly for debugging
        with open("%s/%s.uuid" % (workdir, basename), 'w') as uuid_file:
            uuid_file.write(uuid)
        with open("%s/%s.dn" % (workdir, basename), 'w') as dn_file:
            dn_file.write(attrs['dn'])
        # BUG FIX: the docstring always promised the base file name but the
        # method returned None; returning it is backward-compatible (existing
        # callers ignore the value).
        return basename

    def get_zone_dir_name(self, zone):
        """Escape zone name to form suitable for file-system.

        This method has to be equivalent to zr_get_zone_path()
        in bind-dyndb-ldap/zone_register.c."""

        if zone == dns.name.root:
            return "@"

        # strip final (empty) label
        zone = zone.relativize(dns.name.root)
        escaped = ""
        for label in zone:
            for char in label:
                c = ord(char)
                if ((c >= 0x30 and c <= 0x39) or   # digit
                        (c >= 0x41 and c <= 0x5A) or   # uppercase
                        (c >= 0x61 and c <= 0x7A) or   # lowercase
                        c == 0x2D or                   # hyphen
                        c == 0x5F):                    # underscore
                    if (c >= 0x41 and c <= 0x5A):  # downcase
                        c += 0x20
                    escaped += chr(c)
                else:
                    escaped += "%%%02X" % c
            escaped += '.'

        # strip trailing period
        return escaped[:-1]

    def sync_zone(self, zone):
        """Generate key files for one zone in a temp dir, swap them into
        place, and notify BIND."""
        logger.info('Synchronizing zone %s', zone)
        zone_path = os.path.join(paths.BIND_LDAP_DNS_ZONE_WORKDIR,
                                 self.get_zone_dir_name(zone))
        try:
            os.makedirs(zone_path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                # bare raise preserves the original traceback (unlike raise e)
                raise

        # fix HSM permissions
        # TODO: move out
        for prefix, dirs, files in os.walk(paths.DNSSEC_TOKENS_DIR, topdown=True):
            for name in dirs:
                fpath = os.path.join(prefix, name)
                logger.debug('Fixing directory permissions: %s', fpath)
                os.chmod(fpath, DIR_PERM | stat.S_ISGID)
            for name in files:
                fpath = os.path.join(prefix, name)
                logger.debug('Fixing file permissions: %s', fpath)
                os.chmod(fpath, FILE_PERM)
        # TODO: move out

        with TemporaryDirectory(zone_path) as tempdir:
            for uuid, attrs in self.ldap_keys[zone].items():
                self.install_key(zone, uuid, attrs, tempdir)
            # keys were generated in a temporary directory, swap directories
            target_dir = "%s/keys" % zone_path
            try:
                shutil.rmtree(target_dir)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    # bare raise preserves the original traceback
                    raise
            shutil.move(tempdir, target_dir)
            os.chmod(target_dir, DIR_PERM)

        self.notify_zone(zone)

    def sync(self, dnssec_zones):
        """Synchronize list of zones in LDAP with BIND.

        dnssec_zones lists zones which should be processed. All other zones
        will be ignored even though they were modified using ldap_event().

        This filter is useful in cases where LDAP contains DNS zones which
        have old metadata objects and DNSSEC disabled. Such zones must be
        ignored to prevent errors while calling dnssec-keyfromlabel or rndc.
        """
        logger.debug('Key metadata in LDAP: %s', self.ldap_keys)
        logger.debug('Zones modified but skipped during bindmgr.sync: %s',
                     self.modified_zones - dnssec_zones)
        for zone in self.modified_zones.intersection(dnssec_zones):
            self.sync_zone(zone)

        self.modified_zones = set()

    def diff_zl(self, s1, s2):
        """Compute zones present in s1 but not present in s2.

        Returns: List of (uuid, name) tuples with zones present only in s1."""
        s1_extra = s1.uuids - s2.uuids
        removed = [(uuid, name) for (uuid, name) in s1.mapping.items()
                   if uuid in s1_extra]
        return removed
|
||||
@@ -1,196 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
|
||||
import logging
|
||||
|
||||
import ldap.dn
|
||||
import os
|
||||
|
||||
import dns.name
|
||||
|
||||
from ipaplatform.paths import paths
|
||||
from ipapython import ipautil
|
||||
|
||||
from ipaserver.dnssec.syncrepl import SyncReplConsumer
|
||||
from ipaserver.dnssec.odsmgr import ODSMgr
|
||||
from ipaserver.dnssec.bindmgr import BINDMgr
|
||||
|
||||
logger = logging.getLogger(__name__)

# LDAP attribute which switches inline DNSSEC signing on/off for a zone
SIGNING_ATTR = 'idnsSecInlineSigning'
# LDAP attribute holding the object classes of an entry
OBJCLASS_ATTR = 'objectClass'
|
||||
|
||||
|
||||
class KeySyncer(SyncReplConsumer):
    """React to LDAP syncrepl events for DNSSEC-related objects.

    Dispatches idnsZone, idnsSecKey and ipk11PublicKey changes to the
    appropriate ODS / BIND / HSM synchronization helpers.
    """

    def __init__(self, *args, **kwargs):
        # hack: consume our private keyword argument before delegating
        self.api = kwargs['ipa_api']
        del kwargs['ipa_api']

        # DNSSEC master should have OpenDNSSEC installed
        # TODO: Is this the best way?
        if os.environ.get('ISMASTER', '0') == '1':
            self.ismaster = True
            self.odsmgr = ODSMgr()
        else:
            self.ismaster = False

        self.bindmgr = BINDMgr(self.api)
        # set once the initial LDAP dump has been processed
        self.init_done = False
        # zones which currently have DNSSEC signing enabled
        self.dnssec_zones = set()
        SyncReplConsumer.__init__(self, *args, **kwargs)

    def _get_objclass(self, attrs):
        """Get object class.

        Given set of attributes has to have exactly one supported object class.
        """
        supported_objclasses = set(['idnszone', 'idnsseckey', 'ipk11publickey'])
        present_objclasses = set([o.lower() for o in attrs[OBJCLASS_ATTR]]).intersection(supported_objclasses)
        assert len(present_objclasses) == 1, attrs[OBJCLASS_ATTR]
        return present_objclasses.pop()

    def __get_signing_attr(self, attrs):
        """Get SIGNING_ATTR from dictionary with LDAP zone attributes.

        Returned value is normalized to TRUE or FALSE, defaults to FALSE."""
        values = attrs.get(SIGNING_ATTR, ['FALSE'])
        assert len(values) == 1, '%s is expected to be single-valued' \
            % SIGNING_ATTR
        return values[0].upper()

    def __is_dnssec_enabled(self, attrs):
        """Test if LDAP DNS zone with given attributes is DNSSEC enabled."""
        return self.__get_signing_attr(attrs) == 'TRUE'

    def __is_replica_pubkey(self, attrs):
        """Test if attributes describe a replica public key (label prefix)."""
        vals = attrs.get('ipk11label', [])
        if len(vals) != 1:
            return False
        return vals[0].startswith('dnssec-replica:')

    def application_add(self, uuid, dn, newattrs):
        """Dispatch an 'add' syncrepl event by object class."""
        objclass = self._get_objclass(newattrs)
        if objclass == 'idnszone':
            self.zone_add(uuid, dn, newattrs)
        elif objclass == 'idnsseckey':
            self.key_meta_add(uuid, dn, newattrs)
        elif objclass == 'ipk11publickey' and \
                self.__is_replica_pubkey(newattrs):
            self.hsm_master_sync()

    def application_del(self, uuid, dn, oldattrs):
        """Dispatch a 'delete' syncrepl event by object class."""
        objclass = self._get_objclass(oldattrs)
        if objclass == 'idnszone':
            self.zone_del(uuid, dn, oldattrs)
        elif objclass == 'idnsseckey':
            self.key_meta_del(uuid, dn, oldattrs)
        elif objclass == 'ipk11publickey' and \
                self.__is_replica_pubkey(oldattrs):
            self.hsm_master_sync()

    def application_sync(self, uuid, dn, newattrs, oldattrs):
        """Dispatch a 'modify' syncrepl event by object class."""
        objclass = self._get_objclass(oldattrs)
        if objclass == 'idnszone':
            olddn = ldap.dn.str2dn(oldattrs['dn'])
            newdn = ldap.dn.str2dn(newattrs['dn'])
            assert olddn == newdn, 'modrdn operation is not supported'

            # a zone modification matters only when the signing flag flipped
            oldval = self.__get_signing_attr(oldattrs)
            newval = self.__get_signing_attr(newattrs)
            if oldval != newval:
                if self.__is_dnssec_enabled(newattrs):
                    self.zone_add(uuid, olddn, newattrs)
                else:
                    self.zone_del(uuid, olddn, oldattrs)

        elif objclass == 'idnsseckey':
            self.key_metadata_sync(uuid, dn, oldattrs, newattrs)

        elif objclass == 'ipk11publickey' and \
                self.__is_replica_pubkey(newattrs):
            self.hsm_master_sync()

    def syncrepl_refreshdone(self):
        """Initial LDAP dump finished: run a full synchronization."""
        # BUG FIX: typo "sychronizing" -> "synchronizing" in the log message
        logger.info('Initial LDAP dump is done, synchronizing with ODS and '
                    'BIND')
        self.init_done = True
        self.ods_sync()
        self.hsm_replica_sync()
        self.hsm_master_sync()
        self.bindmgr.sync(self.dnssec_zones)

    # idnsSecKey wrapper
    # Assumption: metadata points to the same key blob all the time,
    # i.e. it is not necessary to re-download blobs because of change in DNSSEC
    # metadata - DNSSEC flags or timestamps.
    def key_meta_add(self, uuid, dn, newattrs):
        self.hsm_replica_sync()
        self.bindmgr.ldap_event('add', uuid, newattrs)
        self.bindmgr_sync(self.dnssec_zones)

    def key_meta_del(self, uuid, dn, oldattrs):
        self.bindmgr.ldap_event('del', uuid, oldattrs)
        self.bindmgr_sync(self.dnssec_zones)
        self.hsm_replica_sync()

    def key_metadata_sync(self, uuid, dn, oldattrs, newattrs):
        self.bindmgr.ldap_event('mod', uuid, newattrs)
        self.bindmgr_sync(self.dnssec_zones)

    def bindmgr_sync(self, dnssec_zones):
        """Sync BIND, but only after the initial LDAP dump was processed."""
        if self.init_done:
            self.bindmgr.sync(dnssec_zones)

    # idnsZone wrapper
    def zone_add(self, uuid, dn, newattrs):
        zone = dns.name.from_text(newattrs['idnsname'][0])
        if self.__is_dnssec_enabled(newattrs):
            self.dnssec_zones.add(zone)
        else:
            self.dnssec_zones.discard(zone)

        if not self.ismaster:
            return

        if self.__is_dnssec_enabled(newattrs):
            self.odsmgr.ldap_event('add', uuid, newattrs)
        self.ods_sync()

    def zone_del(self, uuid, dn, oldattrs):
        zone = dns.name.from_text(oldattrs['idnsname'][0])
        self.dnssec_zones.discard(zone)

        if not self.ismaster:
            return

        if self.__is_dnssec_enabled(oldattrs):
            self.odsmgr.ldap_event('del', uuid, oldattrs)
        self.ods_sync()

    def ods_sync(self):
        """Sync OpenDNSSEC; only the master runs ODS."""
        if not self.ismaster:
            return

        if self.init_done:
            self.odsmgr.sync()

    # triggered by modification to idnsSecKey objects
    def hsm_replica_sync(self):
        """Download keys from LDAP to local HSM."""
        if self.ismaster:
            return
        if not self.init_done:
            return
        ipautil.run([paths.IPA_DNSKEYSYNCD_REPLICA])

    # triggered by modification to ipk11PublicKey objects
    def hsm_master_sync(self):
        """Download replica keys from LDAP to local HSM
        & upload master and zone keys to LDAP."""
        if not self.ismaster:
            return
        if not self.init_done:
            return
        ipautil.run([paths.ODS_SIGNER, 'ipa-hsm-update'])
|
||||
@@ -1,455 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
from binascii import hexlify
|
||||
import collections
|
||||
import logging
|
||||
from pprint import pprint
|
||||
|
||||
import ipalib
|
||||
from ipaplatform.paths import paths
|
||||
from ipapython.dn import DN
|
||||
from ipapython import ipaldap
|
||||
from ipapython import ipa_log_manager
|
||||
|
||||
from ipaserver.dnssec.abshsm import (
|
||||
attrs_name2id,
|
||||
AbstractHSM,
|
||||
bool_attr_names,
|
||||
populate_pkcs11_metadata)
|
||||
from ipaserver import p11helper as _ipap11helper
|
||||
import uuid
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def uri_escape(val):
    """convert val to %-notation suitable for ID component in URI

    :param val: byte string to escape
    :returns: text string, one '%XX' group per input byte
    :raises AssertionError: when val is empty
    """
    assert len(val) > 0, "zero-length URI component detected"
    # BUG FIX: on Python 3 hexlify() returns bytes; joining bytes slices
    # with the str separator '%' raises TypeError. Decode once so the
    # function works on both Python 2 and 3.
    hexval = hexlify(val).decode('ascii')
    out = '%'
    out += '%'.join(hexval[i:i + 2] for i in range(0, len(hexval), 2))
    return out
|
||||
|
||||
def ldap_bool(val):
    """Normalize an LDAP boolean to a Python bool.

    Accepts the LDAP strings 'TRUE'/'FALSE' as well as real booleans.

    :raises AssertionError: for any other value
    """
    if val is True or val == 'TRUE':
        return True
    if val is False or val == 'FALSE':
        return False
    raise AssertionError('invalid LDAP boolean "%s"' % val)
|
||||
|
||||
def get_default_attrs(object_classes):
    """Return default PKCS#11 attribute values for given LDAP object classes.

    :param object_classes: iterable of object class names (case-insensitive)
    :returns: dict mapping ipk11 attribute name -> default value; when more
        than one supported class is present the per-class defaults are merged
    :raises AssertionError: when none of the classes is supported
    """
    # object class -> default attribute values mapping
    defaults = {
        u'ipk11publickey': {
            'ipk11copyable': True,
            'ipk11derive': False,
            'ipk11encrypt': False,
            'ipk11local': True,
            'ipk11modifiable': True,
            'ipk11private': True,
            'ipk11trusted': False,
            'ipk11verify': True,
            'ipk11verifyrecover': True,
            'ipk11wrap': False
        },
        u'ipk11privatekey': {
            'ipk11alwaysauthenticate': False,
            'ipk11alwayssensitive': True,
            'ipk11copyable': True,
            'ipk11decrypt': False,
            'ipk11derive': False,
            'ipk11extractable': True,
            'ipk11local': True,
            'ipk11modifiable': True,
            'ipk11neverextractable': False,
            'ipk11private': True,
            'ipk11sensitive': True,
            'ipk11sign': True,
            'ipk11signrecover': True,
            'ipk11unwrap': False,
            'ipk11wrapwithtrusted': False
        },
        u'ipk11secretkey': {
            'ipk11alwaysauthenticate': False,
            'ipk11alwayssensitive': True,
            'ipk11copyable': True,
            'ipk11decrypt': False,
            'ipk11derive': False,
            'ipk11encrypt': False,
            'ipk11extractable': True,
            'ipk11local': True,
            'ipk11modifiable': True,
            'ipk11neverextractable': False,
            'ipk11private': True,
            'ipk11sensitive': True,
            'ipk11sign': False,
            'ipk11trusted': False,
            'ipk11unwrap': True,
            'ipk11verify': False,
            'ipk11wrap': True,
            'ipk11wrapwithtrusted': False
        }
    }

    # get set of supported object classes (idiom: set comprehension and
    # truthiness test instead of explicit loop and len() <= 0)
    present_clss = {cls.lower() for cls in object_classes}
    present_clss.intersection_update(defaults.keys())
    if not present_clss:
        raise AssertionError('none of "%s" object classes are supported' %
                             object_classes)

    result = {}
    for cls in present_clss:
        result.update(defaults[cls])
    return result
|
||||
|
||||
|
||||
# NOTE(review): collections.MutableMapping is a removed alias in Python 3.10+;
# when this code moves to modern Python it must become
# collections.abc.MutableMapping — confirm target interpreter version.
class Key(collections.MutableMapping):
    """abstraction to hide LDAP entry weirdnesses:
        - non-normalized attribute names
        - boolean attributes returned as strings
        - planned entry deletion prevents subsequent use of the instance
    """
    def __init__(self, entry, ldap, ldapkeydb):
        self.entry = entry
        self._delentry = None  # indicates that object was deleted
        self.ldap = ldap
        self.ldapkeydb = ldapkeydb

    def __assert_not_deleted(self):
        # guard: every accessor below must refuse to touch a key that is
        # scheduled for deletion
        assert self.entry and not self._delentry, (
            "attempt to use to-be-deleted entry %s detected"
            % self._delentry.dn)

    def __getitem__(self, key):
        self.__assert_not_deleted()
        value = self.entry.single_value[key]
        # boolean attributes come back from LDAP as 'TRUE'/'FALSE' strings
        if key.lower() in bool_attr_names:
            return ldap_bool(value)
        return value

    def __setitem__(self, key, value):
        self.__assert_not_deleted()
        self.entry[key] = value

    def __delitem__(self, key):
        self.__assert_not_deleted()
        del self.entry[key]

    def __iter__(self):
        """generates list of ipa names of all PKCS#11 attributes present in the object"""
        self.__assert_not_deleted()
        for raw_name in list(self.entry.keys()):
            normalized = raw_name.lower()
            if normalized in attrs_name2id:
                yield normalized

    def __len__(self):
        self.__assert_not_deleted()
        return len(self.entry)

    def __repr__(self):
        if self._delentry:
            return 'deleted entry: %s' % repr(self._delentry)

        # never leak key material into logs/debug output
        sanitized = dict(self.entry)
        for secret_attr in ('ipaPrivateKey', 'ipaPublicKey', 'ipk11publickeyinfo'):
            sanitized.pop(secret_attr, None)
        return repr(sanitized)

    def _cleanup_key(self):
        """remove default values from LDAP entry"""
        default_attrs = get_default_attrs(self.entry['objectclass'])
        sentinel = object()
        for attr in default_attrs:
            if self.get(attr, sentinel) == default_attrs[attr]:
                del self[attr]

    def _update_key(self):
        """remove default values from LDAP entry and write back changes"""
        if self._delentry:
            self._delete_key()
            return

        self._cleanup_key()

        try:
            self.ldap.update_entry(self.entry)
        except ipalib.errors.EmptyModlist:
            # nothing changed - not an error
            pass

    def _delete_key(self):
        """remove key metadata entry from LDAP

        After calling this, the python object is no longer valid and all
        subsequent method calls on it will fail.
        """
        assert not self.entry, (
            "Key._delete_key() called before Key.schedule_deletion()")
        assert self._delentry, "Key._delete_key() called more than once"
        logger.debug('deleting key id 0x%s DN %s from LDAP',
                     hexlify(self._delentry.single_value['ipk11id']),
                     self._delentry.dn)
        self.ldap.delete_entry(self._delentry)
        # drop all references so any further use fails loudly
        self._delentry = None
        self.ldap = None
        self.ldapkeydb = None

    def schedule_deletion(self):
        """schedule key deletion from LDAP

        Calling schedule_deletion() will make this object incompatible with
        normal Key. After that the object must not be read or modified.
        Key metadata will be actually deleted when LdapKeyDB.flush() is called.
        """
        assert not self._delentry, (
            "Key.schedule_deletion() called more than once")
        self._delentry = self.entry
        self.entry = None
|
||||
|
||||
|
||||
class ReplicaKey(Key):
    """Replica public key entry; behaves exactly like Key."""
    # TODO: object class assert
    def __init__(self, entry, ldap, ldapkeydb):
        super(ReplicaKey, self).__init__(entry, ldap, ldapkeydb)
|
||||
|
||||
class MasterKey(Key):
    """Master key entry; each wrapped copy lives in a separate LDAP object."""
    # TODO: object class assert
    def __init__(self, entry, ldap, ldapkeydb):
        super(MasterKey, self).__init__(entry, ldap, ldapkeydb)

    @property
    def wrapped_entries(self):
        """LDAP entires with wrapped data

        One entry = one blob + ipaWrappingKey pointer to unwrapping key"""

        if 'ipaSecretKeyRef' not in self.entry:
            return []

        wrapped = []
        for ref_dn in self.entry['ipaSecretKeyRef']:
            try:
                wrapped.append(self.ldap.get_entry(ref_dn))
            except ipalib.errors.NotFound:
                # dangling reference - skip it
                continue
        return wrapped

    def add_wrapped_data(self, data, wrapping_mech, replica_key_id):
        """Store one wrapped copy of this master key in LDAP and record a
        reference to it on the master key entry."""
        wrapping_key_uri = ('pkcs11:id=%s;type=public'
                            % uri_escape(replica_key_id))
        # TODO: replace this with 'autogenerate' to prevent collisions
        uuid_rdn = DN('ipk11UniqueId=%s' % uuid.uuid1())
        entry_dn = DN(uuid_rdn, self.ldapkeydb.base_dn)
        entry = self.ldap.make_entry(
            entry_dn,
            objectClass=['ipaSecretKeyObject', 'ipk11Object'],
            ipaSecretKey=data,
            ipaWrappingKey=wrapping_key_uri,
            ipaWrappingMech=wrapping_mech)

        logger.info('adding master key 0x%s wrapped with replica key 0x%s to '
                    '%s',
                    hexlify(self['ipk11id']),
                    hexlify(replica_key_id),
                    entry_dn)
        self.ldap.add_entry(entry)
        # first wrapped copy: the entry also needs the reference object class
        if 'ipaSecretKeyRef' not in self.entry:
            self.entry['objectClass'] += ['ipaSecretKeyRefObject']
        self.entry.setdefault('ipaSecretKeyRef', []).append(entry_dn)
|
||||
|
||||
|
||||
class LdapKeyDB(AbstractHSM):
|
||||
def __init__(self, ldap, base_dn):
    """Key database backed by LDAP.

    :param ldap: LDAP connection used for all reads and writes
    :param base_dn: DN under which key entries are stored
    """
    self.ldap = ldap
    self.base_dn = base_dn
    # lazily-filled caches; reset to None by flush()
    self.cache_replica_pubkeys_wrap = None
    self.cache_masterkeys = None
    self.cache_zone_keypairs = None
|
||||
|
||||
def _get_key_dict(self, key_type, ldap_filter):
    """Search LDAP and build {ipk11id: key_type instance} for matching entries.

    :param key_type: Key subclass used to wrap each LDAP entry
    :param ldap_filter: LDAP filter string selecting the entries
    """
    try:
        ldap_entries = self.ldap.get_entries(base_dn=self.base_dn,
                                             filter=ldap_filter)
    except ipalib.errors.NotFound:
        return {}

    keys = {}
    for ldap_entry in ldap_entries:
        # add default values not present in LDAP
        key = key_type(ldap_entry, self.ldap, self)
        default_attrs = get_default_attrs(key.entry['objectclass'])
        for attr_name in default_attrs:
            key.setdefault(attr_name, default_attrs[attr_name])

        assert 'ipk11id' in key, 'key is missing ipk11Id in %s' % key.entry.dn
        key_id = key['ipk11id']
        assert key_id not in keys, 'duplicate ipk11Id=0x%s in "%s" and "%s"' % (hexlify(key_id), key.entry.dn, keys[key_id].entry.dn)
        assert 'ipk11label' in key, 'key "%s" is missing ipk11Label' % key.entry.dn
        assert 'objectclass' in key.entry, 'key "%s" is missing objectClass attribute' % key.entry.dn

        keys[key_id] = key

    # write back normalization done while filling in defaults
    self._update_keys()
    return keys
|
||||
|
||||
def _update_keys(self):
|
||||
for cache in [self.cache_masterkeys, self.cache_replica_pubkeys_wrap,
|
||||
self.cache_zone_keypairs]:
|
||||
if cache:
|
||||
for key in cache.values():
|
||||
key._update_key()
|
||||
|
||||
def flush(self):
|
||||
"""write back content of caches to LDAP"""
|
||||
self._update_keys()
|
||||
self.cache_masterkeys = None
|
||||
self.cache_replica_pubkeys_wrap = None
|
||||
self.cache_zone_keypairs = None
|
||||
|
||||
def _import_keys_metadata(self, source_keys):
|
||||
"""import key metadata from Key-compatible objects
|
||||
|
||||
metadata from multiple source keys can be imported into single LDAP
|
||||
object
|
||||
|
||||
:param: source_keys is iterable of (Key object, PKCS#11 object class)"""
|
||||
|
||||
entry_dn = DN('ipk11UniqueId=autogenerate', self.base_dn)
|
||||
entry = self.ldap.make_entry(entry_dn, objectClass=['ipk11Object'])
|
||||
new_key = Key(entry, self.ldap, self)
|
||||
|
||||
for source_key, pkcs11_class in source_keys:
|
||||
if pkcs11_class == _ipap11helper.KEY_CLASS_SECRET_KEY:
|
||||
entry['objectClass'].append('ipk11SecretKey')
|
||||
elif pkcs11_class == _ipap11helper.KEY_CLASS_PUBLIC_KEY:
|
||||
entry['objectClass'].append('ipk11PublicKey')
|
||||
elif pkcs11_class == _ipap11helper.KEY_CLASS_PRIVATE_KEY:
|
||||
entry['objectClass'].append('ipk11PrivateKey')
|
||||
else:
|
||||
raise AssertionError('unsupported object class %s' % pkcs11_class)
|
||||
|
||||
populate_pkcs11_metadata(source_key, new_key)
|
||||
new_key._cleanup_key()
|
||||
return new_key
|
||||
|
||||
def import_master_key(self, mkey):
|
||||
new_key = self._import_keys_metadata(
|
||||
[(mkey, _ipap11helper.KEY_CLASS_SECRET_KEY)])
|
||||
self.ldap.add_entry(new_key.entry)
|
||||
logger.debug('imported master key metadata: %s', new_key.entry)
|
||||
|
||||
def import_zone_key(self, pubkey, pubkey_data, privkey,
|
||||
privkey_wrapped_data, wrapping_mech, master_key_id):
|
||||
new_key = self._import_keys_metadata(
|
||||
[(pubkey, _ipap11helper.KEY_CLASS_PUBLIC_KEY),
|
||||
(privkey, _ipap11helper.KEY_CLASS_PRIVATE_KEY)])
|
||||
|
||||
new_key.entry['objectClass'].append('ipaPrivateKeyObject')
|
||||
new_key.entry['ipaPrivateKey'] = privkey_wrapped_data
|
||||
new_key.entry['ipaWrappingKey'] = 'pkcs11:id=%s;type=secret-key' \
|
||||
% uri_escape(master_key_id)
|
||||
new_key.entry['ipaWrappingMech'] = wrapping_mech
|
||||
|
||||
new_key.entry['objectClass'].append('ipaPublicKeyObject')
|
||||
new_key.entry['ipaPublicKey'] = pubkey_data
|
||||
|
||||
self.ldap.add_entry(new_key.entry)
|
||||
logger.debug('imported zone key id: 0x%s', hexlify(new_key['ipk11id']))
|
||||
|
||||
@property
|
||||
def replica_pubkeys_wrap(self):
|
||||
if self.cache_replica_pubkeys_wrap:
|
||||
return self.cache_replica_pubkeys_wrap
|
||||
|
||||
keys = self._filter_replica_keys(
|
||||
self._get_key_dict(ReplicaKey,
|
||||
'(&(objectClass=ipk11PublicKey)(ipk11Wrap=TRUE)(objectClass=ipaPublicKeyObject))'))
|
||||
|
||||
self.cache_replica_pubkeys_wrap = keys
|
||||
return keys
|
||||
|
||||
@property
|
||||
def master_keys(self):
|
||||
if self.cache_masterkeys:
|
||||
return self.cache_masterkeys
|
||||
|
||||
keys = self._get_key_dict(MasterKey,
|
||||
'(&(objectClass=ipk11SecretKey)(|(ipk11UnWrap=TRUE)(!(ipk11UnWrap=*)))(ipk11Label=dnssec-master))')
|
||||
for key in keys.values():
|
||||
prefix = 'dnssec-master'
|
||||
assert key['ipk11label'] == prefix, \
|
||||
'secret key dn="%s" ipk11id=0x%s ipk11label="%s" with ipk11UnWrap = TRUE does not have '\
|
||||
'"%s" key label' % (
|
||||
key.entry.dn,
|
||||
hexlify(key['ipk11id']),
|
||||
str(key['ipk11label']),
|
||||
prefix)
|
||||
|
||||
self.cache_masterkeys = keys
|
||||
return keys
|
||||
|
||||
@property
|
||||
def zone_keypairs(self):
|
||||
if self.cache_zone_keypairs:
|
||||
return self.cache_zone_keypairs
|
||||
|
||||
self.cache_zone_keypairs = self._filter_zone_keys(
|
||||
self._get_key_dict(Key,
|
||||
'(&(objectClass=ipk11PrivateKey)(objectClass=ipaPrivateKeyObject)(objectClass=ipk11PublicKey)(objectClass=ipaPublicKeyObject))'))
|
||||
|
||||
return self.cache_zone_keypairs
|
||||
|
||||
if __name__ == '__main__':
|
||||
# this is debugging mode
|
||||
# print information we think are useful to stdout
|
||||
# other garbage goes via logger to stderr
|
||||
ipa_log_manager.standard_logging_setup(debug=True)
|
||||
|
||||
# IPA framework initialization
|
||||
# no logging to file
|
||||
ipalib.api.bootstrap(in_server=True, log=None, confdir=paths.ETC_IPA)
|
||||
ipalib.api.finalize()
|
||||
|
||||
# LDAP initialization
|
||||
dns_dn = DN(ipalib.api.env.container_dns, ipalib.api.env.basedn)
|
||||
ldap = ipaldap.LDAPClient(ipalib.api.env.ldap_uri)
|
||||
logger.debug('Connecting to LDAP')
|
||||
# GSSAPI will be used, used has to be kinited already
|
||||
ldap.gssapi_bind()
|
||||
logger.debug('Connected')
|
||||
|
||||
ldapkeydb = LdapKeyDB(ldap, DN(('cn', 'keys'),
|
||||
('cn', 'sec'),
|
||||
ipalib.api.env.container_dns,
|
||||
ipalib.api.env.basedn))
|
||||
|
||||
print('replica public keys: CKA_WRAP = TRUE')
|
||||
print('====================================')
|
||||
for pubkey_id, pubkey in ldapkeydb.replica_pubkeys_wrap.items():
|
||||
print(hexlify(pubkey_id))
|
||||
pprint(pubkey)
|
||||
|
||||
print('')
|
||||
print('master keys')
|
||||
print('===========')
|
||||
for mkey_id, mkey in ldapkeydb.master_keys.items():
|
||||
print(hexlify(mkey_id))
|
||||
pprint(mkey)
|
||||
|
||||
print('')
|
||||
print('zone key pairs')
|
||||
print('==============')
|
||||
for key_id, key in ldapkeydb.zone_keypairs.items():
|
||||
print(hexlify(key_id))
|
||||
pprint(key)
|
||||
@@ -1,227 +0,0 @@
|
||||
#!/usr/bin/python2
|
||||
#
|
||||
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
from binascii import hexlify
|
||||
import collections
|
||||
import os
|
||||
from pprint import pprint
|
||||
|
||||
from ipalib.constants import SOFTHSM_DNSSEC_TOKEN_LABEL
|
||||
from ipaplatform.paths import paths
|
||||
from ipaserver import p11helper as _ipap11helper
|
||||
from ipaserver.dnssec.abshsm import (attrs_name2id, attrs_id2name, AbstractHSM,
|
||||
keytype_id2name, keytype_name2id,
|
||||
ldap2p11helper_api_params)
|
||||
|
||||
|
||||
private_key_api_params = set(["label", "id", "data", "unwrapping_key",
|
||||
"wrapping_mech", "key_type", "cka_always_authenticate", "cka_copyable",
|
||||
"cka_decrypt", "cka_derive", "cka_extractable", "cka_modifiable",
|
||||
"cka_private", "cka_sensitive", "cka_sign", "cka_sign_recover",
|
||||
"cka_unwrap", "cka_wrap_with_trusted"])
|
||||
|
||||
public_key_api_params = set(["label", "id", "data", "cka_copyable",
|
||||
"cka_derive", "cka_encrypt", "cka_modifiable", "cka_private",
|
||||
"cka_trusted", "cka_verify", "cka_verify_recover", "cka_wrap"])
|
||||
|
||||
class Key(collections.MutableMapping):
|
||||
def __init__(self, p11, handle):
|
||||
self.p11 = p11
|
||||
self.handle = handle
|
||||
# sanity check CKA_ID and CKA_LABEL
|
||||
try:
|
||||
cka_id = self.p11.get_attribute(handle, _ipap11helper.CKA_ID)
|
||||
assert len(cka_id) != 0, 'ipk11id length should not be 0'
|
||||
except _ipap11helper.NotFound:
|
||||
raise _ipap11helper.NotFound('key without ipk11id: handle %s' % handle)
|
||||
|
||||
try:
|
||||
cka_label = self.p11.get_attribute(handle, _ipap11helper.CKA_LABEL)
|
||||
assert len(cka_label) != 0, 'ipk11label length should not be 0'
|
||||
|
||||
except _ipap11helper.NotFound:
|
||||
raise _ipap11helper.NotFound('key without ipk11label: id 0x%s'
|
||||
% hexlify(cka_id))
|
||||
|
||||
def __getitem__(self, key):
|
||||
key = key.lower()
|
||||
try:
|
||||
value = self.p11.get_attribute(self.handle, attrs_name2id[key])
|
||||
if key == 'ipk11keytype':
|
||||
value = keytype_id2name[value]
|
||||
return value
|
||||
except _ipap11helper.NotFound:
|
||||
raise KeyError()
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
key = key.lower()
|
||||
if key == 'ipk11keytype':
|
||||
value = keytype_name2id[value]
|
||||
|
||||
return self.p11.set_attribute(self.handle, attrs_name2id[key], value)
|
||||
|
||||
def __delitem__(self, key):
|
||||
raise _ipap11helper.P11HelperException('__delitem__ is not supported')
|
||||
|
||||
def __iter__(self):
|
||||
"""generates list of ipa names of all attributes present in the object"""
|
||||
for pkcs11_id, ipa_name in attrs_id2name.items():
|
||||
try:
|
||||
self.p11.get_attribute(self.handle, pkcs11_id)
|
||||
except _ipap11helper.NotFound:
|
||||
continue
|
||||
|
||||
yield ipa_name
|
||||
|
||||
def __len__(self):
|
||||
cnt = 0
|
||||
for _attr in self:
|
||||
cnt += 1
|
||||
return cnt
|
||||
|
||||
def __str__(self):
|
||||
return str(dict(self))
|
||||
|
||||
def __repr__(self):
|
||||
return self.__str__()
|
||||
|
||||
|
||||
class LocalHSM(AbstractHSM):
|
||||
def __init__(self, library, label, pin):
|
||||
self.cache_replica_pubkeys = None
|
||||
self.p11 = _ipap11helper.P11_Helper(label, pin, library)
|
||||
|
||||
def __del__(self):
|
||||
self.p11.finalize()
|
||||
|
||||
def find_keys(self, **kwargs):
|
||||
"""Return dict with Key objects matching given criteria.
|
||||
|
||||
CKA_ID is used as key so all matching objects have to have unique ID."""
|
||||
|
||||
# this is a hack for old p11-kit URI parser
|
||||
# see https://bugs.freedesktop.org/show_bug.cgi?id=85057
|
||||
if 'uri' in kwargs:
|
||||
kwargs['uri'] = kwargs['uri'].replace('type=', 'object-type=')
|
||||
|
||||
handles = self.p11.find_keys(**kwargs)
|
||||
keys = {}
|
||||
for h in handles:
|
||||
key = Key(self.p11, h)
|
||||
o_id = key['ipk11id']
|
||||
assert o_id not in keys, 'duplicate ipk11Id = 0x%s; keys = %s' % (
|
||||
hexlify(o_id), keys)
|
||||
keys[o_id] = key
|
||||
|
||||
return keys
|
||||
|
||||
@property
|
||||
def replica_pubkeys(self):
|
||||
return self._filter_replica_keys(
|
||||
self.find_keys(objclass=_ipap11helper.KEY_CLASS_PUBLIC_KEY))
|
||||
|
||||
@property
|
||||
def replica_pubkeys_wrap(self):
|
||||
return self._filter_replica_keys(
|
||||
self.find_keys(objclass=_ipap11helper.KEY_CLASS_PUBLIC_KEY,
|
||||
cka_wrap=True))
|
||||
|
||||
@property
|
||||
def master_keys(self):
|
||||
"""Get all usable DNSSEC master keys"""
|
||||
keys = self.find_keys(objclass=_ipap11helper.KEY_CLASS_SECRET_KEY, label=u'dnssec-master', cka_unwrap=True)
|
||||
|
||||
for key in keys.values():
|
||||
prefix = 'dnssec-master'
|
||||
assert key['ipk11label'] == prefix, \
|
||||
'secret key ipk11id=0x%s ipk11label="%s" with ipk11UnWrap = TRUE does not have '\
|
||||
'"%s" key label' % (hexlify(key['ipk11id']),
|
||||
str(key['ipk11label']), prefix)
|
||||
|
||||
return keys
|
||||
|
||||
@property
|
||||
def active_master_key(self):
|
||||
"""Get one active DNSSEC master key suitable for key wrapping"""
|
||||
keys = self.find_keys(objclass=_ipap11helper.KEY_CLASS_SECRET_KEY,
|
||||
label=u'dnssec-master', cka_wrap=True, cka_unwrap=True)
|
||||
assert len(keys) > 0, "DNSSEC master key with UN/WRAP = TRUE not found"
|
||||
return keys.popitem()[1]
|
||||
|
||||
@property
|
||||
def zone_pubkeys(self):
|
||||
return self._filter_zone_keys(
|
||||
self.find_keys(objclass=_ipap11helper.KEY_CLASS_PUBLIC_KEY))
|
||||
|
||||
@property
|
||||
def zone_privkeys(self):
|
||||
return self._filter_zone_keys(
|
||||
self.find_keys(objclass=_ipap11helper.KEY_CLASS_PRIVATE_KEY))
|
||||
|
||||
|
||||
def import_public_key(self, source, data):
|
||||
params = ldap2p11helper_api_params(source)
|
||||
# filter out params inappropriate for public keys
|
||||
for par in set(params).difference(public_key_api_params):
|
||||
del params[par]
|
||||
params['data'] = data
|
||||
|
||||
h = self.p11.import_public_key(**params)
|
||||
return Key(self.p11, h)
|
||||
|
||||
def import_private_key(self, source, data, unwrapping_key):
|
||||
params = ldap2p11helper_api_params(source)
|
||||
# filter out params inappropriate for private keys
|
||||
for par in set(params).difference(private_key_api_params):
|
||||
del params[par]
|
||||
params['data'] = data
|
||||
params['unwrapping_key'] = unwrapping_key.handle
|
||||
|
||||
h = self.p11.import_wrapped_private_key(**params)
|
||||
return Key(self.p11, h)
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if 'SOFTHSM2_CONF' not in os.environ:
|
||||
os.environ['SOFTHSM2_CONF'] = paths.DNSSEC_SOFTHSM2_CONF
|
||||
localhsm = LocalHSM(paths.LIBSOFTHSM2_SO, SOFTHSM_DNSSEC_TOKEN_LABEL,
|
||||
open(paths.DNSSEC_SOFTHSM_PIN).read())
|
||||
|
||||
print('replica public keys: CKA_WRAP = TRUE')
|
||||
print('====================================')
|
||||
for pubkey_id, pubkey in localhsm.replica_pubkeys_wrap.items():
|
||||
print(hexlify(pubkey_id))
|
||||
pprint(pubkey)
|
||||
|
||||
print('')
|
||||
print('replica public keys: all')
|
||||
print('========================')
|
||||
for pubkey_id, pubkey in localhsm.replica_pubkeys.items():
|
||||
print(hexlify(pubkey_id))
|
||||
pprint(pubkey)
|
||||
|
||||
print('')
|
||||
print('master keys')
|
||||
print('===========')
|
||||
for mkey_id, mkey in localhsm.master_keys.items():
|
||||
print(hexlify(mkey_id))
|
||||
pprint(mkey)
|
||||
|
||||
print('')
|
||||
print('zone public keys')
|
||||
print('================')
|
||||
for key_id, key in localhsm.zone_pubkeys.items():
|
||||
print(hexlify(key_id))
|
||||
pprint(key)
|
||||
|
||||
print('')
|
||||
print('zone private keys')
|
||||
print('=================')
|
||||
for key_id, key in localhsm.zone_privkeys.items():
|
||||
print(hexlify(key_id))
|
||||
pprint(key)
|
||||
@@ -1,212 +0,0 @@
|
||||
#!/usr/bin/python2
|
||||
#
|
||||
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
|
||||
import logging
|
||||
|
||||
import dns.name
|
||||
try:
|
||||
from xml.etree import cElementTree as etree
|
||||
except ImportError:
|
||||
from xml.etree import ElementTree as etree
|
||||
|
||||
from ipapython import ipa_log_manager, ipautil
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# hack: zone object UUID is stored as path to imaginary zone file
|
||||
ENTRYUUID_PREFIX = "/var/lib/ipa/dns/zone/entryUUID/"
|
||||
ENTRYUUID_PREFIX_LEN = len(ENTRYUUID_PREFIX)
|
||||
|
||||
|
||||
class ZoneListReader(object):
|
||||
def __init__(self):
|
||||
self.names = set() # dns.name
|
||||
self.uuids = set() # UUID strings
|
||||
self.mapping = dict() # {UUID: dns.name}
|
||||
|
||||
def _add_zone(self, name, zid):
|
||||
"""Add zone & UUID to internal structures.
|
||||
|
||||
Zone with given name and UUID must not exist."""
|
||||
# detect duplicate zone names
|
||||
name = dns.name.from_text(name)
|
||||
assert name not in self.names, \
|
||||
'duplicate name (%s, %s) vs. %s' % (name, zid, self.mapping)
|
||||
# duplicate non-None zid is not allowed
|
||||
assert not zid or zid not in self.uuids, \
|
||||
'duplicate UUID (%s, %s) vs. %s' % (name, zid, self.mapping)
|
||||
|
||||
self.names.add(name)
|
||||
self.uuids.add(zid)
|
||||
self.mapping[zid] = name
|
||||
|
||||
def _del_zone(self, name, zid):
|
||||
"""Remove zone & UUID from internal structures.
|
||||
|
||||
Zone with given name and UUID must exist.
|
||||
"""
|
||||
name = dns.name.from_text(name)
|
||||
assert zid is not None
|
||||
assert name in self.names, \
|
||||
'name (%s, %s) does not exist in %s' % (name, zid, self.mapping)
|
||||
assert zid in self.uuids, \
|
||||
'UUID (%s, %s) does not exist in %s' % (name, zid, self.mapping)
|
||||
assert zid in self.mapping and name == self.mapping[zid], \
|
||||
'pair {%s: %s} does not exist in %s' % (zid, name, self.mapping)
|
||||
|
||||
self.names.remove(name)
|
||||
self.uuids.remove(zid)
|
||||
del self.mapping[zid]
|
||||
|
||||
|
||||
class ODSZoneListReader(ZoneListReader):
|
||||
"""One-shot parser for ODS zonelist.xml."""
|
||||
def __init__(self, zonelist_text):
|
||||
super(ODSZoneListReader, self).__init__()
|
||||
root = etree.fromstring(zonelist_text)
|
||||
self._parse_zonelist(root)
|
||||
|
||||
def _parse_zonelist(self, root):
|
||||
"""iterate over Zone elements with attribute 'name' and
|
||||
add IPA zones to self.zones"""
|
||||
if not root.tag == 'ZoneList':
|
||||
raise ValueError(root.tag)
|
||||
for zone_xml in root.findall('./Zone[@name]'):
|
||||
name, zid = self._parse_ipa_zone(zone_xml)
|
||||
self._add_zone(name, zid)
|
||||
|
||||
def _parse_ipa_zone(self, zone_xml):
|
||||
"""Extract zone name, input adapter and detect IPA zones.
|
||||
|
||||
IPA zones have contains Adapters/Input/Adapter element with
|
||||
attribute type = "File" and with value prefixed with ENTRYUUID_PREFIX.
|
||||
|
||||
Returns:
|
||||
tuple (zone name, ID)
|
||||
"""
|
||||
name = zone_xml.get('name')
|
||||
zids = []
|
||||
for in_adapter in zone_xml.findall(
|
||||
'./Adapters/Input/Adapter[@type="File"]'):
|
||||
path = in_adapter.text
|
||||
if path.startswith(ENTRYUUID_PREFIX):
|
||||
# strip prefix from path
|
||||
zids.append(path[ENTRYUUID_PREFIX_LEN:])
|
||||
|
||||
if len(zids) != 1:
|
||||
raise ValueError('only IPA zones are supported: {}'.format(
|
||||
etree.tostring(zone_xml)))
|
||||
|
||||
return name, zids[0]
|
||||
|
||||
|
||||
class LDAPZoneListReader(ZoneListReader):
|
||||
def __init__(self):
|
||||
super(LDAPZoneListReader, self).__init__()
|
||||
|
||||
def process_ipa_zone(self, op, uuid, zone_ldap):
|
||||
assert (op == 'add' or op == 'del'), 'unsupported op %s' % op
|
||||
assert uuid is not None
|
||||
assert 'idnsname' in zone_ldap, \
|
||||
'LDAP zone UUID %s without idnsName' % uuid
|
||||
assert len(zone_ldap['idnsname']) == 1, \
|
||||
'LDAP zone UUID %s with len(idnsname) != 1' % uuid
|
||||
|
||||
if op == 'add':
|
||||
self._add_zone(zone_ldap['idnsname'][0], uuid)
|
||||
elif op == 'del':
|
||||
self._del_zone(zone_ldap['idnsname'][0], uuid)
|
||||
|
||||
|
||||
class ODSMgr(object):
|
||||
"""OpenDNSSEC zone manager. It does LDAP->ODS synchronization.
|
||||
|
||||
Zones with idnsSecInlineSigning attribute = TRUE in LDAP are added
|
||||
or deleted from ODS as necessary. ODS->LDAP key synchronization
|
||||
has to be solved seperatelly.
|
||||
"""
|
||||
def __init__(self):
|
||||
self.zl_ldap = LDAPZoneListReader()
|
||||
|
||||
def ksmutil(self, params):
|
||||
"""Call ods-ksmutil with given parameters and return stdout.
|
||||
|
||||
Raises CalledProcessError if returncode != 0.
|
||||
"""
|
||||
cmd = ['ods-ksmutil'] + params
|
||||
result = ipautil.run(cmd, capture_output=True)
|
||||
return result.output
|
||||
|
||||
def get_ods_zonelist(self):
|
||||
stdout = self.ksmutil(['zonelist', 'export'])
|
||||
reader = ODSZoneListReader(stdout)
|
||||
return reader
|
||||
|
||||
def add_ods_zone(self, uuid, name):
|
||||
zone_path = '%s%s' % (ENTRYUUID_PREFIX, uuid)
|
||||
cmd = ['zone', 'add', '--zone', str(name), '--input', zone_path]
|
||||
output = self.ksmutil(cmd)
|
||||
logger.info('%s', output)
|
||||
self.notify_enforcer()
|
||||
|
||||
def del_ods_zone(self, name):
|
||||
# ods-ksmutil blows up if zone name has period at the end
|
||||
name = name.relativize(dns.name.root)
|
||||
# detect if name is root zone
|
||||
if name == dns.name.empty:
|
||||
name = dns.name.root
|
||||
cmd = ['zone', 'delete', '--zone', str(name)]
|
||||
output = self.ksmutil(cmd)
|
||||
logger.info('%s', output)
|
||||
self.notify_enforcer()
|
||||
self.cleanup_signer(name)
|
||||
|
||||
def notify_enforcer(self):
|
||||
cmd = ['notify']
|
||||
output = self.ksmutil(cmd)
|
||||
logger.info('%s', output)
|
||||
|
||||
def cleanup_signer(self, zone_name):
|
||||
cmd = ['ods-signer', 'ldap-cleanup', str(zone_name)]
|
||||
output = ipautil.run(cmd, capture_output=True)
|
||||
logger.info('%s', output)
|
||||
|
||||
def ldap_event(self, op, uuid, attrs):
|
||||
"""Record single LDAP event - zone addition or deletion.
|
||||
|
||||
Change is only recorded to memory.
|
||||
self.sync() have to be called to synchronize change to ODS."""
|
||||
assert op == 'add' or op == 'del'
|
||||
self.zl_ldap.process_ipa_zone(op, uuid, attrs)
|
||||
logger.debug("LDAP zones: %s", self.zl_ldap.mapping)
|
||||
|
||||
def sync(self):
|
||||
"""Synchronize list of zones in LDAP with ODS."""
|
||||
zl_ods = self.get_ods_zonelist()
|
||||
logger.debug("ODS zones: %s", zl_ods.mapping)
|
||||
removed = self.diff_zl(zl_ods, self.zl_ldap)
|
||||
logger.info("Zones removed from LDAP: %s", removed)
|
||||
added = self.diff_zl(self.zl_ldap, zl_ods)
|
||||
logger.info("Zones added to LDAP: %s", added)
|
||||
for (uuid, name) in removed:
|
||||
self.del_ods_zone(name)
|
||||
for (uuid, name) in added:
|
||||
self.add_ods_zone(uuid, name)
|
||||
|
||||
def diff_zl(self, s1, s2):
|
||||
"""Compute zones present in s1 but not present in s2.
|
||||
|
||||
Returns: List of (uuid, name) tuples with zones present only in s1."""
|
||||
s1_extra = s1.uuids - s2.uuids
|
||||
removed = [(uuid, name) for (uuid, name) in s1.mapping.items()
|
||||
if uuid in s1_extra]
|
||||
return removed
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
ipa_log_manager.standard_logging_setup(debug=True)
|
||||
ods = ODSMgr()
|
||||
reader = ods.get_ods_zonelist()
|
||||
logger.info('ODS zones: %s', reader.mapping)
|
||||
@@ -1,114 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
"""
|
||||
This script implements a syncrepl consumer which syncs data from server
|
||||
to a local dict.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
import ldap
|
||||
from ldap.cidict import cidict
|
||||
from ldap.ldapobject import ReconnectLDAPObject
|
||||
from ldap.syncrepl import SyncreplConsumer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
|
||||
"""
|
||||
Syncrepl Consumer interface
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
# Initialise the LDAP Connection first
|
||||
ldap.ldapobject.ReconnectLDAPObject.__init__(self, *args, **kwargs)
|
||||
# Now prepare the data store
|
||||
self.__data = cidict()
|
||||
self.__data['uuids'] = cidict()
|
||||
# We need this for later internal use
|
||||
self.__presentUUIDs = cidict()
|
||||
|
||||
def close_db(self):
|
||||
# This is useless for dict
|
||||
pass
|
||||
|
||||
def syncrepl_get_cookie(self):
|
||||
if 'cookie' in self.__data:
|
||||
cookie = self.__data['cookie']
|
||||
logger.debug('Current cookie is: %s', cookie)
|
||||
return cookie
|
||||
else:
|
||||
logger.debug('Current cookie is: None (not received yet)')
|
||||
|
||||
def syncrepl_set_cookie(self, cookie):
|
||||
logger.debug('New cookie is: %s', cookie)
|
||||
self.__data['cookie'] = cookie
|
||||
|
||||
def syncrepl_entry(self, dn, attributes, uuid):
|
||||
attributes = cidict(attributes)
|
||||
# First we determine the type of change we have here
|
||||
# (and store away the previous data for later if needed)
|
||||
previous_attributes = cidict()
|
||||
if uuid in self.__data['uuids']:
|
||||
change_type = 'modify'
|
||||
previous_attributes = self.__data['uuids'][uuid]
|
||||
else:
|
||||
change_type = 'add'
|
||||
# Now we store our knowledge of the existence of this entry
|
||||
# (including the DN as an attribute for convenience)
|
||||
attributes['dn'] = dn
|
||||
self.__data['uuids'][uuid] = attributes
|
||||
# Debugging
|
||||
logger.debug('Detected %s of entry: %s %s', change_type, dn, uuid)
|
||||
if change_type == 'modify':
|
||||
self.application_sync(uuid, dn, attributes, previous_attributes)
|
||||
else:
|
||||
self.application_add(uuid, dn, attributes)
|
||||
|
||||
def syncrepl_delete(self, uuids):
|
||||
# Make sure we know about the UUID being deleted, just in case...
|
||||
uuids = [uuid for uuid in uuids if uuid in self.__data['uuids']]
|
||||
# Delete all the UUID values we know of
|
||||
for uuid in uuids:
|
||||
attributes = self.__data['uuids'][uuid]
|
||||
dn = attributes['dn']
|
||||
logger.debug('Detected deletion of entry: %s %s', dn, uuid)
|
||||
self.application_del(uuid, dn, attributes)
|
||||
del self.__data['uuids'][uuid]
|
||||
|
||||
def syncrepl_present(self, uuids, refreshDeletes=False):
|
||||
# If we have not been given any UUID values,
|
||||
# then we have received all the present controls...
|
||||
if uuids is None:
|
||||
# We only do things if refreshDeletes is false
|
||||
# as the syncrepl extension will call syncrepl_delete instead
|
||||
# when it detects a delete notice
|
||||
if refreshDeletes is False:
|
||||
deletedEntries = [uuid for uuid in self.__data['uuids'].keys()
|
||||
if uuid not in self.__presentUUIDs]
|
||||
self.syncrepl_delete(deletedEntries)
|
||||
# Phase is now completed, reset the list
|
||||
self.__presentUUIDs = {}
|
||||
else:
|
||||
# Note down all the UUIDs we have been sent
|
||||
for uuid in uuids:
|
||||
self.__presentUUIDs[uuid] = True
|
||||
|
||||
def application_add(self, uuid, dn, attributes):
|
||||
logger.info('Performing application add for: %s %s', dn, uuid)
|
||||
logger.debug('New attributes: %s', attributes)
|
||||
return True
|
||||
|
||||
def application_sync(self, uuid, dn, attributes, previous_attributes):
|
||||
logger.info('Performing application sync for: %s %s', dn, uuid)
|
||||
logger.debug('Old attributes: %s', previous_attributes)
|
||||
logger.debug('New attributes: %s', attributes)
|
||||
return True
|
||||
|
||||
def application_del(self, uuid, dn, previous_attributes):
|
||||
logger.info('Performing application delete for: %s %s', dn, uuid)
|
||||
logger.debug('Old attributes: %s', previous_attributes)
|
||||
return True
|
||||
@@ -1,22 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
|
||||
#
|
||||
|
||||
import errno
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
class TemporaryDirectory(object):
|
||||
def __init__(self, root):
|
||||
self.root = root
|
||||
|
||||
def __enter__(self):
|
||||
self.name = tempfile.mkdtemp(dir=self.root)
|
||||
return self.name
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
try:
|
||||
shutil.rmtree(self.name)
|
||||
except OSError as e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
Reference in New Issue
Block a user