Sisyphus repository

Group :: System/Servers
Package: 389-ds-base


Patch: 389-ds-base-2.2.9-alt.patch


 Makefile.am                              |   2 +-
 src/lib389/lib389/perftools.py           | 568 -------------------------------
 src/lib389/lib389/perftools/__init__.py  | 568 +++++++++++++++++++++++++++++++
 src/lib389/lib389/topologies.py          | 561 ------------------------------
 src/lib389/lib389/topologies/__init__.py | 561 ++++++++++++++++++++++++++++++
 src/lib389/setup.py                      |  11 +-
 6 files changed, 1140 insertions(+), 1131 deletions(-)
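
Apart from the Makefile.am and setup.py tweaks, this patch is a pure move: the lib389 modules perftools.py and topologies.py each become a package __init__.py with identical contents (568 and 561 lines respectively). Python resolves lib389.topologies the same way whether it is a module file or a package directory, so existing imports are unaffected. A minimal sketch (the calling code is hypothetical; the import paths are the ones the move preserves):

    # Resolves identically before and after the patch:
    #   lib389/topologies.py  ->  lib389/topologies/__init__.py
    from lib389._constants import ReplicaRole
    from lib389.topologies import create_topology
    from lib389.perftools import PerformanceTools

    topo = create_topology({ReplicaRole.STANDALONE: 1})
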
diff --git a/Makefile.am b/Makefile.am
index b34947869..f3a7da85c 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1167,7 +1167,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \
 	$(libavl_a_SOURCES)
 
 libslapd_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) $(DB_INC) $(KERBEROS_CFLAGS) $(PCRE_CFLAGS) $(SVRCORE_INCLUDES)
-libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LIBS) $(PCRE_LIBS) $(THREADLIB) $(SYSTEMD_LIBS) libsvrcore.la $(RSLAPD_LIB)
+libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LIBS) $(PCRE_LIBS) $(THREADLIB) $(SYSTEMD_LIBS) libsvrcore.la $(RSLAPD_LIB) -lm
 # If asan is enabled, it creates special libcrypt interceptors. However, they are
 # detected by the first load of libasan at runtime, and what is in the linked lib
 # so we need libcrypt to be present as soon as libasan is loaded for the interceptors
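
The one functional build change is appending -lm to libslapd_la_LIBADD, so libslapd links the math library explicitly rather than relying on it arriving transitively. As a hedged illustration that libm really is a separate shared object on glibc systems (Python is used here only to probe the libraries; sqrt stands in for whatever libm routine libslapd needs, which this hunk does not show):

    import ctypes, ctypes.util

    # find_library("m") resolves the standalone math library, e.g. 'libm.so.6'.
    libm = ctypes.CDLL(ctypes.util.find_library("m"))
    libm.sqrt.restype = ctypes.c_double
    libm.sqrt.argtypes = [ctypes.c_double]
    print(libm.sqrt(2.0))   # 1.4142135623730951 -- symbol lives in libm, hence -lm
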
diff --git a/src/lib389/lib389/perftools.py b/src/lib389/lib389/perftools.py
deleted file mode 100644
index 078acb015..000000000
--- a/src/lib389/lib389/perftools.py
+++ /dev/null
@@ -1,568 +0,0 @@
-#!/usr/bin/python3
-
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2021 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-
-#import os
-#import os.path
-#import ldap
-import sys
-import re
-import string
-import logging
-import subprocess
-import multiprocessing
-import time
-import json
-import statistics
-from random import shuffle, seed, randint, choice
-from lib389._constants import *
-from lib389.properties import *
-from lib389.idm.user import UserAccounts
-from lib389.topologies import create_topology
-from lib389 import DirSrv
-from lib389.config import LMDB_LDBMConfig
-from lib389.utils import get_default_db_lib
-from pathlib import Path, PosixPath
-
-class IdGenerator:
-    # Generates up to nbids unique identifiers
-
-    def __init__(self, nbids):
-        self._idx = 0
-        self._nbids = nbids
-
-    def __iter__(self):
-        self._idx = 0
-        return self
-
-    def _idFromIndex(self):
-        return self._formatId()
-
-    def _formatId(self, id):
-        # Should be overwritten in subclass
-        # Should return an id
-        return str(id)
-
-    def __next__(self):
-        if (self._idx >= self._nbids):
-            raise StopIteration
-        self._idx += 1
-        return self._formatId(self._idx-1)
-
-    def getIdx(self):
-        return self._idx
-
-    def random(self):
-        self._idx = randint(0, self._nbids-1);
-        return self._formatId(self._idx)
-
-class IdGeneratorWithNames(IdGenerator):
-    # Generates up to nbids unique identifiers as names
-    # for perf reason a small (self._list_size) number of unique identifier is generated
-    # then ids from this list are concat level time (insuring the final id is unique)
-
-    # Generates up to self._list_size power level unique identifiers
-    # for perf reason a small (self._list_size) number of unique identifier is generated
-    # then ids from this list are concat level time (insuring the final id is unique)
-    def __init__(self, nbids):
-        super().__init__(nbids)
-        self._voyelles = [ 'a', 'e', 'i', 'o', 'u', 'ai', 'an', 'au', 'en', 'ei', 'en', 'eu', 'in', 'on', 'ou' ]
-        self._consonnes = [ 'b', 'c', 'ch', 'cr', 'd', 'f', 'g', 'j', 'l', 'm', 'n', 'p', 'ph', 'qu', 'r', 's', 't', 'v' ]
-        self._syllabs = [c+v for c in self._consonnes for v in self._voyelles]
-        shuffle(self._syllabs)
-        self._level = 0
-        self._syllabsLen = len(self._syllabs)
-        while (nbids > 0):
-            self._level = self._level+1
-            nbids = int (nbids / self._syllabsLen)
-
-    def _formatId(self, idx):
-        id = ""
-        for i in range(self._level):
-            id += self._syllabs[int(idx % self._syllabsLen)]
-            idx /= self._syllabsLen
-        return id.capitalize()
-
-class IdGeneratorWithNumbers(IdGenerator):
-    # Generates up to nbids unique identifiers as numbers
-
-    @staticmethod
-    def formatId(idx):
-        return f'{idx:0>10d}'
-
-    def _formatId(self, idx):
-        return IdGeneratorWithNumbers.formatId(idx)
-
-
-class CsvFile:
-    # Helper to write simple csv files
-
-    def __init__(self, fname, width):
-        self.fname = fname
-        self.f = None
-        self.width = width
-        self.pos = 1
-        self.line = ""
-        self.sep = ";"
-        self.lineid = 1
-
-    def __enter__(self):
-        self.f = open(self.fname, "w") if self.fname else sys.stdout
-        return self
-
-    def __exit__(self, type, value, tb):
-        if (self.f != sys.stdout):
-            self.f.close()
-        self.f = None
-
-    def nf(self, str):
-        if not str:
-            str=""
-        self.line += f"{str}{self.sep}"
-        self.pos += 1
-
-    def nl(self):
-        while self.pos < self.width:
-            self.nf(None)
-        self.line += "\n"
-        self.f.write(self.line)
-        self.line = ""
-        self.pos = 1
-        self.lineid += 1
-
-    def n(self, v):
-        # Get name of spreadsheet column
-        if (v == 0):
-            return ""
-        return chr(0x40+v)
-
-    def ref(self, dpl):
-        colid = self.pos + dpl - 1
-        return f"{self.n(int(colid/26))}{self.n(colid%26+1)}{self.lineid}"
-
-class PerformanceTools:
-
-    def __init__(self, options = {}):
-        prefix = os.path.join(os.environ.get('PREFIX', ""))
-        perfdir= f"{prefix}/var/log/dirsrv/perfdir"
-        print(f"Results and logs are stored in {perfdir} directory.")
-        self._options = {
-            'nbUsers' : 10000,
-            'seed' : 'lib389PerfTools',
-            'resultDir' : perfdir,
-            'suffix' : DEFAULT_SUFFIX,
-            **options
-        }
-        seed(self._options['seed'])
-        self._instance = None
-        os.makedirs(perfdir, mode=0o755, exist_ok = True)
-        self._ldclt_template = self.getFilePath("template.ldclt");
-        # Generate a dummy template anyway we do not plan to create entries
-        with open(self._ldclt_template, "w") as f:
-            f.write("objectclass: inetOrgPerson\n");
-        self._users_parents_dn = f"ou=People,{self._options['suffix']}"
-
-    @staticmethod
-    def log2Csv(fname, fout):
-        # Convert (verbose) log file into csv file  (easier for comparing the results)
-        map={}    # ( nb_users, name, nbthreads, db_lib) Tuple to Result map
-        names={}  # { Name : None } Map
-        has_threads={} # { Name : { threads : { users : users } } } Map
-        # Read log file
-        maxmes=0
-        with open(fname) as f:
-            for line in f:
-                if (line[0] != '{'):
-                    continue
-                if (line[-1] == '\n'):
-                    line = line[:-1]
-                res = eval(line.replace('\n','\\n'))
-                nb_users = res['nbUsers']
-                db_lib = res['db_lib']
-                name = res['measure_name']
-                names[name] = None
-                try:
-                    nbthreads = res['nb_threads']
-                except KeyError:
-                    nbthreads = ""
-                if not name in has_threads:
-                    has_threads[name] = {}
-                if not nbthreads in has_threads[name]:
-                    has_threads[name][nbthreads] = {}
-                has_threads[name][nbthreads][nb_users] = nb_users
-                key = ( nb_users, name, nbthreads, db_lib)
-                if not key in map:
-                    map[key] = []
-                m = map[key]
-                m.append(res)
-                if maxmes < len(m):
-                    maxmes = len(m)
-        # Displays the result: by test name then by thread number then by users number
-        # Generates all combinations
-        keys=[]
-        for name in sorted(names.keys()):
-            for t in sorted(has_threads[name].keys()):
-                for user in sorted(has_threads[name][t].keys()):
-                    keys.append((user, name, t))
-        #Generates the csv file
-        sep=";"
-        with CsvFile(fout, 2*maxmes + 2) as csv:
-            csv.nf("test name");
-            csv.nf("threads");
-            csv.nf("users");
-            for idx in range(maxmes):
-                csv.nf("bdb");
-                csv.nf("mdb");
-                csv.nf("%");
-            csv.nl();
-            for k in keys:
-                csv.nf(f"{k[1]}")
-                csv.nf(f"{k[2]}")
-                csv.nf(f"{k[0]}")
-                k0 = ( k[0], k[1], k[2], "bdb" )
-                k1 = ( k[0], k[1], k[2], "mdb" )
-                for idx in range(maxmes):
-                    if k0 in map and idx < len(map[k0]):
-                        res = map[k0][idx]
-                        csv.nf(res['safemean'])
-                    else:
-                        csv.nf(None)
-                    if k1 in map and idx < len(map[k1]):
-                        res = map[k1][idx]
-                        csv.nf(res['safemean'])
-                    else:
-                        csv.nf(None)
-                    # Add % formula
-                    csv.nf(f"=({csv.ref(-1)}-{csv.ref(-2)})/{csv.ref(-2)}")
-                csv.nl();
-
-    def getFilePath(self, filename):
-        return os.path.join(self._options['resultDir'], filename)
-
-    def log(self, filename, msg):
-        with open(self.getFilePath(filename), "at") as f:
-            f.write(str(msg))
-            f.write("\n")
-
-    def initInstance(self):
-        if (self._instance):
-            return self._instance;
-        uidpath = self.getFilePath("uids")
-        nb_uids = 0
-        try:
-            with open(uidpath, 'r') as f:
-                while f.readline():
-                    nb_uids += 1
-        except FileNotFoundError:
-            pass
-        nb_users = self._options['nbUsers']
-        need_rebuild = True
-        if (nb_uids == nb_users):
-            # Lets try to reuse existing instance
-            try :
-                self._instance = DirSrv(verbose=True)
-                self._instance.local_simple_allocate(serverid="standalone1", password=PW_DM)
-                self._instance.open()
-                if (self._instance.exists()):
-                    if (self._instance.get_db_lib() == get_default_db_lib()):
-                        need_rebuild = False
-                    else:
-                        print (f"db is {self._instance.get_db_lib()} instead of {get_default_db_lib()} ==> instance must be rebuild")
-                else:
-                    print (f"missing instance ==> instance must be rebuild")
-            except Exception:
-                pass
-        else:
-            print (f"Instance has {nb_uids} users instead of {nb_users} ==> instance must be rebuild")
-        if (need_rebuild):
-            print ("Rebuilding standalone1 instance")
-            # Should rebuild the instance from scratch
-            topology = create_topology({ReplicaRole.STANDALONE: 1})
-            self._instance = topology.standalone
-            #  Adjust db size if needed (i.e about 670 K users)
-            defaultDBsize = 1073741824
-            entrySize =  2000 # Real size is around 1525 but got error with 1800 (likely due to some recent changes in entries)
-            if (self._instance.get_db_lib() == "mdb" and
-                    nb_users * entrySize > defaultDBsize):
-                mdb_config = LMDB_LDBMConfig(self._instance)
-                mdb_config.replace("nsslapd-mdb-max-size", str(nb_users * entrySize))
-                self._instance.restart()
-            # Then populate the users
-            useraccounts = UserAccounts(self._instance, self._options['suffix'])
-            with open(uidpath, 'w') as f:
-                uidgen = IdGeneratorWithNumbers(nb_users)
-                cnGen = IdGeneratorWithNames(100)
-                snGen = IdGeneratorWithNames(100)
-
-                for uid in uidgen:
-                    cn = cnGen.random()
-                    sn = snGen.random()
-                    rdn = f"uid={uid}"
-                    osuid = uidgen.getIdx() + 1000
-                    osgid = int (osuid % 100) + 1000
-                    properties = {
-                        'uid': uid,
-                        'cn': cn,
-                        'sn': sn,
-                        'uidNumber': str(osuid),
-                        'gidNumber': str(osgid),
-                        'homeDirectory': f'/home/{uid}'
-                    }
-                    super(UserAccounts, useraccounts).create(rdn, properties)
-                    f.write(f'{uid}\n')
-        return self._instance;
-
-    @staticmethod
-    def filterMeasures(values, m, ecart):
-        # keep values around m
-        r = []
-        for val in values:
-            if (val > (1 - ecart) * m and val < (1 + ecart) * m):
-                r.append(val)
-        return r
-
-    def safeMeasures(self, values, ecart=0.2):
-        v = values
-        try:
-            r = PerformanceTools.filterMeasures(values, statistics.mean(v) , ecart)
-            while ( r != v ):
-                v = r
-                r = PerformanceTools.filterMeasures(values, statistics.mean(v) , ecart)
-                if (len(r) == 0):
-                    return values
-            return r
-        except statistics.StatisticsError as e:
-            self.log("log", str(e))
-            print(e)
-            return values
-
-    # Return a dict about the evironment data
-    def getEnvInfo(self):
-        mem = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024.**3)
-        with open ('/etc/redhat-release') as f:
-            release = f.read()
-        return {
-            "db_lib" : self._instance.get_db_lib(),
-            "nb_cpus" : multiprocessing.cpu_count(),
-            "total mem" : mem,
-            "release" : str(release),
-            **self._options
-        }
-
-    def finalizeResult(self, res):
-        try:
-            rawres = res["rawresults"]
-            res["rawmean"] = statistics.mean(rawres)
-            res["saferesults"] = self.safeMeasures(rawres) # discard first measure result
-            res["safemean"] = statistics.mean(res["saferesults"])
-            pretty_res_keys = [ 'start_time', 'stop_time', 'measure_name', 'safemean', 'db_lib', 'nbUsers', 'nb_threads' ]
-            pretty_res = dict(filter(lambda elem: elem[0] in pretty_res_keys, res.items()))
-        except statistics.StatisticsError as e:
-            print(e)
-            res["exception"] = e
-            pretty_res = "#ERROR"
-        res["pretty"] = pretty_res
-        self.log("out", res["pretty"])
-        self.log("log", res)
-        return res
-
-    def ldclt(self, measure_name, args, nbThreads=10, nbMes=10):
-        # First ldclt measure is always bad so do 1 measure more
-        # and discard it from final result
-        nbMes += 1
-
-        prog = os.path.join(self._instance.ds_paths.bin_dir, 'ldclt')
-        cmd = [ prog,
-            '-h',
-            f'{self._instance.host}',
-            '-p',
-            f'{self._instance.port}',
-            '-D',
-            f'{self._instance.binddn}',
-            '-w',
-            f'{self._instance.bindpw}',
-            '-N', str(nbMes),
-            '-n', str(nbThreads) ]
-        for key in args.keys():
-            cmd.append(str(key))
-            val = args[key]
-            if (val):
-                cmd.append(str(val))
-        start_time = time.time()
-        tmout = 30+10*nbMes
-        print (f"Running ldclt with a timeout of {tmout} seconds ...\r")
-        try:
-            result = subprocess.run(args=cmd, capture_output=True, timeout=tmout)
-        except subprocess.CalledProcessError as e:
-            self.log("log", f'{e.cmd} failed.  measure: {measure_name}\n' +
-                      f'instance: {self._instance.serverid}\n' +
-                      f'return code is {e.returncode}.\n' +
-                      f'stdout: {e.stdout}\n' +
-                      f'stderr: {e.stderr}\n' )
-            raise e
-        print (" Done.")
-        stop_time = time.time()
-        # Lets parse the result
-        res = { "measure_name" : measure_name,
-                "cmd" : cmd,
-                "stdout" : result.stdout,
-                "stderr" : result.stderr,
-                "returncode" : result.returncode,
-                "start_time" : start_time,
-                "stop_time" : stop_time,
-                "stop_time" : stop_time,
-                "nb_threads" : nbThreads,
-                **self.getEnvInfo() }
-        rawres = re.findall(r'Average rate: [^ ]*\s*.([^/]*)', str(result.stdout))
-        rawres = [float(i) for i in rawres]
-        res["measure0"] = rawres[0]
-        res["rawresults"] = rawres[1:]   # Discard first measure
-        return self.finalizeResult(res)
-
-    def measure_search_by_uid(self, name, nb_threads = 1):
-        nb_users = self._options['nbUsers']
-        args  = { "-b" : self._users_parents_dn,
-                  "-f" : "uid=XXXXXXXXXX",
-                  "-e" : "esearch,random",
-                  "-r0" : None,
-                  f"-R{nb_users-1}" : None }
-        return self.ldclt(name, args, nbThreads=nb_threads)
-
-    # I wish I could make the base dn vary rather than use the dn in filter
-    # but I did not find how to do that (the RDN trick as in modify
-    #  generates the same search than measure_search_by_uid test)
-    def measure_search_by_filtering_the_dn(self, name, nb_threads = 1):
-        nb_users = self._options['nbUsers']
-        args  = { "-b" : self._users_parents_dn,
-                  "-f" : "uid:dn:=XXXXXXXXXX",
-                  "-e" : "esearch,random",
-                  "-r0" : None,
-                  f"-R{nb_users-1}" : None }
-        return self.ldclt(name, args, nbThreads=nb_threads)
-
-    def measure_modify(self, name, nb_threads = 1):
-        nb_users = self._options['nbUsers']
-        args  = { "-b" : self._users_parents_dn,
-                  "-e" : f"rdn=uid:[RNDN(0;{nb_users-1};10)],object={self._ldclt_template},attreplace=sn: random modify XXXXX" }
-        return self.ldclt(name, args, nbThreads=nb_threads)
-
-    def offline_export(self):
-        start_time = time.time()
-        assert (self._instance.db2ldif(DEFAULT_BENAME, (self._options['suffix'],), None, None, None, self._ldif))
-        stop_time = time.time()
-        # Count entries in ldif file (if not already done)
-        if not self._nbEntries:
-            self._nbEntries = 0
-            with open(self._ldif) as f:
-                for line in f:
-                    if (line.startswith("dn:")):
-                        self._nbEntries += 1
-        return self._nbEntries / (stop_time - start_time)
-
-    def offline_import(self):
-        start_time = time.time()
-        assert (self._instance.ldif2db(DEFAULT_BENAME, None, None, None, self._ldif))
-        stop_time = time.time()
-        return self._nbEntries / (stop_time - start_time)
-
-    def _do_measure(self, measure_name, measure_cb, nbMes):
-        # Perform non ldcltl measure
-        #
-        first_time = time.time()
-        rawres = []
-        for m in range(nbMes):
-            try:
-                rawres.append( measure_cb() )
-                stop_time = time.time()
-            except AssertionError:
-                continue
-        last_time = time.time()
-        # Lets parse the result
-        res = { "measure_name" : measure_name,
-                "start_time" : first_time,
-                "stop_time" : last_time,
-                "nb_measures" : nbMes,
-                "rawresults" : rawres,
-                **self.getEnvInfo() }
-        return self.finalizeResult(res)
-
-    def mesure_export_import(self, nbMes=10):
-        self._instance.stop()
-        self._ldif = self.getFilePath("db.ldif");
-        self._nbEntries = None
-        res = [ self._do_measure("export", self.offline_export, nbMes), self._do_measure("import", self.offline_import, nbMes) ]
-        self._instance.start()
-        return res;
-
-    class Tester:
-        # Basic tester (used to define ldclt tests)
-        def __init__(self, name, description, method_name):
-            self._base_name = name
-            self._base_description = description
-            self._method_name = method_name
-
-        def name(self):
-            return self._base_name
-
-        def argsused(self):
-            return [ "nb_threads", "name" ]
-
-        def description(self):
-            return self._base_description
-
-        def run(self, perftools, args):
-            args['name'] = self._base_name
-            res = getattr(perftools, self._method_name)(self._base_name, nb_threads=args['nb_threads']);
-            print (res['pretty'])
-
-        @staticmethod
-        def initTester(args):
-            os.environ["NSSLAPD_DB_LIB"] = args['db_lib']
-            perftools = PerformanceTools( args )
-            perftools.initInstance()
-            return perftools;
-
-    class TesterImportExport(Tester):
-        # A special tester for export/import
-        def __init__(self):
-            super().__init__("export/import",
-                "Measure export rate in entries per seconds then measure import rate.",
-                 None)
-
-        def argsused(self):
-            return []
-
-        def run(self, perftools, args=None):
-            res = perftools.mesure_export_import()
-            for r in res:
-                print (r['pretty'])
-
-    @staticmethod
-    def listTests():
-        # List of test for which args.nb_threads is useful
-        return { t.name() :  t for t in [
-            PerformanceTools.Tester("search_uid", "Measure number of searches per seconds using filter with random existing uid.", "measure_search_by_uid"),
-            PerformanceTools.Tester("search_uid_in_dn", "Measure number of searches per seconds using filter with random existing uid in dn (i.e: (uid:dn:uid_value)).", "measure_search_by_filtering_the_dn"),
-            PerformanceTools.Tester("modify_sn", "Measure number of modify per seconds replacing sn by random value on random entries.", "measure_modify"),
-            PerformanceTools.TesterImportExport(),
-        ] }
-
-    @staticmethod
-    def runAllTests(options):
-        for users in ( 100, 1000, 10000, 100000, 1000000 ):
-            for db in ( 'bdb', 'mdb' ):
-                perftools = PerformanceTools.Tester.initTester({**options, 'nbUsers': users, 'db_lib': db})
-                for t in PerformanceTools.listTests().values():
-                    if 'nb_threads' in t.argsused():
-                        for nbthreads in ( 1, 4, 8 ):
-                            t.run(perftools, { "nb_threads" : nbthreads })
-                    else:
-                        t.run(perftools)
-
-
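
The same 568 lines are re-added verbatim below as perftools/__init__.py. For orientation, a short sketch of what the id-generator classes in this file produce (behavior read off the code above; the example name is illustrative since the syllable table is shuffled):

    from lib389.perftools import IdGeneratorWithNumbers, IdGeneratorWithNames

    print(list(IdGeneratorWithNumbers(3)))
    # ['0000000000', '0000000001', '0000000002'] -- zero-padded to 10 digits

    names = IdGeneratorWithNames(100)   # 100 ids fit in one syllable level
    print(names.random())               # e.g. 'Chai' -- capitalized consonant+vowel
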
diff --git a/src/lib389/lib389/perftools/__init__.py b/src/lib389/lib389/perftools/__init__.py
new file mode 100644
index 000000000..078acb015
--- /dev/null
+++ b/src/lib389/lib389/perftools/__init__.py
@@ -0,0 +1,568 @@
+#!/usr/bin/python3
+
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+#import os
+#import os.path
+#import ldap
+import sys
+import re
+import string
+import logging
+import subprocess
+import multiprocessing
+import time
+import json
+import statistics
+from random import shuffle, seed, randint, choice
+from lib389._constants import *
+from lib389.properties import *
+from lib389.idm.user import UserAccounts
+from lib389.topologies import create_topology
+from lib389 import DirSrv
+from lib389.config import LMDB_LDBMConfig
+from lib389.utils import get_default_db_lib
+from pathlib import Path, PosixPath
+
+class IdGenerator:
+    # Generates up to nbids unique identifiers
+
+    def __init__(self, nbids):
+        self._idx = 0
+        self._nbids = nbids
+
+    def __iter__(self):
+        self._idx = 0
+        return self
+
+    def _idFromIndex(self):
+        return self._formatId()
+
+    def _formatId(self, id):
+        # Should be overwritten in subclass
+        # Should return an id
+        return str(id)
+
+    def __next__(self):
+        if (self._idx >= self._nbids):
+            raise StopIteration
+        self._idx += 1
+        return self._formatId(self._idx-1)
+
+    def getIdx(self):
+        return self._idx
+
+    def random(self):
+        self._idx = randint(0, self._nbids-1);
+        return self._formatId(self._idx)
+
+class IdGeneratorWithNames(IdGenerator):
+    # Generates up to nbids unique identifiers as names
+    # for perf reason a small (self._list_size) number of unique identifier is generated
+    # then ids from this list are concat level time (insuring the final id is unique)
+
+    # Generates up to self._list_size power level unique identifiers
+    # for perf reason a small (self._list_size) number of unique identifier is generated
+    # then ids from this list are concat level time (insuring the final id is unique)
+    def __init__(self, nbids):
+        super().__init__(nbids)
+        self._voyelles = [ 'a', 'e', 'i', 'o', 'u', 'ai', 'an', 'au', 'en', 'ei', 'en', 'eu', 'in', 'on', 'ou' ]
+        self._consonnes = [ 'b', 'c', 'ch', 'cr', 'd', 'f', 'g', 'j', 'l', 'm', 'n', 'p', 'ph', 'qu', 'r', 's', 't', 'v' ]
+        self._syllabs = [c+v for c in self._consonnes for v in self._voyelles]
+        shuffle(self._syllabs)
+        self._level = 0
+        self._syllabsLen = len(self._syllabs)
+        while (nbids > 0):
+            self._level = self._level+1
+            nbids = int (nbids / self._syllabsLen)
+
+    def _formatId(self, idx):
+        id = ""
+        for i in range(self._level):
+            id += self._syllabs[int(idx % self._syllabsLen)]
+            idx /= self._syllabsLen
+        return id.capitalize()
+
+class IdGeneratorWithNumbers(IdGenerator):
+    # Generates up to nbids unique identifiers as numbers
+
+    @staticmethod
+    def formatId(idx):
+        return f'{idx:0>10d}'
+
+    def _formatId(self, idx):
+        return IdGeneratorWithNumbers.formatId(idx)
+
+
+class CsvFile:
+    # Helper to write simple csv files
+
+    def __init__(self, fname, width):
+        self.fname = fname
+        self.f = None
+        self.width = width
+        self.pos = 1
+        self.line = ""
+        self.sep = ";"
+        self.lineid = 1
+
+    def __enter__(self):
+        self.f = open(self.fname, "w") if self.fname else sys.stdout
+        return self
+
+    def __exit__(self, type, value, tb):
+        if (self.f != sys.stdout):
+            self.f.close()
+        self.f = None
+
+    def nf(self, str):
+        if not str:
+            str=""
+        self.line += f"{str}{self.sep}"
+        self.pos += 1
+
+    def nl(self):
+        while self.pos < self.width:
+            self.nf(None)
+        self.line += "\n"
+        self.f.write(self.line)
+        self.line = ""
+        self.pos = 1
+        self.lineid += 1
+
+    def n(self, v):
+        # Get name of spreadsheet column
+        if (v == 0):
+            return ""
+        return chr(0x40+v)
+
+    def ref(self, dpl):
+        colid = self.pos + dpl - 1
+        return f"{self.n(int(colid/26))}{self.n(colid%26+1)}{self.lineid}"
+
+class PerformanceTools:
+
+    def __init__(self, options = {}):
+        prefix = os.path.join(os.environ.get('PREFIX', ""))
+        perfdir= f"{prefix}/var/log/dirsrv/perfdir"
+        print(f"Results and logs are stored in {perfdir} directory.")
+        self._options = {
+            'nbUsers' : 10000,
+            'seed' : 'lib389PerfTools',
+            'resultDir' : perfdir,
+            'suffix' : DEFAULT_SUFFIX,
+            **options
+        }
+        seed(self._options['seed'])
+        self._instance = None
+        os.makedirs(perfdir, mode=0o755, exist_ok = True)
+        self._ldclt_template = self.getFilePath("template.ldclt");
+        # Generate a dummy template anyway we do not plan to create entries
+        with open(self._ldclt_template, "w") as f:
+            f.write("objectclass: inetOrgPerson\n");
+        self._users_parents_dn = f"ou=People,{self._options['suffix']}"
+
+    @staticmethod
+    def log2Csv(fname, fout):
+        # Convert (verbose) log file into csv file  (easier for comparing the results)
+        map={}    # ( nb_users, name, nbthreads, db_lib) Tuple to Result map
+        names={}  # { Name : None } Map
+        has_threads={} # { Name : { threads : { users : users } } } Map
+        # Read log file
+        maxmes=0
+        with open(fname) as f:
+            for line in f:
+                if (line[0] != '{'):
+                    continue
+                if (line[-1] == '\n'):
+                    line = line[:-1]
+                res = eval(line.replace('\n','\\n'))
+                nb_users = res['nbUsers']
+                db_lib = res['db_lib']
+                name = res['measure_name']
+                names[name] = None
+                try:
+                    nbthreads = res['nb_threads']
+                except KeyError:
+                    nbthreads = ""
+                if not name in has_threads:
+                    has_threads[name] = {}
+                if not nbthreads in has_threads[name]:
+                    has_threads[name][nbthreads] = {}
+                has_threads[name][nbthreads][nb_users] = nb_users
+                key = ( nb_users, name, nbthreads, db_lib)
+                if not key in map:
+                    map[key] = []
+                m = map[key]
+                m.append(res)
+                if maxmes < len(m):
+                    maxmes = len(m)
+        # Displays the result: by test name then by thread number then by users number
+        # Generates all combinations
+        keys=[]
+        for name in sorted(names.keys()):
+            for t in sorted(has_threads[name].keys()):
+                for user in sorted(has_threads[name][t].keys()):
+                    keys.append((user, name, t))
+        #Generates the csv file
+        sep=";"
+        with CsvFile(fout, 2*maxmes + 2) as csv:
+            csv.nf("test name");
+            csv.nf("threads");
+            csv.nf("users");
+            for idx in range(maxmes):
+                csv.nf("bdb");
+                csv.nf("mdb");
+                csv.nf("%");
+            csv.nl();
+            for k in keys:
+                csv.nf(f"{k[1]}")
+                csv.nf(f"{k[2]}")
+                csv.nf(f"{k[0]}")
+                k0 = ( k[0], k[1], k[2], "bdb" )
+                k1 = ( k[0], k[1], k[2], "mdb" )
+                for idx in range(maxmes):
+                    if k0 in map and idx < len(map[k0]):
+                        res = map[k0][idx]
+                        csv.nf(res['safemean'])
+                    else:
+                        csv.nf(None)
+                    if k1 in map and idx < len(map[k1]):
+                        res = map[k1][idx]
+                        csv.nf(res['safemean'])
+                    else:
+                        csv.nf(None)
+                    # Add % formula
+                    csv.nf(f"=({csv.ref(-1)}-{csv.ref(-2)})/{csv.ref(-2)}")
+                csv.nl();
+
+    def getFilePath(self, filename):
+        return os.path.join(self._options['resultDir'], filename)
+
+    def log(self, filename, msg):
+        with open(self.getFilePath(filename), "at") as f:
+            f.write(str(msg))
+            f.write("\n")
+
+    def initInstance(self):
+        if (self._instance):
+            return self._instance;
+        uidpath = self.getFilePath("uids")
+        nb_uids = 0
+        try:
+            with open(uidpath, 'r') as f:
+                while f.readline():
+                    nb_uids += 1
+        except FileNotFoundError:
+            pass
+        nb_users = self._options['nbUsers']
+        need_rebuild = True
+        if (nb_uids == nb_users):
+            # Lets try to reuse existing instance
+            try :
+                self._instance = DirSrv(verbose=True)
+                self._instance.local_simple_allocate(serverid="standalone1", password=PW_DM)
+                self._instance.open()
+                if (self._instance.exists()):
+                    if (self._instance.get_db_lib() == get_default_db_lib()):
+                        need_rebuild = False
+                    else:
+                        print (f"db is {self._instance.get_db_lib()} instead of {get_default_db_lib()} ==> instance must be rebuild")
+                else:
+                    print (f"missing instance ==> instance must be rebuild")
+            except Exception:
+                pass
+        else:
+            print (f"Instance has {nb_uids} users instead of {nb_users} ==> instance must be rebuild")
+        if (need_rebuild):
+            print ("Rebuilding standalone1 instance")
+            # Should rebuild the instance from scratch
+            topology = create_topology({ReplicaRole.STANDALONE: 1})
+            self._instance = topology.standalone
+            #  Adjust db size if needed (i.e about 670 K users)
+            defaultDBsize = 1073741824
+            entrySize =  2000 # Real size is around 1525 but got error with 1800 (likely due to some recent changes in entries)
+            if (self._instance.get_db_lib() == "mdb" and
+                    nb_users * entrySize > defaultDBsize):
+                mdb_config = LMDB_LDBMConfig(self._instance)
+                mdb_config.replace("nsslapd-mdb-max-size", str(nb_users * entrySize))
+                self._instance.restart()
+            # Then populate the users
+            useraccounts = UserAccounts(self._instance, self._options['suffix'])
+            with open(uidpath, 'w') as f:
+                uidgen = IdGeneratorWithNumbers(nb_users)
+                cnGen = IdGeneratorWithNames(100)
+                snGen = IdGeneratorWithNames(100)
+
+                for uid in uidgen:
+                    cn = cnGen.random()
+                    sn = snGen.random()
+                    rdn = f"uid={uid}"
+                    osuid = uidgen.getIdx() + 1000
+                    osgid = int (osuid % 100) + 1000
+                    properties = {
+                        'uid': uid,
+                        'cn': cn,
+                        'sn': sn,
+                        'uidNumber': str(osuid),
+                        'gidNumber': str(osgid),
+                        'homeDirectory': f'/home/{uid}'
+                    }
+                    super(UserAccounts, useraccounts).create(rdn, properties)
+                    f.write(f'{uid}\n')
+        return self._instance;
+
+    @staticmethod
+    def filterMeasures(values, m, ecart):
+        # keep values around m
+        r = []
+        for val in values:
+            if (val > (1 - ecart) * m and val < (1 + ecart) * m):
+                r.append(val)
+        return r
+
+    def safeMeasures(self, values, ecart=0.2):
+        v = values
+        try:
+            r = PerformanceTools.filterMeasures(values, statistics.mean(v) , ecart)
+            while ( r != v ):
+                v = r
+                r = PerformanceTools.filterMeasures(values, statistics.mean(v) , ecart)
+                if (len(r) == 0):
+                    return values
+            return r
+        except statistics.StatisticsError as e:
+            self.log("log", str(e))
+            print(e)
+            return values
+
+    # Return a dict about the evironment data
+    def getEnvInfo(self):
+        mem = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024.**3)
+        with open ('/etc/redhat-release') as f:
+            release = f.read()
+        return {
+            "db_lib" : self._instance.get_db_lib(),
+            "nb_cpus" : multiprocessing.cpu_count(),
+            "total mem" : mem,
+            "release" : str(release),
+            **self._options
+        }
+
+    def finalizeResult(self, res):
+        try:
+            rawres = res["rawresults"]
+            res["rawmean"] = statistics.mean(rawres)
+            res["saferesults"] = self.safeMeasures(rawres) # discard first measure result
+            res["safemean"] = statistics.mean(res["saferesults"])
+            pretty_res_keys = [ 'start_time', 'stop_time', 'measure_name', 'safemean', 'db_lib', 'nbUsers', 'nb_threads' ]
+            pretty_res = dict(filter(lambda elem: elem[0] in pretty_res_keys, res.items()))
+        except statistics.StatisticsError as e:
+            print(e)
+            res["exception"] = e
+            pretty_res = "#ERROR"
+        res["pretty"] = pretty_res
+        self.log("out", res["pretty"])
+        self.log("log", res)
+        return res
+
+    def ldclt(self, measure_name, args, nbThreads=10, nbMes=10):
+        # First ldclt measure is always bad so do 1 measure more
+        # and discard it from final result
+        nbMes += 1
+
+        prog = os.path.join(self._instance.ds_paths.bin_dir, 'ldclt')
+        cmd = [ prog,
+            '-h',
+            f'{self._instance.host}',
+            '-p',
+            f'{self._instance.port}',
+            '-D',
+            f'{self._instance.binddn}',
+            '-w',
+            f'{self._instance.bindpw}',
+            '-N', str(nbMes),
+            '-n', str(nbThreads) ]
+        for key in args.keys():
+            cmd.append(str(key))
+            val = args[key]
+            if (val):
+                cmd.append(str(val))
+        start_time = time.time()
+        tmout = 30+10*nbMes
+        print (f"Running ldclt with a timeout of {tmout} seconds ...\r")
+        try:
+            result = subprocess.run(args=cmd, capture_output=True, timeout=tmout)
+        except subprocess.CalledProcessError as e:
+            self.log("log", f'{e.cmd} failed.  measure: {measure_name}\n' +
+                      f'instance: {self._instance.serverid}\n' +
+                      f'return code is {e.returncode}.\n' +
+                      f'stdout: {e.stdout}\n' +
+                      f'stderr: {e.stderr}\n' )
+            raise e
+        print (" Done.")
+        stop_time = time.time()
+        # Lets parse the result
+        res = { "measure_name" : measure_name,
+                "cmd" : cmd,
+                "stdout" : result.stdout,
+                "stderr" : result.stderr,
+                "returncode" : result.returncode,
+                "start_time" : start_time,
+                "stop_time" : stop_time,
+                "stop_time" : stop_time,
+                "nb_threads" : nbThreads,
+                **self.getEnvInfo() }
+        rawres = re.findall(r'Average rate: [^ ]*\s*.([^/]*)', str(result.stdout))
+        rawres = [float(i) for i in rawres]
+        res["measure0"] = rawres[0]
+        res["rawresults"] = rawres[1:]   # Discard first measure
+        return self.finalizeResult(res)
+
+    def measure_search_by_uid(self, name, nb_threads = 1):
+        nb_users = self._options['nbUsers']
+        args  = { "-b" : self._users_parents_dn,
+                  "-f" : "uid=XXXXXXXXXX",
+                  "-e" : "esearch,random",
+                  "-r0" : None,
+                  f"-R{nb_users-1}" : None }
+        return self.ldclt(name, args, nbThreads=nb_threads)
+
+    # I wish I could make the base dn vary rather than use the dn in filter
+    # but I did not find how to do that (the RDN trick as in modify
+    #  generates the same search than measure_search_by_uid test)
+    def measure_search_by_filtering_the_dn(self, name, nb_threads = 1):
+        nb_users = self._options['nbUsers']
+        args  = { "-b" : self._users_parents_dn,
+                  "-f" : "uid:dn:=XXXXXXXXXX",
+                  "-e" : "esearch,random",
+                  "-r0" : None,
+                  f"-R{nb_users-1}" : None }
+        return self.ldclt(name, args, nbThreads=nb_threads)
+
+    def measure_modify(self, name, nb_threads = 1):
+        nb_users = self._options['nbUsers']
+        args  = { "-b" : self._users_parents_dn,
+                  "-e" : f"rdn=uid:[RNDN(0;{nb_users-1};10)],object={self._ldclt_template},attreplace=sn: random modify XXXXX" }
+        return self.ldclt(name, args, nbThreads=nb_threads)
+
+    def offline_export(self):
+        start_time = time.time()
+        assert (self._instance.db2ldif(DEFAULT_BENAME, (self._options['suffix'],), None, None, None, self._ldif))
+        stop_time = time.time()
+        # Count entries in ldif file (if not already done)
+        if not self._nbEntries:
+            self._nbEntries = 0
+            with open(self._ldif) as f:
+                for line in f:
+                    if (line.startswith("dn:")):
+                        self._nbEntries += 1
+        return self._nbEntries / (stop_time - start_time)
+
+    def offline_import(self):
+        start_time = time.time()
+        assert (self._instance.ldif2db(DEFAULT_BENAME, None, None, None, self._ldif))
+        stop_time = time.time()
+        return self._nbEntries / (stop_time - start_time)
+
+    def _do_measure(self, measure_name, measure_cb, nbMes):
+        # Perform non ldcltl measure
+        #
+        first_time = time.time()
+        rawres = []
+        for m in range(nbMes):
+            try:
+                rawres.append( measure_cb() )
+                stop_time = time.time()
+            except AssertionError:
+                continue
+        last_time = time.time()
+        # Lets parse the result
+        res = { "measure_name" : measure_name,
+                "start_time" : first_time,
+                "stop_time" : last_time,
+                "nb_measures" : nbMes,
+                "rawresults" : rawres,
+                **self.getEnvInfo() }
+        return self.finalizeResult(res)
+
+    def mesure_export_import(self, nbMes=10):
+        self._instance.stop()
+        self._ldif = self.getFilePath("db.ldif");
+        self._nbEntries = None
+        res = [ self._do_measure("export", self.offline_export, nbMes), self._do_measure("import", self.offline_import, nbMes) ]
+        self._instance.start()
+        return res;
+
+    class Tester:
+        # Basic tester (used to define ldclt tests)
+        def __init__(self, name, description, method_name):
+            self._base_name = name
+            self._base_description = description
+            self._method_name = method_name
+
+        def name(self):
+            return self._base_name
+
+        def argsused(self):
+            return [ "nb_threads", "name" ]
+
+        def description(self):
+            return self._base_description
+
+        def run(self, perftools, args):
+            args['name'] = self._base_name
+            res = getattr(perftools, self._method_name)(self._base_name, nb_threads=args['nb_threads']);
+            print (res['pretty'])
+
+        @staticmethod
+        def initTester(args):
+            os.environ["NSSLAPD_DB_LIB"] = args['db_lib']
+            perftools = PerformanceTools( args )
+            perftools.initInstance()
+            return perftools;
+
+    class TesterImportExport(Tester):
+        # A special tester for export/import
+        def __init__(self):
+            super().__init__("export/import",
+                "Measure export rate in entries per seconds then measure import rate.",
+                 None)
+
+        def argsused(self):
+            return []
+
+        def run(self, perftools, args=None):
+            res = perftools.mesure_export_import()
+            for r in res:
+                print (r['pretty'])
+
+    @staticmethod
+    def listTests():
+        # List of test for which args.nb_threads is useful
+        return { t.name() :  t for t in [
+            PerformanceTools.Tester("search_uid", "Measure number of searches per seconds using filter with random existing uid.", "measure_search_by_uid"),
+            PerformanceTools.Tester("search_uid_in_dn", "Measure number of searches per seconds using filter with random existing uid in dn (i.e: (uid:dn:uid_value)).", "measure_search_by_filtering_the_dn"),
+            PerformanceTools.Tester("modify_sn", "Measure number of modify per seconds replacing sn by random value on random entries.", "measure_modify"),
+            PerformanceTools.TesterImportExport(),
+        ] }
+
+    @staticmethod
+    def runAllTests(options):
+        for users in ( 100, 1000, 10000, 100000, 1000000 ):
+            for db in ( 'bdb', 'mdb' ):
+                perftools = PerformanceTools.Tester.initTester({**options, 'nbUsers': users, 'db_lib': db})
+                for t in PerformanceTools.listTests().values():
+                    if 'nb_threads' in t.argsused():
+                        for nbthreads in ( 1, 4, 8 ):
+                            t.run(perftools, { "nb_threads" : nbthreads })
+                    else:
+                        t.run(perftools)
+
+
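
With the package in place the entry points are unchanged. A hedged end-to-end sketch of driving the measurements (it assumes a host where lib389 can create the standalone1 instance; the option keys are the ones Tester.initTester and PerformanceTools consume above):

    from lib389.perftools import PerformanceTools

    # Build or reuse the populated standalone1 instance, then run one ldclt measure.
    perftools = PerformanceTools.Tester.initTester({'nbUsers': 10000, 'db_lib': 'mdb'})
    res = perftools.measure_search_by_uid('search_uid', nb_threads=4)
    print(res['pretty'])

    # Or the full matrix (100..1000000 users x bdb/mdb x 1/4/8 threads):
    # PerformanceTools.runAllTests({})
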
diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py
deleted file mode 100644
index f4f2dbe10..000000000
--- a/src/lib389/lib389/topologies.py
+++ /dev/null
@@ -1,561 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2021 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import logging
-import socket  # For hostname detection for GSSAPI tests
-import pytest
-from lib389 import DirSrv
-from lib389.utils import generate_ds_params, is_fips
-from lib389.mit_krb5 import MitKrb5
-from lib389.saslmap import SaslMappings
-from lib389.replica import ReplicationManager, Replicas
-from lib389.nss_ssl import NssSsl
-from lib389._constants import *
-from lib389.cli_base import LogCapture
-
-TLS_HOSTNAME_CHECK = os.getenv('TLS_HOSTNAME_CHECK', default=True)
-DEBUGGING = os.getenv('DEBUGGING', default=False)
-if DEBUGGING:
-    logging.getLogger(__name__).setLevel(logging.DEBUG)
-else:
-    logging.getLogger(__name__).setLevel(logging.INFO)
-log = logging.getLogger(__name__)
-
-
-def _remove_ssca_db(topology):
-    ssca = NssSsl(dbpath=topology[0].get_ssca_dir())
-    if ssca._db_exists():
-        return ssca.remove_db()
-    else:
-        return True
-
-
-def _create_instances(topo_dict, suffix):
-    """Create requested instances without replication or any other modifications
-
-    :param topo_dict: a dictionary {ReplicaRole.STANDALONE: num, ReplicaRole.SUPPLIER: num,
-                                    ReplicaRole.HUB: num, ReplicaRole.CONSUMER: num}
-    :type topo_dict: dict
-    :param suffix: a suffix
-    :type suffix: str
-
-    :return - TopologyMain object
-    """
-
-    instances = {}
-    ms = {}
-    cs = {}
-    hs = {}
-    ins = {}
-
-    # Create instances
-    for role in topo_dict.keys():
-        for inst_num in range(1, topo_dict[role]+1):
-            instance_data = generate_ds_params(inst_num, role)
-            if DEBUGGING:
-                instance = DirSrv(verbose=True)
-            else:
-                instance = DirSrv(verbose=False)
-            # TODO: Put 'args_instance' to generate_ds_params.
-            # Also, we need to keep in mind that the function returns
-            # SER_SECURE_PORT and REPLICA_ID that are not used in
-            # the instance creation here.
-            # args_instance[SER_HOST] = instance_data[SER_HOST]
-            args_instance = {}
-            args_instance[SER_PORT] = instance_data[SER_PORT]
-            args_instance[SER_SECURE_PORT] = instance_data[SER_SECURE_PORT]
-            args_instance[SER_SERVERID_PROP] = instance_data[SER_SERVERID_PROP]
-            # It's required to be able to make a suffix-less install for
-            # some cli tests. It's invalid to require replication with
-            # no suffix however ....
-            if suffix is not None:
-                args_instance[SER_CREATION_SUFFIX] = suffix
-            elif role != ReplicaRole.STANDALONE:
-                raise AssertionError("Invalid request to make suffix-less replicated environment")
-
-            instance.allocate(args_instance)
-
-            instance_exists = instance.exists()
-
-            if instance_exists:
-                instance.delete()
-
-            instance.create()
-            # We set a URL here to force ldap:// only. Once we turn on TLS
-            # we'll flick this to ldaps.
-            instance.use_ldap_uri()
-            instance.open()
-            instance.config.set('nsslapd-accesslog-logbuffering','off')
-            if role == ReplicaRole.STANDALONE:
-                ins[instance.serverid] = instance
-                instances.update(ins)
-            if role == ReplicaRole.SUPPLIER:
-                ms[instance.serverid] = instance
-                instances.update(ms)
-            if role == ReplicaRole.CONSUMER:
-                cs[instance.serverid] = instance
-                instances.update(cs)
-            if role == ReplicaRole.HUB:
-                hs[instance.serverid] = instance
-                instances.update(hs)
-            # We should always enable TLS while in FIPS mode because otherwise NSS database won't be
-            # configured in a FIPS compliant way
-            if is_fips():
-                instance.enable_tls()
-
-            # Disable strict hostname checking for TLS
-            if not TLS_HOSTNAME_CHECK:
-                instance.config.set('nsslapd-ssl-check-hostname', 'off')
-            if DEBUGGING:
-                instance.config.set('nsslapd-errorlog-level','8192')
-                instance.config.set('nsslapd-accesslog-level','260')
-                instance.config.set('nsslapd-auditlog-logging-enabled','on')
-                instance.config.set('nsslapd-auditfaillog-logging-enabled','on')
-                instance.config.set('nsslapd-plugin-logging', 'on')
-            log.info("Instance with parameters {} was created.".format(args_instance))
-
-    if "standalone1" in instances and len(instances) == 1:
-        return TopologyMain(standalones=instances["standalone1"])
-    else:
-        return TopologyMain(standalones=ins, suppliers=ms, consumers=cs, hubs=hs)
-
-
-def create_topology(topo_dict, suffix=DEFAULT_SUFFIX):
-    """Create a requested topology. Cascading replication scenario isn't supported
-
-    :param topo_dict: a dictionary {ReplicaRole.STANDALONE: num, ReplicaRole.SUPPLIER: num,
-                                   ReplicaRole.CONSUMER: num}
-    :type topo_dict: dict
-    :param suffix: a suffix for the replication
-    :type suffix: str
-
-    :return - TopologyMain object
-    """
-
-    if not topo_dict:
-        ValueError("You need to specify the dict. For instance: {ReplicaRole.STANDALONE: 1}")
-
-    if ReplicaRole.HUB in topo_dict.keys():
-        NotImplementedError("Cascading replication scenario isn't supported."
-                            "Please, use existing topology or create your own.")
-
-    topo = _create_instances(topo_dict, suffix)
-
-    # Start with a single supplier, and create it "first".
-    first_supplier = None
-    try:
-        first_supplier = list(topo.ms.values())[0]
-        log.info("Creating replication topology.")
-        # Now get the first supplier ready.
-        repl = ReplicationManager(DEFAULT_SUFFIX)
-        repl.create_first_supplier(first_supplier)
-    except IndexError:
-        pass
-
-    # Now init the other suppliers from this.
-    # This will reinit m, and put a bi-directional agreement
-    # in place.
-    for m in topo.ms.values():
-        # Skip firstsupplier.
-        if m is first_supplier:
-            continue
-        log.info("Joining supplier %s to %s ..." % (m.serverid, first_supplier.serverid))
-        repl.join_supplier(first_supplier, m)
-
-    # Mesh the supplier agreements.
-    for mo in topo.ms.values():
-        for mi in topo.ms.values():
-            if mo is mi:
-                continue
-            log.info("Ensuring supplier %s to %s ..." % (mo.serverid, mi.serverid))
-            repl.ensure_agreement(mo, mi)
-
-    # Add supplier -> consumer agreements.
-    for c in topo.cs.values():
-        log.info("Joining consumer %s from %s ..." % (c.serverid, first_supplier.serverid))
-        repl.join_consumer(first_supplier, c)
-
-    for m in topo.ms.values():
-        for c in topo.cs.values():
-            log.info("Ensuring consumer %s from %s ..." % (c.serverid, m.serverid))
-            repl.ensure_agreement(m, c)
-
-    # Clear out the tmp dir
-    for instance in topo:
-        instance.clearTmpDir(__file__)
-
-    return topo
-
-
-class TopologyMain(object):
-    def __init__(self, standalones=None, suppliers=None, consumers=None, hubs=None):
-        self.ms = {}
-        self.cs = {}
-        self.hs = {}
-        self.all_insts = {}
-
-        if standalones:
-            if isinstance(standalones, dict):
-                self.ins = standalones
-                self.all_insts.update(standalones)
-            else:
-                self.standalone = standalones
-                self.all_insts['standalone1'] = standalones
-        if suppliers:
-            self.ms = suppliers
-            self.all_insts.update(self.ms)
-        if consumers:
-            self.cs = consumers
-            self.all_insts.update(self.cs)
-        if hubs:
-            self.hs = hubs
-            self.all_insts.update(self.hs)
-
-    def __iter__(self):
-        return self.all_insts.values().__iter__()
-
-    def __getitem__(self, index):
-        return list(self.all_insts.values())[index]
-
-    def pause_all_replicas(self):
-        """Pause all agreements in the class instance"""
-
-        for inst in self.all_insts.values():
-            replicas = Replicas(inst)
-            replica = replicas.get(DEFAULT_SUFFIX)
-            for agreement in replica.get_agreements().list():
-                agreement.pause()
-
-    def resume_all_replicas(self):
-        """Resume all agreements in the class instance"""
-
-        for inst in self.all_insts.values():
-            replicas = Replicas(inst)
-            replica = replicas.get(DEFAULT_SUFFIX)
-            for agreement in replica.get_agreements().list():
-                agreement.resume()
-
-    def all_get_dsldapobject(self, dn, otype):
-        result = []
-        for inst in self.all_insts.values():
-            o = otype(inst, dn)
-            result.append(o)
-        return result
-
-
-@pytest.fixture(scope="module")
-def topology_st(request):
-    """Create DS standalone instance"""
-
-    topology = create_topology({ReplicaRole.STANDALONE: 1})
-
-    def fin():
-        topology.standalone.stop()
-        if DEBUGGING is None:
-            assert _remove_ssca_db(topology)
-            if topology.standalone.exists():
-                topology.standalone.delete()
-
-    request.addfinalizer(fin)
-
-    topology.logcap = LogCapture()
-    return topology
-
-
-gssapi_ack = pytest.mark.skipif(not os.environ.get('GSSAPI_ACK', False), reason="GSSAPI tests may damage system configuration.")
-
-@pytest.fixture(scope="module")
-def topology_st_gssapi(request):
-    """Create a DS standalone instance with GSSAPI enabled.
-
-    This will alter the instance to remove the secure port, to allow
-    GSSAPI to function.
-    """
-    hostname = socket.gethostname().split('.', 1)
-
-    # Assert we have a domain setup in some kind.
-    assert len(hostname) == 2
-
-    REALM = hostname[1].upper()
-
-    topology = create_topology({ReplicaRole.STANDALONE: 1})
-
-    # Fix the hostname.
-    topology.standalone.host = socket.gethostname()
-
-    krb = MitKrb5(realm=REALM, debug=DEBUGGING)
-
-    # Destroy existing realm.
-    if krb.check_realm():
-        krb.destroy_realm()
-    krb.create_realm()
-
-    # Now add krb to our instance.
-    krb.create_principal(principal='ldap/%s' % topology.standalone.host)
-    krb.create_keytab(principal='ldap/%s' % topology.standalone.host, keytab='/etc/krb5.keytab')
-    os.chown('/etc/krb5.keytab', topology.standalone.get_user_uid(), topology.standalone.get_group_gid())
-
-    # Add sasl mappings
-    saslmappings = SaslMappings(topology.standalone)
-    # First, purge all the default maps.
-    [m.delete() for m in saslmappings.list()]
-    # Now create a single map that works for our case.
-    saslmappings.create(properties={
-        'cn': 'suffix map',
-        # Don't add the realm due to a SASL bug
-        # 'nsSaslMapRegexString': '\\(.*\\)@%s' % self.realm,
-        'nsSaslMapRegexString': '\\(.*\\)',
-        'nsSaslMapBaseDNTemplate': topology.standalone.creation_suffix,
-        'nsSaslMapFilterTemplate': '(uid=\\1)'
-    })
-    topology.standalone.realm = krb
-
-    topology.standalone.config.set('nsslapd-localhost', topology.standalone.host)
-
-    topology.standalone.sslport = None
-
-    topology.standalone.restart()
-
-    topology.standalone.clearTmpDir(__file__)
-
-    def fin():
-        topology.standalone.stop()
-        if DEBUGGING is None:
-            assert _remove_ssca_db(topology)
-            if topology.standalone.exists():
-                topology.standalone.delete()
-            krb.destroy_realm()
-
-    request.addfinalizer(fin)
-
-    topology.logcap = LogCapture()
-    return topology
-
-
-@pytest.fixture(scope="module")
-def topology_no_sample(request):
-    """Create instance without sample entries to reproduce not initialised database"""
-
-    topology = create_topology({ReplicaRole.STANDALONE: 1}, None)
-    topology.standalone.backends.create(properties={
-        'cn': 'userRoot',
-        'nsslapd-suffix': DEFAULT_SUFFIX,
-    })
-
-    def fin():
-        topology.standalone.stop()
-        if DEBUGGING is None:
-            assert _remove_ssca_db(topology)
-            if topology.standalone.exists():
-                topology.standalone.delete()
-
-    request.addfinalizer(fin)
-
-    topology.logcap = LogCapture()
-    return topology
-
-
-@pytest.fixture(scope="module")
-def topology_i2(request):
-    """Create two instance DS deployment"""
-
-    topology = create_topology({ReplicaRole.STANDALONE: 2})
-
-    def fin():
-        [inst.stop() for inst in topology]
-        if DEBUGGING is None:
-            assert _remove_ssca_db(topology)
-            for inst in topology:
-                if inst.exists():
-                    inst.delete()
-
-    request.addfinalizer(fin)
-
-    topology.logcap = LogCapture()
-    return topology
-
-
-@pytest.fixture(scope="module")
-def topology_i3(request):
-    """Create three instance DS deployment"""
-
-    topology = create_topology({ReplicaRole.STANDALONE: 3})
-
-    def fin():
-        [inst.stop() for inst in topology]
-        if DEBUGGING is None:
-            assert _remove_ssca_db(topology)
-            for inst in topology:
-                if inst.exists():
-                    inst.delete()
-
-    request.addfinalizer(fin)
-
-    topology.logcap = LogCapture()
-    return topology
-
-@pytest.fixture(scope="module")
-def topology_m1(request):
-    """Create Replication Deployment with one supplier and one consumer"""
-
-    topology = create_topology({ReplicaRole.SUPPLIER: 1})
-
-    def fin():
-        [inst.stop() for inst in topology]
-        if DEBUGGING is None:
-            assert _remove_ssca_db(topology)
-            for inst in topology:
-                if inst.exists():
-                    inst.delete()
-
-    request.addfinalizer(fin)
-
-    topology.logcap = LogCapture()
-    return topology
-
-@pytest.fixture(scope="module")
-def topology_m1c1(request):
-    """Create Replication Deployment with one supplier and one consumer"""
-
-    topology = create_topology({ReplicaRole.SUPPLIER: 1,
-                                ReplicaRole.CONSUMER: 1})
-
-    def fin():
-        [inst.stop() for inst in topology]
-        if DEBUGGING is None:
-            assert _remove_ssca_db(topology)
-            for inst in topology:
-                if inst.exists():
-                    inst.delete()
-
-    request.addfinalizer(fin)
-
-    topology.logcap = LogCapture()
-    return topology
-
-
-@pytest.fixture(scope="module")
-def topology_m2(request):
-    """Create Replication Deployment with two suppliers"""
-
-    topology = create_topology({ReplicaRole.SUPPLIER: 2})
-
-    def fin():
-        [inst.stop() for inst in topology]
-        if DEBUGGING is None:
-            assert _remove_ssca_db(topology)
-            for inst in topology:
-                if inst.exists():
-                    inst.delete()
-
-    request.addfinalizer(fin)
-
-    topology.logcap = LogCapture()
-    return topology
-
-
-@pytest.fixture(scope="module")
-def topology_m3(request):
-    """Create Replication Deployment with three suppliers"""
-
-    topology = create_topology({ReplicaRole.SUPPLIER: 3})
-
-    def fin():
-        [inst.stop() for inst in topology]
-        if DEBUGGING is None:
-            assert _remove_ssca_db(topology)
-            for inst in topology:
-                if inst.exists():
-                    inst.delete()
-
-    request.addfinalizer(fin)
-
-    topology.logcap = LogCapture()
-    return topology
-
-
-@pytest.fixture(scope="module")
-def topology_m4(request):
-    """Create Replication Deployment with four suppliers"""
-
-    topology = create_topology({ReplicaRole.SUPPLIER: 4})
-
-    def fin():
-        [inst.stop() for inst in topology]
-        if DEBUGGING is None:
-            assert _remove_ssca_db(topology)
-            for inst in topology:
-                if inst.exists():
-                    inst.delete()
-
-    request.addfinalizer(fin)
-
-    topology.logcap = LogCapture()
-    return topology
-
-
-@pytest.fixture(scope="module")
-def topology_m2c2(request):
-    """Create Replication Deployment with two suppliers and two consumers"""
-
-    topology = create_topology({ReplicaRole.SUPPLIER: 2,
-                                ReplicaRole.CONSUMER: 2})
-
-    def fin():
-        [inst.stop() for inst in topology]
-        if DEBUGGING is None:
-            assert _remove_ssca_db(topology)
-            for inst in topology:
-                if inst.exists():
-                    inst.delete()
-
-    request.addfinalizer(fin)
-
-    topology.logcap = LogCapture()
-    return topology
-
-
-@pytest.fixture(scope="module")
-def topology_m1h1c1(request):
-    """Create Replication Deployment with one supplier, one consumer and one hub"""
-
-    topo_roles = {ReplicaRole.SUPPLIER: 1, ReplicaRole.HUB: 1, ReplicaRole.CONSUMER: 1}
-    topology = _create_instances(topo_roles, DEFAULT_SUFFIX)
-    supplier = topology.ms["supplier1"]
-    hub = topology.hs["hub1"]
-    consumer = topology.cs["consumer1"]
-
-    # Start with the supplier, and create it "first".
-    log.info("Creating replication topology.")
-    # Now get the first supplier ready.
-    repl = ReplicationManager(DEFAULT_SUFFIX)
-    repl.create_first_supplier(supplier)
-    # Finish the topology creation
-    repl.join_hub(supplier, hub)
-    repl.join_consumer(hub, consumer)
-
-    repl.test_replication(supplier, consumer)
-
-    # Clear out the tmp dir
-    for instance in topology:
-        instance.clearTmpDir(__file__)
-
-    def fin():
-        [inst.stop() for inst in topology]
-        if DEBUGGING is None:
-            assert _remove_ssca_db(topology)
-            for inst in topology:
-                if inst.exists():
-                    inst.delete()
-
-    request.addfinalizer(fin)
-
-    topology.logcap = LogCapture()
-    return topology
diff --git a/src/lib389/lib389/topologies/__init__.py b/src/lib389/lib389/topologies/__init__.py
new file mode 100644
index 000000000..f4f2dbe10
--- /dev/null
+++ b/src/lib389/lib389/topologies/__init__.py
@@ -0,0 +1,561 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import os
+import logging
+import socket  # For hostname detection for GSSAPI tests
+import pytest
+from lib389 import DirSrv
+from lib389.utils import generate_ds_params, is_fips
+from lib389.mit_krb5 import MitKrb5
+from lib389.saslmap import SaslMappings
+from lib389.replica import ReplicationManager, Replicas
+from lib389.nss_ssl import NssSsl
+from lib389._constants import *
+from lib389.cli_base import LogCapture
+
+TLS_HOSTNAME_CHECK = os.getenv('TLS_HOSTNAME_CHECK', default=True)
+DEBUGGING = os.getenv('DEBUGGING', default=None)  # unset means: fixtures clean up in their fin() handlers
+if DEBUGGING:
+    logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+    logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
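+
+# Illustrative note (an assumption about usage, not part of this patch): both
+# switches are read from the process environment, e.g. exporting DEBUGGING=1
+# before running py.test enables the verbose instance logging below.
+# os.getenv() returns strings, so any non-empty value counts as "set"; only an
+# unset (or empty) variable keeps the default.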
+
+
+def _remove_ssca_db(topology):
+    ssca = NssSsl(dbpath=topology[0].get_ssca_dir())
+    if ssca._db_exists():
+        return ssca.remove_db()
+    else:
+        return True
+
+
+def _create_instances(topo_dict, suffix):
+    """Create requested instances without replication or any other modifications
+
+    :param topo_dict: a dictionary {ReplicaRole.STANDALONE: num, ReplicaRole.SUPPLIER: num,
+                                    ReplicaRole.HUB: num, ReplicaRole.CONSUMER: num}
+    :type topo_dict: dict
+    :param suffix: a suffix
+    :type suffix: str
+
+    :return: TopologyMain object
+    """
+
+    instances = {}
+    ms = {}
+    cs = {}
+    hs = {}
+    ins = {}
+
+    # Create instances
+    for role in topo_dict.keys():
+        for inst_num in range(1, topo_dict[role]+1):
+            instance_data = generate_ds_params(inst_num, role)
+            if DEBUGGING:
+                instance = DirSrv(verbose=True)
+            else:
+                instance = DirSrv(verbose=False)
+            # TODO: Put 'args_instance' to generate_ds_params.
+            # Also, we need to keep in mind that the function returns
+            # SER_SECURE_PORT and REPLICA_ID that are not used in
+            # the instance creation here.
+            # args_instance[SER_HOST] = instance_data[SER_HOST]
+            args_instance = {}
+            args_instance[SER_PORT] = instance_data[SER_PORT]
+            args_instance[SER_SECURE_PORT] = instance_data[SER_SECURE_PORT]
+            args_instance[SER_SERVERID_PROP] = instance_data[SER_SERVERID_PROP]
+            # A suffix-less install must be possible for some CLI tests,
+            # but it is invalid to request a replicated environment with
+            # no suffix.
+            if suffix is not None:
+                args_instance[SER_CREATION_SUFFIX] = suffix
+            elif role != ReplicaRole.STANDALONE:
+                raise AssertionError("Invalid request to make suffix-less replicated environment")
+
+            instance.allocate(args_instance)
+
+            instance_exists = instance.exists()
+
+            if instance_exists:
+                instance.delete()
+
+            instance.create()
+            # We set a URL here to force ldap:// only. Once we turn on TLS
+            # we'll flick this to ldaps.
+            instance.use_ldap_uri()
+            instance.open()
+            instance.config.set('nsslapd-accesslog-logbuffering','off')
+            if role == ReplicaRole.STANDALONE:
+                ins[instance.serverid] = instance
+                instances.update(ins)
+            if role == ReplicaRole.SUPPLIER:
+                ms[instance.serverid] = instance
+                instances.update(ms)
+            if role == ReplicaRole.CONSUMER:
+                cs[instance.serverid] = instance
+                instances.update(cs)
+            if role == ReplicaRole.HUB:
+                hs[instance.serverid] = instance
+                instances.update(hs)
+            # We must always enable TLS while in FIPS mode; otherwise the
+            # NSS database won't be configured in a FIPS-compliant way.
+            if is_fips():
+                instance.enable_tls()
+
+            # Disable strict hostname checking for TLS
+            if not TLS_HOSTNAME_CHECK:
+                instance.config.set('nsslapd-ssl-check-hostname', 'off')
+            if DEBUGGING:
+                instance.config.set('nsslapd-errorlog-level','8192')
+                instance.config.set('nsslapd-accesslog-level','260')
+                instance.config.set('nsslapd-auditlog-logging-enabled','on')
+                instance.config.set('nsslapd-auditfaillog-logging-enabled','on')
+                instance.config.set('nsslapd-plugin-logging', 'on')
+            log.info("Instance with parameters {} was created.".format(args_instance))
+
+    if "standalone1" in instances and len(instances) == 1:
+        return TopologyMain(standalones=instances["standalone1"])
+    else:
+        return TopologyMain(standalones=ins, suppliers=ms, consumers=cs, hubs=hs)
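+
+# Illustrative note (an assumption, not part of this patch): serverids follow
+# the "<role><n>" pattern coming from generate_ds_params(), which is what the
+# fixture code below relies on, e.g.:
+#
+#     topo = _create_instances({ReplicaRole.SUPPLIER: 2}, DEFAULT_SUFFIX)
+#     topo.ms   # {'supplier1': <DirSrv>, 'supplier2': <DirSrv>}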
+
+
+def create_topology(topo_dict, suffix=DEFAULT_SUFFIX):
+    """Create a requested topology. Cascading replication scenario isn't supported
+
+    :param topo_dict: a dictionary {ReplicaRole.STANDALONE: num, ReplicaRole.SUPPLIER: num,
+                                   ReplicaRole.CONSUMER: num}
+    :type topo_dict: dict
+    :param suffix: a suffix for the replication
+    :type suffix: str
+
+    :return: TopologyMain object
+    """
+
+    if not topo_dict:
+        raise ValueError("You need to specify the dict. For instance: {ReplicaRole.STANDALONE: 1}")
+
+    if ReplicaRole.HUB in topo_dict.keys():
+        raise NotImplementedError("Cascading replication scenario isn't supported. "
+                                  "Please use an existing topology or create your own.")
+
+    topo = _create_instances(topo_dict, suffix)
+
+    # Start with a single supplier, and create it "first".
+    first_supplier = None
+    try:
+        first_supplier = list(topo.ms.values())[0]
+        log.info("Creating replication topology.")
+        # Now get the first supplier ready.
+        repl = ReplicationManager(DEFAULT_SUFFIX)
+        repl.create_first_supplier(first_supplier)
+    except IndexError:
+        pass
+
+    # Now init the other suppliers from this.
+    # This will reinit m, and put a bi-directional agreement
+    # in place.
+    for m in topo.ms.values():
+        # Skip the first supplier.
+        if m is first_supplier:
+            continue
+        log.info("Joining supplier %s to %s ..." % (m.serverid, first_supplier.serverid))
+        repl.join_supplier(first_supplier, m)
+
+    # Mesh the supplier agreements.
+    for mo in topo.ms.values():
+        for mi in topo.ms.values():
+            if mo is mi:
+                continue
+            log.info("Ensuring supplier %s to %s ..." % (mo.serverid, mi.serverid))
+            repl.ensure_agreement(mo, mi)
+
+    # Add supplier -> consumer agreements.
+    for c in topo.cs.values():
+        log.info("Joining consumer %s from %s ..." % (c.serverid, first_supplier.serverid))
+        repl.join_consumer(first_supplier, c)
+
+    for m in topo.ms.values():
+        for c in topo.cs.values():
+            log.info("Ensuring consumer %s from %s ..." % (c.serverid, m.serverid))
+            repl.ensure_agreement(m, c)
+
+    # Clear out the tmp dir
+    for instance in topo:
+        instance.clearTmpDir(__file__)
+
+    return topo
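+
+# Illustrative sketch (not part of this patch): a direct call mirroring what
+# the module-scoped fixtures below do:
+#
+#     topo = create_topology({ReplicaRole.SUPPLIER: 2, ReplicaRole.CONSUMER: 1})
+#     s1 = topo.ms["supplier1"]   # suppliers end up fully meshed
+#     c1 = topo.cs["consumer1"]   # every supplier gets an agreement to c1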
+
+
+class TopologyMain(object):
+    def __init__(self, standalones=None, suppliers=None, consumers=None, hubs=None):
+        self.ms = {}
+        self.cs = {}
+        self.hs = {}
+        self.all_insts = {}
+
+        if standalones:
+            if isinstance(standalones, dict):
+                self.ins = standalones
+                self.all_insts.update(standalones)
+            else:
+                self.standalone = standalones
+                self.all_insts['standalone1'] = standalones
+        if suppliers:
+            self.ms = suppliers
+            self.all_insts.update(self.ms)
+        if consumers:
+            self.cs = consumers
+            self.all_insts.update(self.cs)
+        if hubs:
+            self.hs = hubs
+            self.all_insts.update(self.hs)
+
+    def __iter__(self):
+        return self.all_insts.values().__iter__()
+
+    def __getitem__(self, index):
+        return list(self.all_insts.values())[index]
+
+    def pause_all_replicas(self):
+        """Pause all agreements in the class instance"""
+
+        for inst in self.all_insts.values():
+            replicas = Replicas(inst)
+            replica = replicas.get(DEFAULT_SUFFIX)
+            for agreement in replica.get_agreements().list():
+                agreement.pause()
+
+    def resume_all_replicas(self):
+        """Resume all agreements in the class instance"""
+
+        for inst in self.all_insts.values():
+            replicas = Replicas(inst)
+            replica = replicas.get(DEFAULT_SUFFIX)
+            for agreement in replica.get_agreements().list():
+                agreement.resume()
+
+    def all_get_dsldapobject(self, dn, otype):
+        result = []
+        for inst in self.all_insts.values():
+            o = otype(inst, dn)
+            result.append(o)
+        return result
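+
+# Illustrative sketch (not part of this patch): TopologyMain supports
+# iteration and positional indexing over every instance, plus bulk control of
+# the replica agreements on DEFAULT_SUFFIX:
+#
+#     for inst in topo:             # iterates topo.all_insts.values()
+#         inst.restart()
+#     first = topo[0]               # list(topo.all_insts.values())[0]
+#     topo.pause_all_replicas()
+#     topo.resume_all_replicas()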
+
+
+@pytest.fixture(scope="module")
+def topology_st(request):
+    """Create DS standalone instance"""
+
+    topology = create_topology({ReplicaRole.STANDALONE: 1})
+
+    def fin():
+        topology.standalone.stop()
+        if DEBUGGING is None:
+            assert _remove_ssca_db(topology)
+            if topology.standalone.exists():
+                topology.standalone.delete()
+
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
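+
+# Illustrative usage (not part of this patch): pytest injects the fixture by
+# parameter name; creation and the fin() finalizer above run once per module:
+#
+#     def test_server_is_up(topology_st):
+#         assert topology_st.standalone.status()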
+
+
+gssapi_ack = pytest.mark.skipif(not os.environ.get('GSSAPI_ACK', False), reason="GSSAPI tests may damage system configuration.")
+
+@pytest.fixture(scope="module")
+def topology_st_gssapi(request):
+    """Create a DS standalone instance with GSSAPI enabled.
+
+    This will alter the instance to remove the secure port, to allow
+    GSSAPI to function.
+    """
+    hostname = socket.gethostname().split('.', 1)
+
+    # Assert that the host has some kind of DNS domain configured.
+    assert len(hostname) == 2
+
+    REALM = hostname[1].upper()
+
+    topology = create_topology({ReplicaRole.STANDALONE: 1})
+
+    # Fix the hostname.
+    topology.standalone.host = socket.gethostname()
+
+    krb = MitKrb5(realm=REALM, debug=DEBUGGING)
+
+    # Destroy existing realm.
+    if krb.check_realm():
+        krb.destroy_realm()
+    krb.create_realm()
+
+    # Now add krb to our instance.
+    krb.create_principal(principal='ldap/%s' % topology.standalone.host)
+    krb.create_keytab(principal='ldap/%s' % topology.standalone.host, keytab='/etc/krb5.keytab')
+    os.chown('/etc/krb5.keytab', topology.standalone.get_user_uid(), topology.standalone.get_group_gid())
+
+    # Add sasl mappings
+    saslmappings = SaslMappings(topology.standalone)
+    # First, purge all the default maps.
+    [m.delete() for m in saslmappings.list()]
+    # Now create a single map that works for our case.
+    saslmappings.create(properties={
+        'cn': 'suffix map',
+        # Don't add the realm due to a SASL bug
+        # 'nsSaslMapRegexString': '\\(.*\\)@%s' % self.realm,
+        'nsSaslMapRegexString': '\\(.*\\)',
+        'nsSaslMapBaseDNTemplate': topology.standalone.creation_suffix,
+        'nsSaslMapFilterTemplate': '(uid=\\1)'
+    })
+    topology.standalone.realm = krb
+
+    topology.standalone.config.set('nsslapd-localhost', topology.standalone.host)
+
+    topology.standalone.sslport = None
+
+    topology.standalone.restart()
+
+    topology.standalone.clearTmpDir(__file__)
+
+    def fin():
+        topology.standalone.stop()
+        if DEBUGGING is None:
+            assert _remove_ssca_db(topology)
+            if topology.standalone.exists():
+                topology.standalone.delete()
+            krb.destroy_realm()
+
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
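+
+# Illustrative usage (not part of this patch): combine this fixture with the
+# gssapi_ack marker defined above, so the test only runs when GSSAPI_ACK is
+# set in the environment:
+#
+#     @gssapi_ack
+#     def test_gssapi_bind(topology_st_gssapi):
+#         st = topology_st_gssapi.standalone
+#         st.realm.create_principal(principal='testuser')  # hypothetical name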
+
+
+@pytest.fixture(scope="module")
+def topology_no_sample(request):
+    """Create instance without sample entries to reproduce not initialised database"""
+
+    topology = create_topology({ReplicaRole.STANDALONE: 1}, None)
+    topology.standalone.backends.create(properties={
+        'cn': 'userRoot',
+        'nsslapd-suffix': DEFAULT_SUFFIX,
+    })
+
+    def fin():
+        topology.standalone.stop()
+        if DEBUGGING is None:
+            assert _remove_ssca_db(topology)
+            if topology.standalone.exists():
+                topology.standalone.delete()
+
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
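+
+# Illustrative usage (not part of this patch): tests of import and
+# initialisation paths start from this empty userRoot backend:
+#
+#     def test_init_empty_backend(topology_no_sample):
+#         st = topology_no_sample.standalone
+#         # DEFAULT_SUFFIX exists as a backend here, but holds no entries yet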
+
+
+@pytest.fixture(scope="module")
+def topology_i2(request):
+    """Create two instance DS deployment"""
+
+    topology = create_topology({ReplicaRole.STANDALONE: 2})
+
+    def fin():
+        [inst.stop() for inst in topology]
+        if DEBUGGING is None:
+            assert _remove_ssca_db(topology)
+            for inst in topology:
+                if inst.exists():
+                    inst.delete()
+
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
+
+
+@pytest.fixture(scope="module")
+def topology_i3(request):
+    """Create three instance DS deployment"""
+
+    topology = create_topology({ReplicaRole.STANDALONE: 3})
+
+    def fin():
+        [inst.stop() for inst in topology]
+        if DEBUGGING is None:
+            assert _remove_ssca_db(topology)
+            for inst in topology:
+                if inst.exists():
+                    inst.delete()
+
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
+
+@pytest.fixture(scope="module")
+def topology_m1(request):
+    """Create Replication Deployment with one supplier and one consumer"""
+
+    topology = create_topology({ReplicaRole.SUPPLIER: 1})
+
+    def fin():
+        [inst.stop() for inst in topology]
+        if DEBUGGING is None:
+            assert _remove_ssca_db(topology)
+            for inst in topology:
+                if inst.exists():
+                    inst.delete()
+
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
+
+@pytest.fixture(scope="module")
+def topology_m1c1(request):
+    """Create Replication Deployment with one supplier and one consumer"""
+
+    topology = create_topology({ReplicaRole.SUPPLIER: 1,
+                                ReplicaRole.CONSUMER: 1})
+
+    def fin():
+        [inst.stop() for inst in topology]
+        if DEBUGGING is None:
+            assert _remove_ssca_db(topology)
+            for inst in topology:
+                if inst.exists():
+                    inst.delete()
+
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
+
+
+@pytest.fixture(scope="module")
+def topology_m2(request):
+    """Create Replication Deployment with two suppliers"""
+
+    topology = create_topology({ReplicaRole.SUPPLIER: 2})
+
+    def fin():
+        [inst.stop() for inst in topology]
+        if DEBUGGING is None:
+            assert _remove_ssca_db(topology)
+            for inst in topology:
+                if inst.exists():
+                    inst.delete()
+
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
+
+
+@pytest.fixture(scope="module")
+def topology_m3(request):
+    """Create Replication Deployment with three suppliers"""
+
+    topology = create_topology({ReplicaRole.SUPPLIER: 3})
+
+    def fin():
+        [inst.stop() for inst in topology]
+        if DEBUGGING is None:
+            assert _remove_ssca_db(topology)
+            for inst in topology:
+                if inst.exists():
+                    inst.delete()
+
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
+
+
+@pytest.fixture(scope="module")
+def topology_m4(request):
+    """Create Replication Deployment with four suppliers"""
+
+    topology = create_topology({ReplicaRole.SUPPLIER: 4})
+
+    def fin():
+        [inst.stop() for inst in topology]
+        if DEBUGGING is None:
+            assert _remove_ssca_db(topology)
+            for inst in topology:
+                if inst.exists():
+                    inst.delete()
+
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
+
+
+@pytest.fixture(scope="module")
+def topology_m2c2(request):
+    """Create Replication Deployment with two suppliers and two consumers"""
+
+    topology = create_topology({ReplicaRole.SUPPLIER: 2,
+                                ReplicaRole.CONSUMER: 2})
+
+    def fin():
+        [inst.stop() for inst in topology]
+        if DEBUGGING is None:
+            assert _remove_ssca_db(topology)
+            for inst in topology:
+                if inst.exists():
+                    inst.delete()
+
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
+
+
+@pytest.fixture(scope="module")
+def topology_m1h1c1(request):
+    """Create Replication Deployment with one supplier, one consumer and one hub"""
+
+    topo_roles = {ReplicaRole.SUPPLIER: 1, ReplicaRole.HUB: 1, ReplicaRole.CONSUMER: 1}
+    topology = _create_instances(topo_roles, DEFAULT_SUFFIX)
+    supplier = topology.ms["supplier1"]
+    hub = topology.hs["hub1"]
+    consumer = topology.cs["consumer1"]
+
+    # Start with the supplier, and create it "first".
+    log.info("Creating replication topology.")
+    # Now get the first supplier ready.
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    repl.create_first_supplier(supplier)
+    # Finish the topology creation
+    repl.join_hub(supplier, hub)
+    repl.join_consumer(hub, consumer)
+
+    repl.test_replication(supplier, consumer)
+
+    # Clear out the tmp dir
+    for instance in topology:
+        instance.clearTmpDir(__file__)
+
+    def fin():
+        [inst.stop() for inst in topology]
+        if DEBUGGING is None:
+            assert _remove_ssca_db(topology)
+            for inst in topology:
+                if inst.exists():
+                    inst.delete()
+
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
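
The topologies module is recreated as a package (topologies/__init__.py) with the same top-level names, so existing imports keep resolving unchanged, for example:

    from lib389.topologies import topology_st, create_topology

A package directory also leaves room for adding submodules later without changing the import path; this patch itself only moves the code.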
diff --git a/src/lib389/setup.py b/src/lib389/setup.py
index ebfbf636b..3ad6f2810 100644
--- a/src/lib389/setup.py
+++ b/src/lib389/setup.py
@@ -68,7 +68,16 @@ setup(
         'Topic :: Software Development :: Testing'],
 
     keywords='389 directory server test configure',
-    packages=find_packages(exclude=['tests*']),
+    packages=find_packages(
+        exclude=[
+            '*.tests',
+            '*.tests.*',
+            'lib389.topologies',
+            'lib389.topologies.*',
+            'lib389.perftools',
+            'lib389.perftools.*',
+        ]
+    ),
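+    # find_packages() matches exclude patterns against dotted package names,
+    # so both 'pkg' and 'pkg.*' entries are needed to skip a package together
+    # with its subpackages. Illustrative effect (not part of this file):
+    #     find_packages(exclude=['lib389.topologies', 'lib389.topologies.*'])
+    #     # -> the new topologies subpackage is not listed in packages=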
 
     # find lib389/clitools -name ds\* -exec echo \''{}'\', \;
     data_files=[
 