tests: micronet: update infra

Signed-off-by: Christian Hopps <chopps@labn.net>
This commit is contained in:
Christian Hopps 2021-07-26 23:23:20 +00:00
parent 6a5433ef0b
commit 4958158787
18 changed files with 1471 additions and 944 deletions

6
.pylintrc Normal file
View File

@ -0,0 +1,6 @@
[MASTER]
init-hook="import sys; sys.path.insert(0, '..')"
signature-mutators=common_config.retry,retry
[MESSAGES CONTROL]
disable=I,C,R,W

View File

@ -108,10 +108,10 @@ def setup_module(mod):
for rname, router in router_list.items():
# create VRF rx-bfd-cust1 and link rx-eth0 to rx-bfd-cust1
for cmd in cmds:
output = tgen.net[rname].cmd(cmd.format(rname))
output = tgen.net[rname].cmd_raises(cmd.format(rname))
if rname == "r2":
for cmd in cmds2:
output = tgen.net[rname].cmd(cmd.format(rname))
output = tgen.net[rname].cmd_raises(cmd.format(rname))
for rname, router in router_list.items():
router.load_config(

View File

@ -30,6 +30,9 @@ test_evpn_mh.py: Testing EVPN multihoming
import os
import re
import sys
import subprocess
from functools import partial
import pytest
import json
import platform
@ -599,18 +602,20 @@ def test_evpn_ead_update():
def ping_anycast_gw(tgen):
# ping the anycast gw from the local and remote hosts to populate
# the mac address on the PEs
python3_path = tgen.net.get_exec_path(["python3", "python"])
script_path = os.path.abspath(os.path.join(CWD, "../lib/scapy_sendpkt.py"))
intf = "torbond"
ipaddr = "45.0.0.1"
ping_cmd = [
python3_path,
script_path,
"--imports=Ether,ARP",
"--interface=" + intf,
"'Ether(dst=\"ff:ff:ff:ff:ff:ff\")/ARP(pdst=\"{}\")'".format(ipaddr)
'Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst="{}")'.format(ipaddr)
]
for name in ("hostd11", "hostd21"):
host = tgen.net[name]
stdout = host.cmd(ping_cmd)
host = tgen.net.hosts[name]
_, stdout, _ = host.cmd_status(ping_cmd, warn=False, stderr=subprocess.STDOUT)
stdout = stdout.strip()
if stdout:
host.logger.debug("%s: arping on %s for %s returned: %s", name, intf, ipaddr, stdout)

View File

@ -117,8 +117,6 @@ NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
LOOPBACK_1 = {
"ipv4": "10.0.0.7/24",
"ipv6": "fd00:0:0:1::7/64",
"ipv4_mask": "255.255.255.0",
"ipv6_mask": None,
}
LOOPBACK_2 = {
"ipv4": "10.0.0.16/24",

View File

@ -6,15 +6,23 @@ import glob
import os
import pdb
import re
import pytest
import sys
import time
from lib.topogen import get_topogen, diagnose_env
from lib.topotest import json_cmp_result
from lib.topotest import g_extra_config as topotest_extra_config
import pytest
import lib.fixtures
from lib import topolog
from lib.micronet import Commander
from lib.micronet_cli import cli
from lib.micronet_compat import Mininet, cleanup_current, cleanup_previous
from lib.topogen import diagnose_env, get_topogen
from lib.topolog import logger
from lib.topotest import g_extra_config as topotest_extra_config
from lib.topotest import json_cmp_result
try:
from _pytest._code.code import ExceptionInfo
leak_check_ok = True
except ImportError:
leak_check_ok = False
@ -31,6 +39,12 @@ def pytest_addoption(parser):
help="Configure address sanitizer to abort process on error",
)
parser.addoption(
"--cli-on-error",
action="store_true",
help="Mininet cli on test failure",
)
parser.addoption(
"--gdb-breakpoints",
metavar="SYMBOL[,SYMBOL...]",
@ -50,17 +64,28 @@ def pytest_addoption(parser):
)
parser.addoption(
"--mininet-on-error",
action="store_true",
help="Mininet cli on test failure",
)
parser.addoption(
"--pause-after",
"--pause",
action="store_true",
help="Pause after each test",
)
parser.addoption(
"--pause-on-error",
action="store_true",
help="Do not pause after (disables default when --shell or -vtysh given)",
)
parser.addoption(
"--no-pause-on-error",
dest="pause_on_error",
action="store_false",
help="Do not pause after (disables default when --shell or -vtysh given)",
)
rundir_help="directory for running in and log files"
parser.addini("rundir", rundir_help, default="/tmp/topotests")
parser.addoption("--rundir", metavar="DIR", help=rundir_help)
parser.addoption(
"--shell",
metavar="ROUTER[,ROUTER...]",
@ -120,7 +145,7 @@ def check_for_memleaks():
latest = []
existing = []
if tgen is not None:
logdir = "/tmp/topotests/{}".format(tgen.modname)
logdir = tgen.logdir
if hasattr(tgen, "valgrind_existing_files"):
existing = tgen.valgrind_existing_files
latest = glob.glob(os.path.join(logdir, "*.valgrind.*"))
@ -132,7 +157,7 @@ def check_for_memleaks():
vfcontent = vf.read()
match = re.search(r"ERROR SUMMARY: (\d+) errors", vfcontent)
if match and match.group(1) != "0":
emsg = '{} in {}'.format(match.group(1), vfile)
emsg = "{} in {}".format(match.group(1), vfile)
leaks.append(emsg)
if leaks:
@ -142,6 +167,16 @@ def check_for_memleaks():
logger.error("Memleaks found:\n\t" + "\n\t".join(leaks))
def pytest_runtest_logstart(nodeid, location):
# location is (filename, lineno, testname)
topolog.logstart(nodeid, location, topotest_extra_config["rundir"])
def pytest_runtest_logfinish(nodeid, location):
# location is (filename, lineno, testname)
topolog.logfinish(nodeid, location)
def pytest_runtest_call():
"""
This function must be run after setup_module(), it does standardized post
@ -151,7 +186,7 @@ def pytest_runtest_call():
tgen = get_topogen()
if tgen is not None:
# Allow user to play with the setup.
tgen.mininet_cli()
tgen.cli()
pytest.exit("the topology executed successfully")
@ -176,8 +211,56 @@ def pytest_configure(config):
Assert that the environment is correctly configured, and get extra config.
"""
if not diagnose_env():
pytest.exit("environment has errors, please read the logs")
if "PYTEST_XDIST_WORKER" not in os.environ:
os.environ["PYTEST_XDIST_MODE"] = config.getoption("dist", "no")
os.environ["PYTEST_TOPOTEST_WORKER"] = ""
is_xdist = os.environ["PYTEST_XDIST_MODE"] != "no"
is_worker = False
else:
os.environ["PYTEST_TOPOTEST_WORKER"] = os.environ["PYTEST_XDIST_WORKER"]
is_xdist = True
is_worker = True
# -----------------------------------------------------
# Set some defaults for the pytest.ini [pytest] section
# ---------------------------------------------------
rundir = config.getoption("--rundir")
if not rundir:
rundir = config.getini("rundir")
if not rundir:
rundir = "/tmp/topotests"
if not config.getoption("--junitxml"):
config.option.xmlpath = os.path.join(rundir, "topotests.xml")
xmlpath = config.option.xmlpath
# Save an existing topotest.xml
if os.path.exists(xmlpath):
fmtime = time.localtime(os.path.getmtime(xmlpath))
suffix = "-" + time.strftime("%Y%m%d%H%M%S", fmtime)
commander = Commander("pytest")
mv_path = commander.get_exec_path("mv")
commander.cmd_status([mv_path, xmlpath, xmlpath + suffix])
topotest_extra_config["rundir"] = rundir
# Set the log_file (exec) to inside the rundir if not specified
if not config.getoption("--log-file") and not config.getini("log_file"):
config.option.log_file = os.path.join(rundir, "exec.log")
# Turn on live logging if user specified verbose and the config has a CLI level set
if config.getoption("--verbose") and not is_xdist and not config.getini("log_cli"):
if config.getoption("--log-cli-level", None) is None:
# By setting the CLI option to the ini value it enables log_cli=1
cli_level = config.getini("log_cli_level")
if cli_level is not None:
config.option.log_cli_level = cli_level
# ---------------------------------------
# Record our options in global dictionary
# ---------------------------------------
topotest_extra_config["rundir"] = rundir
asan_abort = config.getoption("--asan-abort")
topotest_extra_config["asan_abort"] = asan_abort
@ -194,8 +277,8 @@ def pytest_configure(config):
gdb_breakpoints = gdb_breakpoints.split(",") if gdb_breakpoints else []
topotest_extra_config["gdb_breakpoints"] = gdb_breakpoints
mincli_on_error = config.getoption("--mininet-on-error")
topotest_extra_config["mininet_on_error"] = mincli_on_error
cli_on_error = config.getoption("--cli-on-error")
topotest_extra_config["cli_on_error"] = cli_on_error
shell = config.getoption("--shell")
topotest_extra_config["shell"] = shell.split(",") if shell else []
@ -203,8 +286,6 @@ def pytest_configure(config):
strace = config.getoption("--strace-daemons")
topotest_extra_config["strace_daemons"] = strace.split(",") if strace else []
pause_after = config.getoption("--pause-after")
shell_on_error = config.getoption("--shell-on-error")
topotest_extra_config["shell_on_error"] = shell_on_error
@ -217,17 +298,44 @@ def pytest_configure(config):
vtysh_on_error = config.getoption("--vtysh-on-error")
topotest_extra_config["vtysh_on_error"] = vtysh_on_error
topotest_extra_config["pause_after"] = pause_after or shell or vtysh
pause_on_error = vtysh or shell or config.getoption("--pause-on-error")
if config.getoption("--no-pause-on-error"):
pause_on_error = False
topotest_extra_config["pause_on_error"] = pause_on_error
topotest_extra_config["pause"] = config.getoption("--pause")
topotest_extra_config["topology_only"] = config.getoption("--topology-only")
# Check environment now that we have config
if not diagnose_env(rundir):
pytest.exit("environment has errors, please read the logs")
@pytest.fixture(autouse=True, scope="session")
def setup_session_auto():
if "PYTEST_TOPOTEST_WORKER" not in os.environ:
is_worker = False
elif not os.environ["PYTEST_TOPOTEST_WORKER"]:
is_worker = False
else:
is_worker = True
logger.debug("Before the run (is_worker: %s)", is_worker)
if not is_worker:
cleanup_previous()
yield
if not is_worker:
cleanup_current()
logger.debug("After the run (is_worker: %s)", is_worker)
def pytest_runtest_makereport(item, call):
"Log all assert messages to default logger with error level"
# Nothing happened
if call.when == "call":
pause = topotest_extra_config["pause_after"]
pause = topotest_extra_config["pause"]
else:
pause = False
@ -237,6 +345,8 @@ def pytest_runtest_makereport(item, call):
except:
call.excinfo = ExceptionInfo()
title='unset'
if call.excinfo is None:
error = False
else:
@ -261,11 +371,15 @@ def pytest_runtest_makereport(item, call):
modname, item.name, call.excinfo.value
)
)
title = "{}/{}".format(modname, item.name)
# We want to pause, if requested, on any error not just test cases
# (e.g., call.when == "setup")
if not pause:
pause = topotest_extra_config["pause_after"]
pause = (
topotest_extra_config["pause_on_error"]
or topotest_extra_config["pause"]
)
# (topogen) Set topology error to avoid advancing in the test.
tgen = get_topogen()
@ -273,23 +387,75 @@ def pytest_runtest_makereport(item, call):
# This will cause topogen to report error on `routers_have_failure`.
tgen.set_error("{}/{}".format(modname, item.name))
if error and topotest_extra_config["shell_on_error"]:
for router in tgen.routers():
pause = True
tgen.net[router].runInWindow(os.getenv("SHELL", "bash"))
commander = Commander("pytest")
isatty = sys.stdout.isatty()
error_cmd = None
if error and topotest_extra_config["vtysh_on_error"]:
for router in tgen.routers():
error_cmd = commander.get_exec_path(["vtysh"])
elif error and topotest_extra_config["shell_on_error"]:
error_cmd = os.getenv("SHELL", commander.get_exec_path(["bash"]))
if error_cmd:
# Really would like something better than using this global here.
# Not all tests use topogen though so get_topogen() won't work.
win_info = None
wait_for_channels = []
for node in Mininet.g_mnet_inst.hosts.values():
pause = True
tgen.net[router].runInWindow("vtysh")
if error and topotest_extra_config["mininet_on_error"]:
tgen.mininet_cli()
channel = "{}-{}".format(os.getpid(), Commander.tmux_wait_gen) if not isatty else None
Commander.tmux_wait_gen += 1
wait_for_channels.append(channel)
if pause:
pane_info = node.run_in_window(
error_cmd,
new_window=win_info is None,
background=True,
title="{} ({})".format(title, node.name),
name=title,
tmux_target=win_info,
wait_for=channel
)
if win_info is None:
win_info = pane_info
# Now wait on any channels
for channel in wait_for_channels:
logger.debug("Waiting on TMUX channel %s", channel)
commander.cmd_raises([commander.get_exec_path("tmux"), "wait", channel])
if error and topotest_extra_config["cli_on_error"]:
# Really would like something better than using this global here.
# Not all tests use topogen though so get_topogen() won't work.
if Mininet.g_mnet_inst:
cli(Mininet.g_mnet_inst, title=title, background=False)
else:
logger.error("Could not launch CLI b/c no mininet exists yet")
while pause and isatty:
try:
user = raw_input('Testing paused, "pdb" to debug, "Enter" to continue: ')
user = raw_input(
'PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: '
)
except NameError:
user = input('Testing paused, "pdb" to debug, "Enter" to continue: ')
if user.strip() == "pdb":
user = input(
'PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: '
)
user = user.strip()
if user == "cli":
cli(Mininet.g_mnet_inst)
elif user == "pdb":
pdb.set_trace()
elif user:
print('Unrecognized input: "%s"' % user)
else:
break
#
# Add common fixtures available to all tests as parameters
#
tgen = pytest.fixture(lib.fixtures.tgen)
topo = pytest.fixture(lib.fixtures.topo)

View File

@ -18,40 +18,39 @@
# OF THIS SOFTWARE.
#
from copy import deepcopy
from time import sleep
import traceback
import ipaddr
import ipaddress
import os
import sys
from lib import topotest
from lib.topolog import logger
import traceback
from copy import deepcopy
from time import sleep
from lib.topogen import TopoRouter, get_topogen
from lib.topotest import frr_unicode
import ipaddr
# Import common_config to use commonly used APIs
from lib.common_config import (
create_common_configurations,
InvalidCLIError,
load_config_to_router,
check_address_types,
generate_ips,
validate_ip_address,
find_interface_with_greater_ip,
run_frr_cmd,
FRRCFG_FILE,
retry,
InvalidCLIError,
check_address_types,
create_common_configuration,
find_interface_with_greater_ip,
generate_ips,
get_frr_ipv6_linklocal,
get_ipv6_linklocal_address,
get_frr_ipv6_linklocal
load_config_to_router,
retry,
run_frr_cmd,
validate_ip_address,
)
from lib.topogen import TopoRouter, get_topogen
from lib.topolog import logger
from lib.topotest import frr_unicode
LOGDIR = "/tmp/topotests/"
TMPDIR = None
from lib import topotest
def create_router_bgp(tgen, topo, input_dict=None, build=False, load_config=True):
def create_router_bgp(tgen, topo=None, input_dict=None, build=False, load_config=True):
"""
API to configure bgp on router
@ -139,6 +138,9 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False, load_config=True
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
if topo is None:
topo = tgen.json_topo
# Flag is used when testing ipv6 over ipv4 or vice-versa
afi_test = False
@ -1096,9 +1098,6 @@ def modify_bgp_config_when_bgpd_down(tgen, topo, input_dict):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
try:
global LOGDIR
result = create_router_bgp(
tgen, topo, input_dict, build=False, load_config=False
)
@ -1112,13 +1111,10 @@ def modify_bgp_config_when_bgpd_down(tgen, topo, input_dict):
if router != dut:
continue
TMPDIR = os.path.join(LOGDIR, tgen.modname)
logger.info("Delete BGP config when BGPd is down in {}".format(router))
# Reading the config from /tmp/topotests and
# copy to /etc/frr/bgpd.conf
# Reading the config from "rundir" and copy to /etc/frr/bgpd.conf
cmd = "cat {}/{}/{} >> /etc/frr/bgpd.conf".format(
TMPDIR, router, FRRCFG_FILE
tgen.logdir, router, FRRCFG_FILE
)
router_list[router].run(cmd)
@ -1207,7 +1203,7 @@ def verify_router_id(tgen, topo, input_dict, expected=True):
@retry(retry_timeout=150)
def verify_bgp_convergence(tgen, topo, dut=None, expected=True):
def verify_bgp_convergence(tgen, topo=None, dut=None, expected=True):
"""
API will verify if BGP is converged with in the given time frame.
Running "show bgp summary json" command and verify bgp neighbor
@ -1230,6 +1226,9 @@ def verify_bgp_convergence(tgen, topo, dut=None, expected=True):
errormsg(str) or True
"""
if topo is None:
topo = tgen.json_topo
result = False
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
tgen = get_topogen()

View File

@ -18,37 +18,36 @@
# OF THIS SOFTWARE.
#
import ipaddress
import json
import os
import platform
import signal
import socket
import subprocess
import sys
import traceback
from collections import OrderedDict
from datetime import datetime, timedelta
from time import sleep
from copy import deepcopy
from datetime import datetime, timedelta
from functools import wraps
from re import search as re_search
from tempfile import mkdtemp
import json
import logging
import os
import sys
import traceback
import socket
import subprocess
import ipaddress
import platform
import pytest
from time import sleep
try:
# Imports from python2
from StringIO import StringIO
import ConfigParser as configparser
from StringIO import StringIO
except ImportError:
# Imports from python3
from io import StringIO
import configparser
from io import StringIO
from lib.topolog import logger, logger_config
from lib.micronet_compat import Mininet
from lib.topogen import TopoRouter, get_topogen
from lib.topotest import interface_set_status, version_cmp, frr_unicode
from lib.topolog import get_logger, logger
from lib.topotest import frr_unicode, interface_set_status, version_cmp
FRRCFG_FILE = "frr_json.conf"
FRRCFG_BKUP_FILE = "frr_json_initial.conf"
@ -60,13 +59,6 @@ ROUTER_LIST = []
CD = os.path.dirname(os.path.realpath(__file__))
PYTESTINI_PATH = os.path.join(CD, "../pytest.ini")
# Creating tmp dir with testsuite name to avoid conflict condition when
# multiple testsuites run together. All temporary files would be created
# in this dir and this dir would be removed once testsuite run is
# completed
LOGDIR = "/tmp/topotests/"
TMPDIR = None
# NOTE: to save execution logs to log file frrtest_log_dir must be configured
# in `pytest.ini`.
config = configparser.ConfigParser()
@ -138,6 +130,9 @@ DEBUG_LOGS = {
],
}
g_iperf_client_procs = {}
g_iperf_server_procs = {}
def is_string(value):
try:
return isinstance(value, basestring)
@ -146,9 +141,9 @@ def is_string(value):
if config.has_option("topogen", "verbosity"):
loglevel = config.get("topogen", "verbosity")
loglevel = loglevel.upper()
loglevel = loglevel.lower()
else:
loglevel = "INFO"
loglevel = "info"
if config.has_option("topogen", "frrtest_log_dir"):
frrtest_log_dir = config.get("topogen", "frrtest_log_dir")
@ -157,9 +152,7 @@ if config.has_option("topogen", "frrtest_log_dir"):
frrtest_log_file = frrtest_log_dir + logfile_name + str(time_stamp)
print("frrtest_log_file..", frrtest_log_file)
logger = logger_config.get_logger(
name="test_execution_logs", log_level=loglevel, target=frrtest_log_file
)
logger = get_logger("test_execution_logs", log_level=loglevel, target=frrtest_log_file)
print("Logs will be sent to logfile: {}".format(frrtest_log_file))
if config.has_option("topogen", "show_router_config"):
@ -284,7 +277,7 @@ def apply_raw_config(tgen, input_dict):
if not isinstance(config_cmd, list):
config_cmd = [config_cmd]
frr_cfg_file = "{}/{}/{}".format(TMPDIR, router_name, FRRCFG_FILE)
frr_cfg_file = "{}/{}/{}".format(tgen.logdir, router_name, FRRCFG_FILE)
with open(frr_cfg_file, "w") as cfg:
for cmd in config_cmd:
cfg.write("{}\n".format(cmd))
@ -314,7 +307,6 @@ def create_common_configurations(
-------
True or False
"""
TMPDIR = os.path.join(LOGDIR, tgen.modname)
config_map = OrderedDict(
{
@ -342,7 +334,7 @@ def create_common_configurations(
routers = config_dict.keys()
for router in routers:
fname = "{}/{}/{}".format(TMPDIR, router, FRRCFG_FILE)
fname = "{}/{}/{}".format(tgen.logdir, router, FRRCFG_FILE)
try:
frr_cfg_fd = open(fname, mode)
if config_type:
@ -504,9 +496,9 @@ def reset_config_on_routers(tgen, routerName=None):
return True
router_list = { routerName: router_list[routerName] }
delta_fmt = TMPDIR + "/{}/delta.conf"
init_cfg_fmt = TMPDIR + "/{}/frr_json_initial.conf"
run_cfg_fmt = TMPDIR + "/{}/frr.sav"
delta_fmt = tgen.logdir + "/{}/delta.conf"
init_cfg_fmt = tgen.logdir + "/{}/frr_json_initial.conf"
run_cfg_fmt = tgen.logdir + "/{}/frr.sav"
#
# Get all running configs in parallel
@ -532,7 +524,7 @@ def reset_config_on_routers(tgen, routerName=None):
procs = {}
for rname in router_list:
logger.info("Generating delta for router %s to new configuration", rname)
procs[rname] = subprocess.Popen(
procs[rname] = tgen.net.popen(
[ "/usr/lib/frr/frr-reload.py",
"--test-reset",
"--input",
@ -630,8 +622,8 @@ def load_config_to_routers(tgen, routers, save_bkup=False):
continue
router_list[router] = base_router_list[router]
frr_cfg_file_fmt = TMPDIR + "/{}/" + FRRCFG_FILE
frr_cfg_bkup_fmt = TMPDIR + "/{}/" + FRRCFG_BKUP_FILE
frr_cfg_file_fmt = tgen.logdir + "/{}/" + FRRCFG_FILE
frr_cfg_bkup_fmt = tgen.logdir + "/{}/" + FRRCFG_BKUP_FILE
procs = {}
for rname in router_list:
@ -642,8 +634,8 @@ def load_config_to_routers(tgen, routers, save_bkup=False):
with open(frr_cfg_file, "r+") as cfg:
data = cfg.read()
logger.info(
"Applying following configuration on router"
" {}:\n{}".format(rname, data)
"Applying following configuration on router %s (gen: %d):\n%s",
rname, gen, data
)
if save_bkup:
with open(frr_cfg_bkup, "w") as bkup:
@ -808,26 +800,18 @@ def generate_support_bundle():
router_list = tgen.routers()
test_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
TMPDIR = os.path.join(LOGDIR, tgen.modname)
bundle_procs = {}
for rname, rnode in router_list.items():
logger.info("Spawn collection of support bundle for %s", rname)
rnode.run("mkdir -p /var/log/frr")
bundle_procs[rname] = tgen.net[rname].popen(
"/usr/lib/frr/generate_support_bundle.py",
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
dst_bundle = "{}/{}/support_bundles/{}".format(tgen.logdir, rname, test_name)
rnode.run("mkdir -p " + dst_bundle)
gen_sup_cmd = ["/usr/lib/frr/generate_support_bundle.py", "--log-dir=" + dst_bundle]
bundle_procs[rname] = tgen.net[rname].popen(gen_sup_cmd, stdin=None)
for rname, rnode in router_list.items():
dst_bundle = "{}/{}/support_bundles/{}".format(TMPDIR, rname, test_name)
src_bundle = "/var/log/frr"
logger.info("Waiting on support bundle for %s", rname)
output, error = bundle_procs[rname].communicate()
logger.info("Saving support bundle for %s", rname)
if output:
logger.info(
"Output from collecting support bundle for %s:\n%s", rname, output
@ -836,9 +820,6 @@ def generate_support_bundle():
logger.warning(
"Error from collecting support bundle for %s:\n%s", rname, error
)
rnode.run("rm -rf {}".format(dst_bundle))
rnode.run("mkdir -p {}".format(dst_bundle))
rnode.run("mv -f {}/* {}".format(src_bundle, dst_bundle))
return True
@ -850,7 +831,7 @@ def start_topology(tgen, daemon=None):
* `tgen` : topogen object
"""
global TMPDIR, ROUTER_LIST
global ROUTER_LIST
# Starting topology
tgen.start_topology()
@ -860,7 +841,6 @@ def start_topology(tgen, daemon=None):
ROUTER_LIST = sorted(
router_list.keys(), key=lambda x: int(re_search("[0-9]+", x).group(0))
)
TMPDIR = os.path.join(LOGDIR, tgen.modname)
linux_ver = ""
router_list = tgen.routers()
@ -874,49 +854,50 @@ def start_topology(tgen, daemon=None):
logger.info("Logging platform related details: \n %s \n", linux_ver)
try:
os.chdir(TMPDIR)
os.chdir(tgen.logdir)
# Creating router named dir and empty zebra.conf bgpd.conf files
# inside the current directory
if os.path.isdir("{}".format(rname)):
os.system("rm -rf {}".format(rname))
os.mkdir("{}".format(rname))
os.system("chmod -R go+rw {}".format(rname))
os.chdir("{}/{}".format(TMPDIR, rname))
os.system("touch zebra.conf bgpd.conf")
else:
os.mkdir("{}".format(rname))
os.system("chmod -R go+rw {}".format(rname))
os.chdir("{}/{}".format(TMPDIR, rname))
os.system("touch zebra.conf bgpd.conf")
# # Creating router named dir and empty zebra.conf bgpd.conf files
# # inside the current directory
# if os.path.isdir("{}".format(rname)):
# os.system("rm -rf {}".format(rname))
# os.mkdir("{}".format(rname))
# os.system("chmod -R go+rw {}".format(rname))
# os.chdir("{}/{}".format(tgen.logdir, rname))
# os.system("touch zebra.conf bgpd.conf")
# else:
# os.mkdir("{}".format(rname))
# os.system("chmod -R go+rw {}".format(rname))
# os.chdir("{}/{}".format(tgen.logdir, rname))
# os.system("touch zebra.conf bgpd.conf")
except IOError as err:
logger.error("I/O error({0}): {1}".format(err.errno, err.strerror))
# Loading empty zebra.conf file to router, to start the zebra daemon
router.load_config(
TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(TMPDIR, rname)
TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(tgen.logdir, rname)
)
# Loading empty bgpd.conf file to router, to start the bgp daemon
router.load_config(TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(TMPDIR, rname))
router.load_config(TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(tgen.logdir, rname))
if daemon and "ospfd" in daemon:
# Loading empty ospfd.conf file to router, to start the ospf daemon
router.load_config(
TopoRouter.RD_OSPF, "{}/{}/ospfd.conf".format(TMPDIR, rname)
TopoRouter.RD_OSPF, "{}/{}/ospfd.conf".format(tgen.logdir, rname)
)
if daemon and "ospf6d" in daemon:
# Loading empty ospf6d.conf file to router, to start the ospf6 daemon
router.load_config(
TopoRouter.RD_OSPF6, "{}/{}/ospf6d.conf".format(TMPDIR, rname)
TopoRouter.RD_OSPF6, "{}/{}/ospf6d.conf".format(tgen.logdir, rname)
)
if daemon and "pimd" in daemon:
# Loading empty pimd.conf file to router, to start the pim daemon
router.load_config(
TopoRouter.RD_PIM, "{}/{}/pimd.conf".format(TMPDIR, rname)
TopoRouter.RD_PIM, "{}/{}/pimd.conf".format(tgen.logdir, rname)
)
# Starting routers
@ -991,12 +972,15 @@ def number_to_column(routerName):
return ord(routerName[0]) - 97
def topo_daemons(tgen, topo):
def topo_daemons(tgen, topo=None):
"""
Returns daemon list required for the suite based on topojson.
"""
daemon_list = []
if topo is None:
topo = tgen.json_topo
router_list = tgen.routers()
ROUTER_LIST = sorted(
router_list.keys(), key=lambda x: int(re_search("[0-9]+", x).group(0))
@ -1047,7 +1031,7 @@ def add_interfaces_to_vlan(tgen, input_dict):
router_list = tgen.routers()
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
rnode = router_list[dut]
if "vlan" in input_dict[dut]:
for vlan, interfaces in input_dict[dut]["vlan"].items():
@ -1123,7 +1107,7 @@ def tcpdump_capture_start(
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
rnode = tgen.routers()[router]
rnode = tgen.gears[router]
if timeout > 0:
cmd = "timeout {}".format(timeout)
@ -1140,7 +1124,7 @@ def tcpdump_capture_start(
cmdargs += " -s 0 {}".format(str(options))
if cap_file:
file_name = os.path.join(LOGDIR, tgen.modname, router, cap_file)
file_name = os.path.join(tgen.logdir, router, cap_file)
cmdargs += " -w {}".format(str(file_name))
# Remove existing capture file
rnode.run("rm -rf {}".format(file_name))
@ -1152,7 +1136,9 @@ def tcpdump_capture_start(
if not background:
rnode.run(cmdargs)
else:
rnode.run("nohup {} & /dev/null 2>&1".format(cmdargs))
# XXX this & is bogus doesn't work
# rnode.run("nohup {} & /dev/null 2>&1".format(cmdargs))
rnode.run("nohup {} > /dev/null 2>&1".format(cmdargs))
# Check if tcpdump process is running
if background:
@ -1199,7 +1185,7 @@ def tcpdump_capture_stop(tgen, router):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
rnode = tgen.routers()[router]
rnode = tgen.gears[router]
# Check if tcpdump process is running
result = rnode.run("ps -ef | grep tcpdump")
@ -1209,6 +1195,7 @@ def tcpdump_capture_stop(tgen, router):
errormsg = "tcpdump is not running {}".format("tcpdump")
return errormsg
else:
# XXX this doesn't work with micronet
ppid = tgen.net.nameToNode[rnode.name].pid
rnode.run("set +m; pkill -P %s tcpdump &> /dev/null" % ppid)
logger.info("Stopped tcpdump capture")
@ -1268,7 +1255,7 @@ def create_debug_log_config(tgen, input_dict, build=False):
log_file = debug_dict.setdefault("log_file", None)
if log_file:
_log_file = os.path.join(LOGDIR, tgen.modname, log_file)
_log_file = os.path.join(tgen.logdir, log_file)
debug_config.append("log file {} \n".format(_log_file))
if type(enable_logs) is list:
@ -1374,9 +1361,8 @@ def create_vrf_cfg(tgen, topo, input_dict=None, build=False):
config_data_dict = {}
for c_router, c_data in input_dict.items():
rnode = tgen.routers()[c_router]
rnode = tgen.gears[c_router]
config_data = []
if "vrfs" in c_data:
for vrf in c_data["vrfs"]:
del_action = vrf.setdefault("delete", False)
@ -1490,7 +1476,7 @@ def create_interface_in_kernel(
to create
"""
rnode = tgen.routers()[dut]
rnode = tgen.gears[dut]
if create:
cmd = "ip link show {0} >/dev/null || ip link add {0} type dummy".format(name)
@ -1528,7 +1514,7 @@ def shutdown_bringup_interface_in_kernel(tgen, dut, intf_name, ifaceaction=False
interface
"""
rnode = tgen.routers()[dut]
rnode = tgen.gears[dut]
cmd = "ip link set dev"
if ifaceaction:
@ -1737,7 +1723,7 @@ def interface_status(tgen, topo, input_dict):
interface_list = input_dict[router]["interface_list"]
status = input_dict[router].setdefault("status", "up")
for intf in interface_list:
rnode = tgen.routers()[router]
rnode = tgen.gears[router]
interface_set_status(rnode, intf, status)
rlist.append(router)
@ -2797,7 +2783,7 @@ def addKernelRoute(
logger.debug("Entering lib API: addKernelRoute()")
rnode = tgen.routers()[router]
rnode = tgen.gears[router]
if type(group_addr_range) is not list:
group_addr_range = [group_addr_range]
@ -2879,7 +2865,7 @@ def configure_vxlan(tgen, input_dict):
router_list = tgen.routers()
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
rnode = router_list[dut]
if "vxlan" in input_dict[dut]:
for vxlan_dict in input_dict[dut]["vxlan"]:
@ -2978,7 +2964,7 @@ def configure_brctl(tgen, topo, input_dict):
router_list = tgen.routers()
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
rnode = router_list[dut]
if "brctl" in input_dict[dut]:
for brctl_dict in input_dict[dut]["brctl"]:
@ -3064,7 +3050,7 @@ def configure_interface_mac(tgen, input_dict):
router_list = tgen.routers()
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
rnode = router_list[dut]
for intf, mac in input_dict[dut].items():
cmd = "ip link set {} address {}".format(intf, mac)
@ -3535,7 +3521,11 @@ def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
router_list = tgen.routers()
if dut not in router_list:
return
for routerInput in input_dict.keys():
# XXX replace with router = dut; rnode = router_list[dut]
for router, rnode in router_list.items():
if router != dut:
continue
@ -3780,11 +3770,11 @@ def verify_admin_distance_for_static_routes(tgen, input_dict):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
router_list = tgen.routers()
for router in input_dict.keys():
if router not in tgen.routers():
if router not in router_list:
continue
rnode = tgen.routers()[router]
rnode = router_list[router]
for static_route in input_dict[router]["static_routes"]:
addr_type = validate_ip_address(static_route["network"])
@ -3862,11 +3852,12 @@ def verify_prefix_lists(tgen, input_dict):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
router_list = tgen.routers()
for router in input_dict.keys():
if router not in tgen.routers():
if router not in router_list:
continue
rnode = tgen.routers()[router]
rnode = router_list[router]
# Show ip prefix list
show_prefix_list = run_frr_cmd(rnode, "show ip prefix-list")
@ -3925,11 +3916,12 @@ def verify_route_maps(tgen, input_dict):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
router_list = tgen.routers()
for router in input_dict.keys():
if router not in tgen.routers():
if router not in router_list:
continue
rnode = tgen.routers()[router]
rnode = router_list[router]
# Show ip route-map
show_route_maps = rnode.vtysh_cmd("show route-map")
@ -3978,10 +3970,11 @@ def verify_bgp_community(tgen, addr_type, router, network, input_dict=None):
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
if router not in tgen.routers():
router_list = tgen.routers()
if router not in router_list:
return False
rnode = tgen.routers()[router]
rnode = router_list[router]
logger.debug(
"Verifying BGP community attributes on dut %s: for %s " "network %s",
@ -4108,11 +4101,12 @@ def verify_create_community_list(tgen, input_dict):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
router_list = tgen.routers()
for router in input_dict.keys():
if router not in tgen.routers():
if router not in router_list:
continue
rnode = tgen.routers()[router]
rnode = router_list[router]
logger.info("Verifying large-community is created for dut %s:", router)
@ -4163,7 +4157,7 @@ def verify_cli_json(tgen, input_dict):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
rnode = tgen.gears[dut]
for cli in input_dict[dut]["cli"]:
logger.info(
@ -4225,7 +4219,7 @@ def verify_evpn_vni(tgen, input_dict):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
rnode = tgen.gears[dut]
logger.info("[DUT: %s]: Verifying evpn vni details :", dut)
@ -4343,7 +4337,7 @@ def verify_vrf_vni(tgen, input_dict):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
rnode = tgen.gears[dut]
logger.info("[DUT: %s]: Verifying vrf vni details :", dut)
@ -4447,9 +4441,7 @@ def required_linux_kernel_version(required_version):
return True
def iperfSendIGMPJoin(
tgen, server, bindToAddress, l4Type="UDP", join_interval=1, inc_step=0, repeat=0
):
def iperfSendIGMPJoin(tgen, server, bindToAddress, l4Type="UDP", join_interval=1):
"""
Run iperf to send IGMP join and traffic
@ -4461,8 +4453,6 @@ def iperfSendIGMPJoin(
* `bindToAddress`: bind to <host>, an interface or multicast
address
* `join_interval`: seconds between periodic bandwidth reports
* `inc_step`: increamental steps, by default 0
* `repeat`: Repetition of group, by default 0
returns:
--------
@ -4471,53 +4461,43 @@ def iperfSendIGMPJoin(
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
rnode = tgen.routers()[server]
rnode = tgen.gears[server]
iperfArgs = "iperf -s "
iperf_path = tgen.net.get_exec_path("iperf")
# UDP/TCP
if l4Type == "UDP":
iperfArgs += "-u "
iperfCmd = iperfArgs
# Group address range to cover
if bindToAddress:
if type(bindToAddress) is not list:
Address = []
start = ipaddress.IPv4Address(frr_unicode(bindToAddress))
Address = [start]
next_ip = start
count = 1
while count < repeat:
next_ip += inc_step
Address.append(next_ip)
count += 1
bindToAddress = Address
if bindToAddress and not isinstance(bindToAddress, list):
bindToAddress = [ipaddress.IPv4Address(frr_unicode(bindToAddress))]
for bindTo in bindToAddress:
iperfArgs = iperfCmd
iperfArgs += "-B %s " % bindTo
iperf_args = [iperf_path, "-s"]
# UDP/TCP
if l4Type == "UDP":
iperf_args.append("-u")
iperf_args.append("-B")
iperf_args.append(str(bindTo))
# Join interval
if join_interval:
iperfArgs += "-i %d " % join_interval
iperf_args.append("-i")
iperf_args.append(str(join_interval))
iperfArgs += " &>/dev/null &"
# Run iperf command to send IGMP join
logger.debug("[DUT: {}]: Running command: [{}]".format(server, iperfArgs))
output = rnode.run("set +m; {} sleep 0.5".format(iperfArgs))
logger.debug("[DUT: %s]: Running command: %s",server, iperf_args)
# Check if iperf process is running
if output:
pid = output.split()[1]
rnode.run("touch /var/run/frr/iperf_server.pid")
rnode.run("echo %s >> /var/run/frr/iperf_server.pid" % pid)
else:
errormsg = "IGMP join is not sent for {}. Error: {}".format(bindTo, output)
logger.error(output)
return errormsg
p = rnode.popen(iperf_args, stderr=subprocess.STDOUT)
rc = p.poll()
if rc is not None:
output, _ = p.communicate()
if rc:
errormsg = "IGMP join is not sent for {}. Error: {}".format(bindTo, output)
logger.error("%s", output)
return errormsg
if server not in g_iperf_server_procs:
g_iperf_server_procs[server] = []
g_iperf_server_procs[server].append(p)
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@ -4526,13 +4506,11 @@ def iperfSendIGMPJoin(
def iperfSendTraffic(
tgen,
client,
bindToAddress,
sentToAddress,
ttl,
time=0,
l4Type="UDP",
inc_step=0,
repeat=0,
mappedAddress=None,
bindToIntf=None,
):
"""
Run iperf to send IGMP join and traffic
@ -4542,13 +4520,11 @@ def iperfSendTraffic(
* `tgen` : Topogen object
* `l4Type`: string, one of [ TCP, UDP ]
* `client`: iperf client, from where iperf traffic would be sent
* `bindToAddress`: bind to <host>, an interface or multicast
* `sentToAddress`: bind to <host>, an interface or multicast
address
* `ttl`: time to live
* `time`: time in seconds to transmit for
* `inc_step`: increamental steps, by default 0
* `repeat`: Repetition of group, by default 0
* `mappedAddress`: Mapped Interface ip address
* `bindToIntf`: Source interface ip address
returns:
--------
@ -4557,64 +4533,56 @@ def iperfSendTraffic(
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
rnode = tgen.routers()[client]
rnode = tgen.gears[client]
iperfArgs = "iperf -c "
iperf_path = tgen.net.get_exec_path("iperf")
iperfCmd = iperfArgs
# Group address range to cover
if bindToAddress:
if type(bindToAddress) is not list:
Address = []
start = ipaddress.IPv4Address(frr_unicode(bindToAddress))
if sentToAddress and not isinstance(sentToAddress, list):
sentToAddress = [ipaddress.IPv4Address(frr_unicode(sentToAddress))]
Address = [start]
next_ip = start
for sendTo in sentToAddress:
iperf_args = [iperf_path, "-c", sendTo]
count = 1
while count < repeat:
next_ip += inc_step
Address.append(next_ip)
count += 1
bindToAddress = Address
for bindTo in bindToAddress:
iperfArgs = iperfCmd
iperfArgs += "%s " % bindTo
# Mapped Interface IP
if mappedAddress:
iperfArgs += "-B %s " % mappedAddress
# Bind to Interface IP
if bindToIntf:
ifaddr = frr_unicode(tgen.json_topo["routers"][client]["links"][bindToIntf]["ipv4"])
ipaddr = ipaddress.IPv4Interface(ifaddr).ip
iperf_args.append("-B")
iperf_args.append(str(ipaddr))
# UDP/TCP
if l4Type == "UDP":
iperfArgs += "-u -b 0.012m "
iperf_args.append("-u")
iperf_args.append("-b")
iperf_args.append("0.012m")
# TTL
if ttl:
iperfArgs += "-T %d " % ttl
iperf_args.append("-T")
iperf_args.append(str(ttl))
# Time
if time:
iperfArgs += "-t %d " % time
iperfArgs += " &>/dev/null &"
iperf_args.append("-t")
iperf_args.append(str(time))
# Run iperf command to send multicast traffic
logger.debug("[DUT: {}]: Running command: [{}]".format(client, iperfArgs))
output = rnode.run("set +m; {} sleep 0.5".format(iperfArgs))
logger.debug("[DUT: {}]: Running command: {}".format(client, iperf_args))
# Check if iperf process is running
if output:
pid = output.split()[1]
rnode.run("touch /var/run/frr/iperf_client.pid")
rnode.run("echo %s >> /var/run/frr/iperf_client.pid" % pid)
else:
errormsg = "Multicast traffic is not sent for {}. Error {}".format(
bindTo, output
)
logger.error(output)
return errormsg
p = rnode.popen(iperf_args, stderr=subprocess.STDOUT)
rc = p.poll()
if rc is not None:
output, _ = p.communicate()
if rc:
errormsg = "Multicast traffic is not sent for {}. Error {}".format(
sendTo, output
)
logger.error(output)
return errormsg
if client not in g_iperf_client_procs:
g_iperf_client_procs[client] = []
g_iperf_client_procs[client].append(p)
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@ -4637,24 +4605,36 @@ def kill_iperf(tgen, dut=None, action=None):
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
logger.debug("Running iperfs: clients: %s servers: %s", g_iperf_client_procs, g_iperf_server_procs)
router_list = tgen.routers()
for router, rnode in router_list.items():
# Run iperf command to send IGMP join
pid_client = rnode.run("cat /var/run/frr/iperf_client.pid")
pid_server = rnode.run("cat /var/run/frr/iperf_server.pid")
if dut is not None:
nodes = [dut]
else:
nodes = sorted(tgen.gears.keys())
for name in nodes:
logger.debug("Checking for iperfs on %s", name)
if action == "remove_join":
pids = pid_server
procs = g_iperf_server_procs[name] if name in g_iperf_server_procs else []
g_iperf_server_procs[name] = []
elif action == "remove_traffic":
pids = pid_client
procs = g_iperf_client_procs[name] if name in g_iperf_client_procs else []
g_iperf_client_procs[name] = []
else:
pids = "\n".join([pid_client, pid_server])
for pid in pids.split("\n"):
pid = pid.strip()
if pid.isdigit():
cmd = "set +m; kill -9 %s &> /dev/null" % pid
logger.debug("[DUT: {}]: Running command: [{}]".format(router, cmd))
rnode.run(cmd)
procs = []
if name in g_iperf_server_procs:
procs.extend(g_iperf_server_procs[name])
g_iperf_server_procs[name] = []
if name in g_iperf_client_procs:
procs.extend(g_iperf_client_procs[name])
g_iperf_client_procs[name] = []
for p in procs:
logger.info("[DUT: {}]: Terminating iperf: [{}]".format(name, p.pid))
# p.send_signal(signal.SIGHUP)
p.terminate()
for p in procs:
logger.info("[DUT: {}]: Waiting for iperf to terminate: [{}]".format(name, p.pid))
p.wait()
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
@ -4689,14 +4669,15 @@ def verify_ip_nht(tgen, input_dict):
logger.debug("Entering lib API: verify_ip_nht()")
router_list = tgen.routers()
for router in input_dict.keys():
if router not in tgen.routers():
if router not in router_list:
continue
rnode = tgen.routers()[router]
rnode = router_list[router]
nh_list = input_dict[router]
if validate_ip_address(nh_list.keys()[0]) is "ipv6":
if validate_ip_address(next(iter(nh_list))) is "ipv6":
show_ip_nht = run_frr_cmd(rnode, "show ipv6 nht")
else:
show_ip_nht = run_frr_cmd(rnode, "show ip nht")
@ -4714,7 +4695,7 @@ def verify_ip_nht(tgen, input_dict):
def scapy_send_raw_packet(
tgen, topo, senderRouter, intf, packet=None, interval=1, count=1
tgen, topo, senderRouter, intf, packet=None
):
"""
Using scapy Raw() method to send BSR raw packet from one FRR
@ -4726,8 +4707,6 @@ def scapy_send_raw_packet(
* `topo` : json file data
* `senderRouter` : Sender router
* `packet` : packet in raw format
* `interval` : Interval between the packets
* `count` : Number of packets to be sent
returns:
--------
@ -4749,20 +4728,13 @@ def scapy_send_raw_packet(
"data"
]
if interval > 1 or count > 1:
cmd = (
"nohup /usr/bin/python {}/send_bsr_packet.py '{}' '{}' "
"--interval={} --count={} &".format(
CD, packet, sender_interface, interval, count
)
)
else:
cmd = (
"/usr/bin/python {}/send_bsr_packet.py '{}' '{}' "
"--interval={} --count={}".format(
CD, packet, sender_interface, interval, count
)
python3_path = tgen.net.get_exec_path(["python3", "python"])
script_path = os.path.join(CD, "send_bsr_packet.py")
cmd = (
"{} {} '{}' '{}' --interval=1 --count=1".format(
python3_path, script_path, packet, sender_interface
)
)
logger.info("Scapy cmd: \n %s", cmd)
result = rnode.run(cmd)

View File

@ -0,0 +1,46 @@
# -*- coding: utf-8 eval: (yapf-mode 1) -*-
#
# August 27 2021, Christian Hopps <chopps@labn.net>
#
# Copyright (c) 2021, LabN Consulting, L.L.C. ("LabN")
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import lib.topojson as topojson
import lib.topogen as topogen
from lib.topolog import logger
def tgen_json(request):
    """Yield a started topogen topology built from the requesting module's json file."""
    modname = request.module.__name__
    logger.info("Creating/starting topogen topology for %s", modname)
    topo_gen = topojson.setup_module_from_json(request.module.__file__)
    yield topo_gen
    logger.info("Stopping topogen topology for %s", modname)
    topo_gen.stop_topology()
def topo(tgen):
    """Make tgen json object available as test argument."""
    json_topology = tgen.json_topo
    return json_topology
def tgen():
    """Expose the module-global topogen object as a test argument."""
    current = topogen.get_topogen()
    return current

View File

@ -28,8 +28,8 @@ ltemplate.py: LabN template for FRR tests.
import os
import sys
import platform
import pytest
import imp
# pylint: disable=C0413
# Import topogen and topotest helpers
@ -43,7 +43,6 @@ from mininet.topo import Topo
customize = None
class LTemplate:
test = None
testdir = None
@ -54,12 +53,20 @@ class LTemplate:
iproute2Ver = None
def __init__(self, test, testdir):
pathname = os.path.join(testdir, "customize.py")
global customize
customize = imp.load_source("customize", os.path.join(testdir, "customize.py"))
if sys.version_info >= (3, 5):
import importlib.util
spec = importlib.util.spec_from_file_location("customize", pathname)
customize = importlib.util.module_from_spec(spec)
spec.loader.exec_module(customize)
else:
import imp
customize = imp.load_source("customize", pathname)
self.test = test
self.testdir = testdir
self.scriptdir = testdir
self.logdir = "/tmp/topotests/{0}.test_{0}".format(test)
self.logdir = ""
logger.info("LTemplate: " + test)
def setup_module(self, mod):
@ -69,6 +76,8 @@ class LTemplate:
# ... and here it calls Mininet initialization functions.
tgen.start_topology()
self.logdir = tgen.logdir
logger.info("Topology started")
try:
self.prestarthooksuccess = customize.ltemplatePreRouterStartHook()

View File

@ -21,14 +21,15 @@ for the multicast group we subscribed to.
"""
import argparse
import os
import json
import os
import socket
import subprocess
import struct
import subprocess
import sys
import time
#
# Functions
#
@ -64,7 +65,7 @@ parser.add_argument('group', help='Multicast IP')
parser.add_argument('interface', help='Interface name')
parser.add_argument(
'--send',
help='Transmit instead of join with interval (defaults to 0.7 sec)',
help='Transmit instead of join with interval',
type=float, default=0)
args = parser.parse_args()

View File

@ -18,37 +18,35 @@
# OF THIS SOFTWARE.
#
import ipaddr
import ipaddress
import sys
import traceback
from copy import deepcopy
from time import sleep
from lib.topolog import logger
from lib.topotest import frr_unicode
from ipaddress import IPv6Address
import sys
from time import sleep
import ipaddr
# Import common_config to use commomnly used APIs
from lib.common_config import (
create_common_configurations,
InvalidCLIError,
retry,
generate_ips,
check_address_types,
validate_ip_address,
create_common_configuration,
generate_ips,
retry,
run_frr_cmd,
validate_ip_address,
)
LOGDIR = "/tmp/topotests/"
TMPDIR = None
from lib.topolog import logger
from lib.topotest import frr_unicode
################################
# Configure procs
################################
def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=True):
def create_router_ospf(tgen, topo=None, input_dict=None, build=False, load_config=True):
"""
API to configure ospf on router.
@ -79,6 +77,9 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru
logger.debug("Entering lib API: create_router_ospf()")
result = False
if topo is None:
topo = tgen.json_topo
if not input_dict:
input_dict = deepcopy(topo)
else:
@ -373,7 +374,7 @@ def __create_ospf_global(
return config_data
def create_router_ospf6(tgen, topo, input_dict=None, build=False, load_config=True):
def create_router_ospf6(tgen, topo=None, input_dict=None, build=False, load_config=True):
"""
API to configure ospf on router
@ -400,6 +401,9 @@ def create_router_ospf6(tgen, topo, input_dict=None, build=False, load_config=Tr
logger.debug("Entering lib API: create_router_ospf6()")
result = False
if topo is None:
topo = tgen.json_topo
if not input_dict:
input_dict = deepcopy(topo)
else:
@ -431,7 +435,7 @@ def create_router_ospf6(tgen, topo, input_dict=None, build=False, load_config=Tr
return result
def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config=True):
def config_ospf_interface(tgen, topo=None, input_dict=None, build=False, load_config=True):
"""
API to configure ospf on router.
@ -466,6 +470,10 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config=
"""
logger.debug("Enter lib config_ospf_interface")
result = False
if topo is None:
topo = tgen.json_topo
if not input_dict:
input_dict = deepcopy(topo)
else:
@ -632,7 +640,7 @@ def redistribute_ospf(tgen, topo, dut, route_type, **kwargs):
# Verification procs
################################
@retry(retry_timeout=80)
def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False, expected=True):
def verify_ospf_neighbor(tgen, topo=None, dut=None, input_dict=None, lan=False, expected=True):
"""
This API is to verify ospf neighborship by running
show ip ospf neighbour command,
@ -680,6 +688,9 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False, expec
"""
logger.debug("Entering lib API: verify_ospf_neighbor()")
result = False
if topo is None:
topo = tgen.json_topo
if input_dict:
for router, rnode in tgen.routers().items():
if "ospf" not in topo["routers"][router]:
@ -827,7 +838,7 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False, expec
# Verification procs
################################
@retry(retry_timeout=50)
def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
def verify_ospf6_neighbor(tgen, topo=None, dut=None, input_dict=None, lan=False):
"""
This API is to verify ospf neighborship by running
show ipv6 ospf neighbour command,
@ -875,6 +886,9 @@ def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
if topo is None:
topo = tgen.json_topo
if input_dict:
for router, rnode in tgen.routers().items():
if "ospf6" not in topo["routers"][router]:
@ -1318,7 +1332,7 @@ def verify_ospf_rib(
@retry(retry_timeout=20)
def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None, expected=True):
def verify_ospf_interface(tgen, topo=None, dut=None, lan=False, input_dict=None, expected=True):
"""
This API is to verify ospf routes by running
show ip ospf interface command.
@ -1360,6 +1374,9 @@ def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None, expe
logger.debug("Entering lib API: verify_ospf_interface()")
result = False
if topo is None:
topo = tgen.json_topo
for router, rnode in tgen.routers().items():
if "ospf" not in topo["routers"][router]:
continue
@ -1936,7 +1953,7 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
@retry(retry_timeout=6)
def verify_ospf6_interface(tgen, topo, dut=None,lan=False, input_dict=None):
def verify_ospf6_interface(tgen, topo=None, dut=None,lan=False, input_dict=None):
"""
This API is to verify ospf routes by running
show ip ospf interface command.
@ -1978,8 +1995,11 @@ def verify_ospf6_interface(tgen, topo, dut=None,lan=False, input_dict=None):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
for router, rnode in tgen.routers().iteritems():
if "ospf6" not in topo["routers"][router]:
if topo is None:
topo = tgen.json_topo
for router, rnode in tgen.routers().items():
if 'ospf6' not in topo['routers'][router]:
continue
if dut is not None and dut != router:
@ -2315,7 +2335,7 @@ def verify_ospf6_database(tgen, topo, dut, input_dict):
return result
def config_ospf6_interface(tgen, topo, input_dict=None, build=False, load_config=True):
def config_ospf6_interface(tgen, topo=None, input_dict=None, build=False, load_config=True):
"""
API to configure ospf on router.
@ -2350,6 +2370,9 @@ def config_ospf6_interface(tgen, topo, input_dict=None, build=False, load_config
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
if topo is None:
topo = tgen.json_topo
if not input_dict:
input_dict = deepcopy(topo)
else:

View File

@ -16,24 +16,25 @@
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
import sys
import datetime
import os
import re
import datetime
import sys
import traceback
import pytest
from time import sleep
from copy import deepcopy
from lib.topolog import logger
from time import sleep
import pytest
# Import common_config to use commomnly used APIs
from lib.common_config import (
create_common_configuration,
create_common_configurations,
create_common_configuration,
InvalidCLIError,
retry,
run_frr_cmd,
)
from lib.topolog import logger
####
CWD = os.path.dirname(os.path.realpath(__file__))
@ -922,7 +923,8 @@ def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses, ex
error = (
"[DUT %s]: Verifying join timer for"
" (%s,%s) [FAILED]!! "
" Expected: %s, Found: %s",
" Expected: %s, Found: %s"
) % (
dut,
src_address,
grp_addr,
@ -2028,9 +2030,7 @@ def add_rp_interfaces_and_pim_config(tgen, topo, interface, rp, rp_mapping):
return result
def scapy_send_bsr_raw_packet(
tgen, topo, senderRouter, receiverRouter, packet=None, interval=1, count=1
):
def scapy_send_bsr_raw_packet(tgen, topo, senderRouter, receiverRouter, packet=None):
"""
Using scapy Raw() method to send BSR raw packet from one FRR
to other
@ -2042,8 +2042,6 @@ def scapy_send_bsr_raw_packet(
* `senderRouter` : Sender router
* `receiverRouter` : Receiver router
* `packet` : BSR packet in raw format
* `interval` : Interval between the packets
* `count` : Number of packets to be sent
returns:
--------
@ -2054,7 +2052,9 @@ def scapy_send_bsr_raw_packet(
result = ""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
rnode = tgen.routers()[senderRouter]
python3_path = tgen.net.get_exec_path(["python3", "python"])
script_path = os.path.join(CWD, "send_bsr_packet.py")
node = tgen.net[senderRouter]
for destLink, data in topo["routers"][senderRouter]["links"].items():
if "type" in data and data["type"] == "loopback":
@ -2065,26 +2065,16 @@ def scapy_send_bsr_raw_packet(
packet = topo["routers"][senderRouter]["bsm"]["bsr_packets"][packet]["data"]
if interval > 1 or count > 1:
cmd = (
"nohup /usr/bin/python {}/send_bsr_packet.py '{}' '{}' "
"--interval={} --count={} &".format(
CWD, packet, sender_interface, interval, count
)
)
else:
cmd = (
"/usr/bin/python {}/send_bsr_packet.py '{}' '{}' "
"--interval={} --count={}".format(
CWD, packet, sender_interface, interval, count
)
)
cmd = [
python3_path,
script_path,
packet,
sender_interface,
"--interval=1",
"--count=1",
]
logger.info("Scapy cmd: \n %s", cmd)
result = rnode.run(cmd)
if result == "":
return result
node.cmd_raises(cmd)
logger.debug("Exiting lib API: scapy_send_bsr_raw_packet")
return True

View File

@ -30,7 +30,7 @@ Basic usage instructions:
* see tests/topotest/simple-snmp-test/test_simple_snmp.py for example
"""
from topolog import logger
from lib.topolog import logger
class SnmpTester(object):
@ -93,7 +93,7 @@ class SnmpTester(object):
return tokens[0].split(".", 1)[1]
def _parse_multiline(self, snmp_output):
results = snmp_output.strip().split("\r\n")
results = snmp_output.strip().split("\n")
out_dict = {}
out_list = []

View File

@ -38,31 +38,30 @@ Basic usage instructions:
* After running stop Mininet with: tgen.stop_topology()
"""
import os
import sys
import io
import logging
import grp
import inspect
import json
import logging
import os
import platform
import pwd
import re
import subprocess
import sys
from collections import OrderedDict
if sys.version_info[0] > 2:
import configparser
else:
import ConfigParser as configparser
import glob
import grp
import platform
import pwd
import subprocess
import pytest
from mininet.net import Mininet
from mininet.log import setLogLevel
from mininet.cli import CLI
import lib.topolog as topolog
from lib.micronet import Commander
from lib.micronet_compat import Mininet
from lib.topolog import logger
from lib.topotest import g_extra_config
from lib import topotest
from lib.topolog import logger, logger_config
from lib.topotest import set_sysctl
CWD = os.path.dirname(os.path.realpath(__file__))
@ -89,6 +88,51 @@ def set_topogen(tgen):
global_tgen = tgen
def is_string(value):
    """Return True if value is a string."""
    # On Python 2 ``basestring`` covers both str and unicode; on Python 3 the
    # name does not exist, so fall back to plain ``str``.
    try:
        string_types = (basestring,)  # type: ignore
    except NameError:
        string_types = (str,)
    return isinstance(value, string_types)
def get_exabgp_cmd(commander=None):
    """Return the command to use for ExaBGP version < 4.

    Tries a native ``exabgp`` executable first, then falls back to running the
    module under a Python 2 interpreter.  Returns None when no suitable
    (pre-version-4) ExaBGP can be found.
    """
    if commander is None:
        commander = Commander("topogen")

    def version_is_pre4(candidate):
        # Run "<candidate> -v" and accept only if it reports a version < 4.
        logger.debug("checking %s for exabgp < version 4", candidate)
        _, stdout, _ = commander.cmd_status(candidate + " -v", warn=False)
        match = re.search(r"ExaBGP\s*:\s*((\d+)\.(\d+)(?:\.(\d+))?)", stdout)
        if match is None:
            return False
        found = match.group(1)
        if topotest.version_cmp(found, "4") >= 0:
            logging.debug(
                "found exabgp version >= 4 in %s will keep looking", candidate
            )
            return False
        logger.info("Using ExaBGP version %s in %s", found, candidate)
        return True

    candidate = commander.get_exec_path("exabgp")
    if candidate and version_is_pre4(candidate):
        return candidate

    # Fall back to "python2 -m exabgp" then "python -m exabgp".
    for interp in ("python2", "python"):
        interp_path = commander.get_exec_path(interp)
        if not interp_path:
            continue
        candidate = interp_path + " -m exabgp"
        if version_is_pre4(candidate):
            return candidate

    return None
#
# Main class: topology builder
#
@ -107,10 +151,12 @@ class Topogen(object):
CONFIG_SECTION = "topogen"
def __init__(self, cls, modname="unnamed"):
def __init__(self, topodef, modname="unnamed"):
"""
Topogen initialization function, takes the following arguments:
* `cls`: the topology class that is child of mininet.topo
* `cls`: OLD: the topology class that is a child of mininet.topo, or a build function.
* `topodef`: A dictionary defining the topology, a filename of a json file, or a
function that will do the same
* `modname`: module name must be a unique name to identify logs later.
"""
self.config = None
@ -123,16 +169,22 @@ class Topogen(object):
self.errorsd = {}
self.errors = ""
self.peern = 1
self._init_topo(cls)
self.cfg_gen = 0
self.exabgp_cmd = None
self._init_topo(topodef)
logger.info("loading topology: {}".format(self.modname))
@staticmethod
def _mininet_reset():
"Reset the mininet environment"
# Clean up the mininet environment
os.system("mn -c > /dev/null 2>&1")
# @staticmethod
# def _mininet_reset():
# "Reset the mininet environment"
# # Clean up the mininet environment
# os.system("mn -c > /dev/null 2>&1")
def _init_topo(self, cls):
def __str__(self):
return "Topogen()"
def _init_topo(self, topodef):
"""
Initialize the topology provided by the user. The user topology class
must call get_topogen() during build() to get the topogen object.
@ -140,6 +192,9 @@ class Topogen(object):
# Set the global variable so the test cases can access it anywhere
set_topogen(self)
# Increase host based limits
topotest.fix_host_limits()
# Test for MPLS Kernel modules available
self.hasmpls = False
if not topotest.module_present("mpls-router"):
@ -148,15 +203,90 @@ class Topogen(object):
logger.info("MPLS tests will not run (missing mpls-iptunnel kernel module)")
else:
self.hasmpls = True
# Load the default topology configurations
self._load_config()
# Initialize the API
self._mininet_reset()
cls()
# Create new log directory
self.logdir = topotest.get_logs_path(g_extra_config["rundir"])
subprocess.check_call("mkdir -p {0} && chmod 1777 {0}".format(self.logdir), shell=True)
try:
routertype = self.config.get(self.CONFIG_SECTION, "routertype")
# Only allow group, if it exist.
gid = grp.getgrnam(routertype)[2]
os.chown(self.logdir, 0, gid)
os.chmod(self.logdir, 0o775)
except KeyError:
# Allow anyone, but set the sticky bit to avoid file deletions
os.chmod(self.logdir, 0o1777)
# Old twisty way of creating sub-classed topology object which has it's build
# method invoked which calls Topogen methods which then call Topo methods to
# create a topology within the Topo object, which is then used by
# Mininet(Micronet) to build the actual topology.
if inspect.isclass(topodef):
self.topo = topodef()
self.net = Mininet(controller=None, topo=self.topo)
for gear in self.gears.values():
gear.net = self.net
# New direct way: Either a dictionary defines the topology or a build function
# is supplied, or a json filename all of which build the topology by calling
# Topogen methods which call Mininet(Micronet) methods to create the actual
# topology.
if not inspect.isclass(topodef):
if callable(topodef):
topodef(self)
self.net.configure_hosts()
elif is_string(topodef):
# topojson imports topogen in one function too,
# switch away from this use here to the topojson
# fixture and remove this case
from lib.topojson import build_topo_from_json
with open(topodef, "r") as topof:
self.json_topo = json.load(topof)
build_topo_from_json(self, self.json_topo)
self.net.configure_hosts()
elif topodef:
self.add_topology_from_dict(topodef)
def add_topology_from_dict(self, topodef):
    """Build the topology from a dict of switch-name -> router connection specs.

    Each value is either a single "router[:rifname[:sifname]]" string or an
    iterable of them.  Routers and switches are created on first reference,
    then all links are added, and finally the hosts are configured.
    """
    if isinstance(topodef, OrderedDict):
        keylist = topodef.keys()
    else:
        keylist = sorted(topodef.keys())

    def connections(key):
        # Normalize a single string spec into a one-element tuple.
        value = topodef[key]
        return (value,) if is_string(value) else value

    # ---------------------------
    # Create all referenced hosts
    # ---------------------------
    for key in keylist:
        for conn in connections(key):
            rname = conn.split(":")[0]
            if rname not in self.gears:
                logging.debug("Adding router: %s", rname)
                self.add_router(rname)

    # ------------------------------
    # Create all referenced switches
    # ------------------------------
    for key in keylist:
        if key is not None and key not in self.gears:
            logging.debug("Adding switch: %s", key)
            self.add_switch(key)

    # ----------------
    # Create all links
    # ----------------
    for key in keylist:
        if key is None:
            continue
        for conn in connections(key):
            parts = conn.split(":")
            rname = parts[0]
            rifname = parts[1] if len(parts) > 1 else None
            sifname = parts[2] if len(parts) > 2 else None
            self.add_link(self.gears[key], self.gears[rname], sifname, rifname)

    self.net.configure_hosts()
def _load_config(self):
"""
@ -167,7 +297,7 @@ class Topogen(object):
pytestini_path = os.path.join(CWD, "../pytest.ini")
self.config.read(pytestini_path)
def add_router(self, name=None, cls=topotest.Router, **params):
def add_router(self, name=None, cls=None, **params):
"""
Adds a new router to the topology. This function has the following
options:
@ -176,6 +306,8 @@ class Topogen(object):
* `routertype`: (optional) `frr`
Returns a TopoRouter.
"""
if cls is None:
cls = topotest.Router
if name is None:
name = "r{}".format(self.routern)
if name in self.gears:
@ -190,7 +322,7 @@ class Topogen(object):
self.routern += 1
return self.gears[name]
def add_switch(self, name=None, cls=topotest.LegacySwitch):
def add_switch(self, name=None):
"""
Adds a new switch to the topology. This function has the following
options:
@ -202,7 +334,7 @@ class Topogen(object):
if name in self.gears:
raise KeyError("switch already exists")
self.gears[name] = TopoSwitch(self, cls, name)
self.gears[name] = TopoSwitch(self, name)
self.switchn += 1
return self.gears[name]
@ -258,7 +390,10 @@ class Topogen(object):
node1.register_link(ifname1, node2, ifname2)
node2.register_link(ifname2, node1, ifname1)
self.topo.addLink(node1.name, node2.name, intfName1=ifname1, intfName2=ifname2)
if self.net:
self.net.add_link(node1.name, node2.name, ifname1, ifname2)
else:
self.topo.addLink(node1.name, node2.name, intfName1=ifname1, intfName2=ifname2)
def get_gears(self, geartype):
"""
@ -300,27 +435,8 @@ class Topogen(object):
"""
return self.get_gears(TopoExaBGP)
def start_topology(self, log_level=None):
"""
Starts the topology class. Possible `log_level`s are:
'debug': all information possible
'info': informational messages
'output': default logging level defined by Mininet
'warning': only warning, error and critical messages
'error': only error and critical messages
'critical': only critical messages
"""
# If log_level is not specified use the configuration.
if log_level is None:
log_level = self.config.get(self.CONFIG_SECTION, "verbosity")
# Set python logger level
logger_config.set_log_level(log_level)
# Run mininet
if log_level == "debug":
setLogLevel(log_level)
def start_topology(self):
"""Starts the topology class."""
logger.info("starting topology: {}".format(self.modname))
self.net.start()
@ -331,6 +447,7 @@ class Topogen(object):
"""
if router is None:
# pylint: disable=r1704
# XXX should be hosts?
for _, router in self.routers().items():
router.start()
else:
@ -358,17 +475,19 @@ class Topogen(object):
self.net.stop()
def mininet_cli(self):
def get_exabgp_cmd(self):
    """Return the ExaBGP command path, computing and caching it on first use.

    Delegates to the module-level ``get_exabgp_cmd`` helper (same name,
    different function) against ``self.net`` and memoizes the result in
    ``self.exabgp_cmd`` so the lookup only happens once per topology.
    """
    if not self.exabgp_cmd:
        self.exabgp_cmd = get_exabgp_cmd(self.net)
    return self.exabgp_cmd
def cli(self):
"""
Interrupt the test and call the command line interface for manual
inspection. Should be only used on non production code.
"""
if not sys.stdin.isatty():
raise EnvironmentError(
"you must run pytest with '-s' in order to use mininet CLI"
)
self.net.cli()
CLI(self.net)
mininet_cli = cli
def is_memleak_enabled(self):
"Returns `True` if memory leak report is enable, otherwise `False`."
@ -438,13 +557,18 @@ class Topogen(object):
class TopoGear(object):
"Abstract class for type checking"
def __init__(self):
self.tgen = None
self.name = None
self.cls = None
def __init__(self, tgen, name, **params):
    """Base initializer for all topology gear (routers, switches, hosts).

    * `tgen`: the owning Topogen instance
    * `name`: gear name; also the key used to look this gear up in `tgen.net`
    * `params`: extra options kept for later use by subclasses
    """
    self.tgen = tgen
    self.name = name
    self.params = params
    # Maps local interface name -> (peer gear, peer interface name);
    # populated by register_link().
    self.links = {}
    self.linkn = 0  # link counter
    # Would be nice for this to point at the gears log directory rather than the
    # test's.
    self.logdir = tgen.logdir
    self.gearlogdir = None  # set later by _setup_tmpdir()
def __str__(self):
links = ""
for myif, dest in self.links.items():
@ -455,27 +579,42 @@ class TopoGear(object):
return 'TopoGear<name="{}",links=[{}]>'.format(self.name, links)
@property
def net(self):
    """The underlying network node object for this gear (looked up by name)."""
    return self.tgen.net[self.name]
def start(self):
    """Basic start that only logs the event; subclasses do the real work."""
    logger.info('starting "{}"'.format(self.name))
def stop(self, wait=True, assertOnError=True):
"Basic start function that just reports equipment stop"
logger.info('stopping "{}"'.format(self.name))
"Basic stop function that just reports equipment stop"
logger.info('"{}" base stop called'.format(self.name))
return ""
def run(self, command):
def cmd(self, command, **kwargs):
"""
Runs the provided command string in the router and returns a string
with the response.
"""
return self.tgen.net[self.name].cmd(command)
return self.net.cmd_legacy(command, **kwargs)
def cmd_raises(self, command, **kwargs):
    """
    Runs the provided command string in the router and returns a string
    with the response. Raise an exception on any error.

    Extra keyword arguments are passed through to the underlying
    ``net.cmd_raises`` call.
    """
    return self.net.cmd_raises(command, **kwargs)
run = cmd
def popen(self, *params, **kwargs):
"""
Popen on the router.
Creates a pipe with the given command. Same args as python Popen.
If `command` is a string then will be invoked with shell, otherwise
`command` is a list and will be invoked w/o shell. Returns a popen object.
"""
return self.tgen.net[self.name].popen(*params, **kwargs)
return self.net.popen(*params, **kwargs)
def add_link(self, node, myif=None, nodeif=None):
"""
@ -508,6 +647,7 @@ class TopoGear(object):
extract = ""
if netns is not None:
extract = "ip netns exec {} ".format(netns)
return self.run("{}ip link set dev {} {}".format(extract, myif, operation))
def peer_link_enable(self, myif, enabled=True, netns=None):
@ -546,6 +686,11 @@ class TopoGear(object):
self.links[myif] = (node, nodeif)
def _setup_tmpdir(self):
    """Create this gear's per-node temp directory and return its log path.

    Also records the gear's log directory in ``self.gearlogdir``.
    """
    topotest.setup_node_tmpdir(self.logdir, self.name)
    self.gearlogdir = "{}/{}".format(self.logdir, self.name)
    return "{}/{}.log".format(self.logdir, self.name)
class TopoRouter(TopoGear):
"""
@ -555,6 +700,7 @@ class TopoRouter(TopoGear):
# The default required directories by FRR
PRIVATE_DIRS = [
"/etc/frr",
"/etc/snmp",
"/var/run/frr",
"/var/log",
]
@ -608,66 +754,28 @@ class TopoRouter(TopoGear):
* daemondir: daemon binary directory
* routertype: 'frr'
"""
super(TopoRouter, self).__init__()
self.tgen = tgen
self.net = None
self.name = name
self.cls = cls
self.options = {}
super(TopoRouter, self).__init__(tgen, name, **params)
self.routertype = params.get("routertype", "frr")
if "privateDirs" not in params:
params["privateDirs"] = self.PRIVATE_DIRS
self.options["memleak_path"] = params.get("memleak_path", None)
# Create new log directory
self.logdir = "/tmp/topotests/{}".format(self.tgen.modname)
# Clean up before starting new log files: avoids removing just created
# log files.
self._prepare_tmpfiles()
# Propagate the router log directory
logfile = self._setup_tmpdir()
params["logdir"] = self.logdir
# setup the per node directory
dir = "{}/{}".format(self.logdir, self.name)
os.system("mkdir -p " + dir)
os.system("chmod -R go+rw /tmp/topotests")
self.logger = topolog.get_logger(name, log_level="debug", target=logfile)
params["logger"] = self.logger
tgen.net.add_host(self.name, cls=cls, **params)
topotest.fix_netns_limits(tgen.net[name])
# Open router log file
logfile = "{0}/{1}.log".format(self.logdir, name)
self.logger = logger_config.get_logger(name=name, target=logfile)
self.tgen.topo.addNode(self.name, cls=self.cls, **params)
# Mount gear log directory on a common path
self.net.bind_mount(self.gearlogdir, "/tmp/gearlogdir")
def __str__(self):
gear = super(TopoRouter, self).__str__()
gear += " TopoRouter<>"
return gear
def _prepare_tmpfiles(self):
# Create directories if they don't exist
try:
os.makedirs(self.logdir, 0o755)
except OSError:
pass
# Allow unprivileged daemon user (frr) to create log files
try:
# Only allow group, if it exist.
gid = grp.getgrnam(self.routertype)[2]
os.chown(self.logdir, 0, gid)
os.chmod(self.logdir, 0o775)
except KeyError:
# Allow anyone, but set the sticky bit to avoid file deletions
os.chmod(self.logdir, 0o1777)
# Try to find relevant old logfiles in /tmp and delete them
map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
# Remove old valgrind files
map(os.remove, glob.glob("{}/{}.valgrind.*".format(self.logdir, self.name)))
# Remove old core files
map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
def check_capability(self, daemon, param):
"""
Checks a capability daemon against an argument option
@ -675,7 +783,7 @@ class TopoRouter(TopoGear):
"""
daemonstr = self.RD.get(daemon)
self.logger.info('check capability {} for "{}"'.format(param, daemonstr))
return self.tgen.net[self.name].checkCapability(daemonstr, param)
return self.net.checkCapability(daemonstr, param)
def load_config(self, daemon, source=None, param=None):
"""
@ -684,17 +792,20 @@ class TopoRouter(TopoGear):
TopoRouter.RD_RIPNG, TopoRouter.RD_OSPF, TopoRouter.RD_OSPF6,
TopoRouter.RD_ISIS, TopoRouter.RD_BGP, TopoRouter.RD_LDP,
TopoRouter.RD_PIM, TopoRouter.RD_PBR, TopoRouter.RD_SNMP.
This API unfortunately allows for source to not exist for any and
all routers.
"""
daemonstr = self.RD.get(daemon)
self.logger.info('loading "{}" configuration: {}'.format(daemonstr, source))
self.tgen.net[self.name].loadConf(daemonstr, source, param)
self.net.loadConf(daemonstr, source, param)
def check_router_running(self):
"""
Run a series of checks and returns a status string.
"""
self.logger.info("checking if daemons are running")
return self.tgen.net[self.name].checkRouterRunning()
return self.net.checkRouterRunning()
def start(self):
"""
@ -705,46 +816,41 @@ class TopoRouter(TopoGear):
* Start daemons (e.g. FRR)
* Configure daemon logging files
"""
self.logger.debug("starting")
nrouter = self.tgen.net[self.name]
nrouter = self.net
result = nrouter.startRouter(self.tgen)
# Enable command logging
# Enable all daemon command logging, logging files
# and set them to the start dir.
for daemon, enabled in nrouter.daemons.items():
if enabled == 0:
continue
self.vtysh_cmd(
"configure terminal\nlog commands\nlog file {}.log".format(daemon),
daemon=daemon,
)
if enabled and daemon != "snmpd":
self.vtysh_cmd(
"\n".join(["clear log cmdline-targets",
"conf t",
"log file {}.log debug".format(daemon),
"log commands",
"log timestamp precision 3"]),
daemon=daemon,
)
if result != "":
self.tgen.set_error(result)
else:
elif nrouter.daemons["ldpd"] == 1 or nrouter.daemons["pathd"] == 1:
# Enable MPLS processing on all interfaces.
for interface in self.links.keys():
set_sysctl(nrouter, "net.mpls.conf.{}.input".format(interface), 1)
for interface in self.links:
topotest.sysctl_assure(nrouter, "net.mpls.conf.{}.input".format(interface), 1)
return result
def __stop_internal(self, wait=True, assertOnError=True):
"""
Stop router, private internal version
* Kill daemons
"""
self.logger.debug("stopping: wait {}, assert {}".format(wait, assertOnError))
return self.tgen.net[self.name].stopRouter(wait, assertOnError)
def stop(self):
"""
Stop router cleanly:
* Signal daemons twice, once without waiting, and then a second time
with a wait to ensure the daemons exit cleanly
* Signal daemons twice, once with SIGTERM, then with SIGKILL.
"""
self.logger.debug("stopping")
self.__stop_internal(False, False)
return self.__stop_internal(True, False)
self.logger.debug("stopping (no assert)")
return self.net.stopRouter(False)
def startDaemons(self, daemons):
"""
@ -753,17 +859,23 @@ class TopoRouter(TopoGear):
* Configure daemon logging files
"""
self.logger.debug("starting")
nrouter = self.tgen.net[self.name]
nrouter = self.net
result = nrouter.startRouterDaemons(daemons)
if daemons is None:
daemons = nrouter.daemons.keys()
# Enable all daemon command logging, logging files
# and set them to the start dir.
for daemon, enabled in nrouter.daemons.items():
for d in daemons:
if enabled == 0:
continue
for daemon in daemons:
enabled = nrouter.daemons[daemon]
if enabled and daemon != "snmpd":
self.vtysh_cmd(
"configure terminal\nlog commands\nlog file {}.log".format(daemon),
"\n".join(["clear log cmdline-targets",
"conf t",
"log file {}.log debug".format(daemon),
"log commands",
"log timestamp precision 3"]),
daemon=daemon,
)
@ -778,7 +890,7 @@ class TopoRouter(TopoGear):
forcefully using SIGKILL
"""
self.logger.debug("Killing daemons using SIGKILL..")
return self.tgen.net[self.name].killRouterDaemons(daemons, wait, assertOnError)
return self.net.killRouterDaemons(daemons, wait, assertOnError)
def vtysh_cmd(self, command, isjson=False, daemon=None):
"""
@ -798,10 +910,17 @@ class TopoRouter(TopoGear):
vtysh_command = 'vtysh {} -c "{}" 2>/dev/null'.format(dparam, command)
self.logger.info('vtysh command => "{}"'.format(command))
output = self.run(vtysh_command)
self.logger.info(
"\nvtysh command => {}\nvtysh output <= {}".format(command, output)
)
dbgout = output.strip()
if dbgout:
if "\n" in dbgout:
dbgout = dbgout.replace("\n", "\n\t")
self.logger.info('vtysh result:\n\t{}'.format(dbgout))
else:
self.logger.info('vtysh result: "{}"'.format(dbgout))
if isjson is False:
return output
@ -833,13 +952,20 @@ class TopoRouter(TopoGear):
else:
vtysh_command = "vtysh {} -f {}".format(dparam, fname)
dbgcmds = commands if is_string(commands) else "\n".join(commands)
dbgcmds = "\t" + dbgcmds.replace("\n", "\n\t")
self.logger.info('vtysh command => FILE:\n{}'.format(dbgcmds))
res = self.run(vtysh_command)
os.unlink(fname)
self.logger.info(
'\nvtysh command => "{}"\nvtysh output <= "{}"'.format(vtysh_command, res)
)
dbgres = res.strip()
if dbgres:
if "\n" in dbgres:
dbgres = dbgres.replace("\n", "\n\t")
self.logger.info('vtysh result:\n\t{}'.format(dbgres))
else:
self.logger.info('vtysh result: "{}"'.format(dbgres))
return res
def report_memory_leaks(self, testname):
@ -851,7 +977,7 @@ class TopoRouter(TopoGear):
TOPOTESTS_CHECK_MEMLEAK set or memleak_path configured in `pytest.ini`.
"""
memleak_file = (
os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.options["memleak_path"]
os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.params["memleak_path"]
)
if memleak_file == "" or memleak_file == None:
return
@ -859,7 +985,7 @@ class TopoRouter(TopoGear):
self.stop()
self.logger.info("running memory leak report")
self.tgen.net[self.name].report_memory_leaks(memleak_file, testname)
self.net.report_memory_leaks(memleak_file, testname)
def version_info(self):
"Get equipment information from 'show version'."
@ -888,7 +1014,7 @@ class TopoRouter(TopoGear):
Usage example: router.has_version('>', '1.0')
"""
return self.tgen.net[self.name].checkRouterVersion(cmpop, version)
return self.net.checkRouterVersion(cmpop, version)
def has_type(self, rtype):
"""
@ -899,8 +1025,7 @@ class TopoRouter(TopoGear):
return rtype == curtype
def has_mpls(self):
nrouter = self.tgen.net[self.name]
return nrouter.hasmpls
return self.net.hasmpls
class TopoSwitch(TopoGear):
@ -912,13 +1037,9 @@ class TopoSwitch(TopoGear):
# pylint: disable=too-few-public-methods
def __init__(self, tgen, cls, name):
super(TopoSwitch, self).__init__()
self.tgen = tgen
self.net = None
self.name = name
self.cls = cls
self.tgen.topo.addSwitch(name, cls=self.cls)
def __init__(self, tgen, name, **params):
    """Create a switch gear and register it with the underlying network."""
    super(TopoSwitch, self).__init__(tgen, name, **params)
    tgen.net.add_switch(name)
def __str__(self):
gear = super(TopoSwitch, self).__str__()
@ -939,19 +1060,27 @@ class TopoHost(TopoGear):
* `privateDirs`: directories that will be mounted on a different domain
(e.g. '/etc/important_dir').
"""
super(TopoHost, self).__init__()
self.tgen = tgen
self.net = None
self.name = name
self.options = params
self.tgen.topo.addHost(name, **params)
super(TopoHost, self).__init__(tgen, name, **params)
# Propagate the router log directory
logfile = self._setup_tmpdir()
params["logdir"] = self.logdir
# Odd to have 2 logfiles for each host
self.logger = topolog.get_logger(name, log_level="debug", target=logfile)
params["logger"] = self.logger
tgen.net.add_host(name, **params)
topotest.fix_netns_limits(tgen.net[name])
# Mount gear log directory on a common path
self.net.bind_mount(self.gearlogdir, "/tmp/gearlogdir")
def __str__(self):
gear = super(TopoHost, self).__str__()
gear += ' TopoHost<ip="{}",defaultRoute="{}",privateDirs="{}">'.format(
self.options["ip"],
self.options["defaultRoute"],
str(self.options["privateDirs"]),
self.params["ip"],
self.params["defaultRoute"],
str(self.params["privateDirs"]),
)
return gear
@ -979,7 +1108,6 @@ class TopoExaBGP(TopoHost):
"""
params["privateDirs"] = self.PRIVATE_DIRS
super(TopoExaBGP, self).__init__(tgen, name, **params)
self.tgen.topo.addHost(name, **params)
def __str__(self):
gear = super(TopoExaBGP, self).__str__()
@ -994,7 +1122,10 @@ class TopoExaBGP(TopoHost):
* Make all python files runnable
* Run ExaBGP with env file `env_file` and configuration peer*/exabgp.cfg
"""
self.run("mkdir /etc/exabgp")
exacmd = self.tgen.get_exabgp_cmd()
assert exacmd, "Can't find a usabel ExaBGP (must be < version 4)"
self.run("mkdir -p /etc/exabgp")
self.run("chmod 755 /etc/exabgp")
self.run("cp {}/* /etc/exabgp/".format(peer_dir))
if env_file is not None:
@ -1002,9 +1133,11 @@ class TopoExaBGP(TopoHost):
self.run("chmod 644 /etc/exabgp/*")
self.run("chmod a+x /etc/exabgp/*.py")
self.run("chown -R exabgp:exabgp /etc/exabgp")
output = self.run("exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg")
output = self.run(exacmd + " -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg")
if output == None or len(output) == 0:
output = "<none>"
logger.info("{} exabgp started, output={}".format(self.name, output))
def stop(self, wait=True, assertOnError=True):
@ -1019,42 +1152,38 @@ class TopoExaBGP(TopoHost):
# Disable linter branch warning. It is expected to have these here.
# pylint: disable=R0912
def diagnose_env_linux():
def diagnose_env_linux(rundir):
"""
Run diagnostics in the running environment. Returns `True` when everything
is ok, otherwise `False`.
"""
ret = True
# Test log path exists before installing handler.
if not os.path.isdir("/tmp"):
logger.warning("could not find /tmp for logs")
else:
os.system("mkdir -p /tmp/topotests")
# Log diagnostics to file so it can be examined later.
fhandler = logging.FileHandler(filename="/tmp/topotests/diagnostics.txt")
fhandler.setLevel(logging.DEBUG)
fhandler.setFormatter(
logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s")
)
logger.addHandler(fhandler)
logger.info("Running environment diagnostics")
# Load configuration
config = configparser.ConfigParser(defaults=tgen_defaults)
pytestini_path = os.path.join(CWD, "../pytest.ini")
config.read(pytestini_path)
# Test log path exists before installing handler.
os.system("mkdir -p " + rundir)
# Log diagnostics to file so it can be examined later.
fhandler = logging.FileHandler(filename="{}/diagnostics.txt".format(rundir))
fhandler.setLevel(logging.DEBUG)
fhandler.setFormatter(logging.Formatter(fmt=topolog.FORMAT))
logger.addHandler(fhandler)
logger.info("Running environment diagnostics")
# Assert that we are running as root
if os.getuid() != 0:
logger.error("you must run topotest as root")
ret = False
# Assert that we have mininet
if os.system("which mn >/dev/null 2>/dev/null") != 0:
logger.error("could not find mininet binary (mininet is not installed)")
ret = False
# if os.system("which mn >/dev/null 2>/dev/null") != 0:
# logger.error("could not find mininet binary (mininet is not installed)")
# ret = False
# Assert that we have iproute installed
if os.system("which ip >/dev/null 2>/dev/null") != 0:
@ -1118,7 +1247,7 @@ def diagnose_env_linux():
if fname != "zebra":
continue
os.system("{} -v 2>&1 >/tmp/topotests/frr_zebra.txt".format(path))
os.system("{} -v 2>&1 >{}/frr_zebra.txt".format(path, rundir))
# Test MPLS availability
krel = platform.release()
@ -1135,23 +1264,9 @@ def diagnose_env_linux():
if not topotest.module_present("mpls-iptunnel", load=False) != 0:
logger.info("LDPd tests will not run (missing mpls-iptunnel kernel module)")
# TODO remove me when we start supporting exabgp >= 4
try:
p = os.popen("exabgp -v")
line = p.readlines()
version = line[0].split()
if topotest.version_cmp(version[2], "4") >= 0:
logger.warning(
"BGP topologies are still using exabgp version 3, expect failures"
)
p.close()
if not get_exabgp_cmd():
logger.warning("Failed to find exabgp < 4")
# We want to catch all exceptions
# pylint: disable=W0702
except:
logger.warning("failed to find exabgp or returned error")
# After we logged the output to file, remove the handler.
logger.removeHandler(fhandler)
fhandler.close()
@ -1162,9 +1277,9 @@ def diagnose_env_freebsd():
return True
def diagnose_env():
def diagnose_env(rundir):
if sys.platform.startswith("linux"):
return diagnose_env_linux()
return diagnose_env_linux(rundir)
elif sys.platform.startswith("freebsd"):
return diagnose_env_freebsd()

View File

@ -18,34 +18,27 @@
# OF THIS SOFTWARE.
#
from collections import OrderedDict
from json import dumps as json_dumps
from re import search as re_search
import json
import ipaddress
import pytest
import ipaddr
import os
from collections import OrderedDict
from copy import deepcopy
from re import search as re_search
import ipaddr
import pytest
# Import topogen and topotest helpers
from lib.topolog import logger
# Required to instantiate the topology builder class.
from lib.common_config import (
number_to_row,
number_to_column,
load_config_to_routers,
create_interfaces_cfg,
create_static_routes,
create_prefix_lists,
create_route_maps,
create_bgp_community_lists,
create_vrf_cfg,
)
from lib.pim import create_pim_config, create_igmp_config
from lib.bgp import create_router_bgp
from lib.common_config import (create_bgp_community_lists,
create_interfaces_cfg, create_prefix_lists,
create_route_maps, create_static_routes,
create_vrf_cfg, load_config_to_routers,
start_topology,
topo_daemons,
number_to_column)
from lib.ospf import create_router_ospf, create_router_ospf6
from lib.pim import create_igmp_config, create_pim_config
from lib.topolog import logger
ROUTER_LIST = []
@ -60,13 +53,13 @@ def build_topo_from_json(tgen, topo):
"""
ROUTER_LIST = sorted(
topo["routers"].keys(), key=lambda x: int(re_search("\d+", x).group(0))
topo["routers"].keys(), key=lambda x: int(re_search(r"\d+", x).group(0))
)
SWITCH_LIST = []
if "switches" in topo:
SWITCH_LIST = sorted(
topo["switches"].keys(), key=lambda x: int(re_search("\d+", x).group(0))
topo["switches"].keys(), key=lambda x: int(re_search(r"\d+", x).group(0))
)
listRouters = sorted(ROUTER_LIST[:])
@ -204,7 +197,7 @@ def build_topo_from_json(tgen, topo):
logger.debug(
"Generated link data for router: %s\n%s",
curRouter,
json_dumps(
json.dumps(
topo["routers"][curRouter]["links"], indent=4, sort_keys=True
),
)
@ -287,7 +280,7 @@ def build_topo_from_json(tgen, topo):
logger.debug(
"Generated link data for router: %s\n%s",
curRouter,
json_dumps(
json.dumps(
topo["routers"][curRouter]["links"], indent=4, sort_keys=True
),
)
@ -297,7 +290,7 @@ def linux_intf_config_from_json(tgen, topo):
"""Configure interfaces from linux based on topo."""
routers = topo["routers"]
for rname in routers:
router = tgen.gears[rname]
router = tgen.net[rname]
links = routers[rname]["links"]
for rrname in links:
link = links[rrname]
@ -306,9 +299,9 @@ def linux_intf_config_from_json(tgen, topo):
else:
lname = link["interface"]
if "ipv4" in link:
router.run("ip addr add {} dev {}".format(link["ipv4"], lname))
router.cmd_raises("ip addr add {} dev {}".format(link["ipv4"], lname))
if "ipv6" in link:
router.run("ip -6 addr add {} dev {}".format(link["ipv6"], lname))
router.cmd_raises("ip -6 addr add {} dev {}".format(link["ipv6"], lname))
def build_config_from_json(tgen, topo, save_bkup=True):
@ -347,3 +340,50 @@ def build_config_from_json(tgen, topo, save_bkup=True):
if not result:
logger.info("build_config_from_json: failed to configure topology")
pytest.exit(1)
def create_tgen_from_json(testfile, json_file=None):
    """Create a topogen object given a testfile.

    - `testfile` : The path to the testfile.
    - `json_file` : The path to the json config file. If None the pathname is
      derived from the `testfile`, first by trying to replace `.py` by `.json`
      and if that isn't present then by removing the `test_` prefix as well.

    Returns a Topogen object with the parsed topology stored in `json_topo`.
    """
    from lib.topogen import Topogen  # Topogen imports this module too

    thisdir = os.path.dirname(os.path.realpath(testfile))
    basename = os.path.basename(testfile)
    logger.debug("starting standard JSON based module setup for %s", basename)

    assert basename.startswith("test_")
    assert basename.endswith(".py")

    # Only derive the JSON path when the caller didn't supply one; previously
    # an explicitly passed `json_file` argument was silently ignored.
    if json_file is None:
        json_file = os.path.join(thisdir, basename[:-3] + ".json")
        if not os.path.exists(json_file):
            json_file = os.path.join(thisdir, basename[5:-3] + ".json")
    assert os.path.exists(json_file)

    with open(json_file, "r") as topof:
        topo = json.load(topof)

    # Create topology
    tgen = Topogen(lambda tgen: build_topo_from_json(tgen, topo), basename[:-3])
    tgen.json_topo = topo
    return tgen
def setup_module_from_json(testfile, json_file=None):
    """Do the standard module setup for JSON based test.

    * `testfile` : The path to the testfile. The name is used to derive the json
      config file name as well (removing `test_` prefix and replacing `.py`
      suffix with `.json`)
    * `json_file` : Optional explicit path to the json config file, forwarded
      to `create_tgen_from_json`.

    Returns the started and configured Topogen instance.
    """
    # Create topology object
    tgen = create_tgen_from_json(testfile, json_file)

    # Start routers (and their daemons)
    start_topology(tgen, topo_daemons(tgen))

    # Configure routers
    build_config_from_json(tgen)
    assert not tgen.routers_have_failure()

    return tgen

View File

@ -26,8 +26,23 @@ Logging utilities for topology tests.
This file defines our logging abstraction.
"""
import sys
import logging
import os
import subprocess
import sys
if sys.version_info[0] > 2:
import configparser
else:
import ConfigParser as configparser
try:
from xdist import is_xdist_controller
except ImportError:
def is_xdist_controller():
return False
BASENAME = "topolog"
# Helper dictionary to convert Topogen logging levels to Python's logging.
DEBUG_TOPO2LOGGING = {
@ -38,81 +53,121 @@ DEBUG_TOPO2LOGGING = {
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
FORMAT = "%(asctime)s.%(msecs)03d %(levelname)s: %(name)s: %(message)s"
handlers = {}
logger = logging.getLogger("topolog")
class InfoFilter(logging.Filter):
    """Logging filter that passes only DEBUG and INFO records."""

    def filter(self, rec):
        # Reject WARNING and above; presumably those go to a separate
        # handler — confirm against the handler setup.
        return rec.levelno in (logging.DEBUG, logging.INFO)
#
# Logger class definition
#
class Logger(object):
"""
Logger class that encapsulates logging functions, internaly it uses Python
logging module with a separated instance instead of global.
Default logging level is 'info'.
"""
def __init__(self):
# Create default global logger
self.log_level = logging.INFO
self.logger = logging.Logger("topolog", level=self.log_level)
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setLevel(logging.DEBUG)
handler_stdout.addFilter(InfoFilter())
handler_stdout.setFormatter(
logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s")
)
handler_stderr = logging.StreamHandler()
handler_stderr.setLevel(logging.WARNING)
handler_stderr.setFormatter(
logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s")
)
self.logger.addHandler(handler_stdout)
self.logger.addHandler(handler_stderr)
# Handle more loggers
self.loggers = {"topolog": self.logger}
def set_log_level(self, level):
"Set the logging level"
self.log_level = DEBUG_TOPO2LOGGING.get(level)
self.logger.setLevel(self.log_level)
def get_logger(self, name="topolog", log_level=None, target=sys.stdout):
"""
Get a new logger entry. Allows creating different loggers for formating,
filtering or handling (file, stream or stdout/stderr).
"""
if log_level is None:
log_level = self.log_level
if name in self.loggers:
return self.loggers[name]
nlogger = logging.Logger(name, level=log_level)
def set_handler(l, target=None):
if target is None:
h = logging.NullHandler()
else:
if isinstance(target, str):
handler = logging.FileHandler(filename=target)
h = logging.FileHandler(filename=target, mode="w")
else:
handler = logging.StreamHandler(stream=target)
handler.setFormatter(
logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s")
)
nlogger.addHandler(handler)
self.loggers[name] = nlogger
return nlogger
h = logging.StreamHandler(stream=target)
h.setFormatter(logging.Formatter(fmt=FORMAT))
# Don't filter anything at the handler level
h.setLevel(logging.DEBUG)
l.addHandler(h)
return h
#
# Global variables
#
def set_log_level(l, level):
"Set the logging level."
# Messages sent to this logger only are created if this level or above.
log_level = DEBUG_TOPO2LOGGING.get(level, level)
l.setLevel(log_level)
logger_config = Logger()
logger = logger_config.logger
def get_logger(name, log_level=None, target=None):
l = logging.getLogger("{}.{}".format(BASENAME, name))
if log_level is not None:
set_log_level(l, log_level)
if target is not None:
set_handler(l, target)
return l
# nodeid: all_protocol_startup/test_all_protocol_startup.py::test_router_running
def get_test_logdir(nodeid=None):
    """Get log directory relative pathname.

    Derives the path from the pytest `nodeid` (defaulting to the currently
    running test) and the xdist distribution mode.
    """
    worker = os.getenv("PYTEST_XDIST_WORKER", "")
    dist_mode = os.getenv("PYTEST_XDIST_MODE", "no")

    if not nodeid:
        nodeid = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]

    # Sanitize parametrized test names: "[...]" -> "_..._"
    sanitized = nodeid.replace("[", "_").replace("]", "_")
    modpath, testname = sanitized.split("::")
    modpath = modpath[:-3].replace("/", ".")

    # We use different logdir paths based on how xdist is running.
    if dist_mode == "each":
        return os.path.join(modpath, testname, worker)
    if dist_mode == "load":
        return os.path.join(modpath, testname)
    assert dist_mode in ("no", "loadfile", "loadscope"), "Unknown dist mode {}".format(
        dist_mode
    )
    return modpath
def logstart(nodeid, location, rundir):
    """Called from pytest before module setup.

    Installs a per-test file handler (exec.log) under `rundir` for the test
    identified by `nodeid`, recording it in the module-level `handlers` map.
    `location` is accepted for hook-signature compatibility but unused here.
    """
    mode = os.getenv("PYTEST_XDIST_MODE", "no")
    worker = os.getenv("PYTEST_TOPOTEST_WORKER", "")

    # We only per-test log in the workers (or non-dist)
    if not worker and mode != "no":
        return

    handler_id = nodeid + worker
    assert handler_id not in handlers

    rel_log_dir = get_test_logdir(nodeid)
    exec_log_dir = os.path.join(rundir, rel_log_dir)
    # 1777: world-writable with sticky bit so other users' daemons can log here
    subprocess.check_call("mkdir -p {0} && chmod 1777 {0}".format(exec_log_dir), shell=True)
    exec_log_path = os.path.join(exec_log_dir, "exec.log")

    # Add test based exec log handler
    h = set_handler(logger, exec_log_path)
    handlers[handler_id] = h

    if worker:
        logger.info("Logging on worker %s for %s into %s", worker, handler_id, exec_log_path)
    else:
        logger.info("Logging for %s into %s", handler_id, exec_log_path)
def logfinish(nodeid, location):
    """Called from pytest after module teardown.

    Removes and closes the per-test exec.log handler installed by logstart(),
    if one exists. `location` is unused here.
    """
    # This function may not be called if pytest is interrupted.
    worker = os.getenv("PYTEST_TOPOTEST_WORKER", "")
    handler_id = nodeid + worker

    # Remove test based exec log handler, if installed.
    h = handlers.pop(handler_id, None)
    if h is None:
        return
    if worker:
        logger.info("Closing logs for %s", handler_id)
    logger.removeHandler(h)
    h.flush()
    h.close()
console_handler = set_handler(logger, None)
set_log_level(logger, "debug")

View File

@ -22,38 +22,40 @@
# OF THIS SOFTWARE.
#
import json
import os
import difflib
import errno
import re
import sys
import functools
import glob
import subprocess
import tempfile
import json
import os
import pdb
import platform
import difflib
import time
import re
import resource
import signal
from lib.topolog import logger
import subprocess
import sys
import tempfile
import time
from copy import deepcopy
import lib.topolog as topolog
from lib.topolog import logger
if sys.version_info[0] > 2:
import configparser
else:
import ConfigParser as configparser
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import Node, OVSSwitch, Host
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from mininet.link import Intf
from mininet.term import makeTerm
from lib import micronet
from lib.micronet_compat import Node
g_extra_config = {}
def get_logs_path(rundir):
    """Return the logs path for the current test, rooted at `rundir`."""
    return os.path.join(rundir, topolog.get_test_logdir())
def gdb_core(obj, daemon, corefiles):
gdbcmds = """
@ -283,7 +285,7 @@ def json_cmp(d1, d2, exact=False):
* `d2`: parsed JSON data structure
Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
in d1, e.g. when d2 is a "subset" of d1 without honoring any order. Otherwise an
in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
error report is generated and wrapped in a 'json_cmp_result()'. There are special
parameters and notations explained below which can be used to cover rather unusual
cases:
@ -497,6 +499,8 @@ def get_file(content):
"""
Generates a temporary file in '/tmp' with `content` and returns the file name.
"""
if isinstance(content, list) or isinstance(content, tuple):
content = "\n".join(content)
fde = tempfile.NamedTemporaryFile(mode="w", delete=False)
fname = fde.name
fde.write(content)
@ -991,7 +995,6 @@ def checkAddressSanitizerError(output, router, component, logdir=""):
and (callingProc != "checkAddressSanitizerError")
and (callingProc != "checkRouterCores")
and (callingProc != "stopRouter")
and (callingProc != "__stop_internal")
and (callingProc != "stop")
and (callingProc != "stop_topology")
and (callingProc != "checkRouterRunning")
@ -1026,7 +1029,7 @@ def checkAddressSanitizerError(output, router, component, logdir=""):
return
addressSanitizerError = re.search(
"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
)
if addressSanitizerError:
processAddressSanitizerError(addressSanitizerError, output, router, component)
@ -1042,7 +1045,7 @@ def checkAddressSanitizerError(output, router, component, logdir=""):
with open(file, "r") as asanErrorFile:
asanError = asanErrorFile.read()
addressSanitizerError = re.search(
"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
)
if addressSanitizerError:
processAddressSanitizerError(
@ -1052,48 +1055,218 @@ def checkAddressSanitizerError(output, router, component, logdir=""):
return False
def addRouter(topo, name):
"Adding a FRRouter to Topology"
def _sysctl_atleast(commander, variable, min_value):
if isinstance(min_value, tuple):
min_value = list(min_value)
is_list = isinstance(min_value, list)
MyPrivateDirs = [
"/etc/frr",
"/var/run/frr",
"/var/log",
]
if sys.platform.startswith("linux"):
return topo.addNode(name, cls=LinuxRouter, privateDirs=MyPrivateDirs)
elif sys.platform.startswith("freebsd"):
return topo.addNode(name, cls=FreeBSDRouter, privateDirs=MyPrivateDirs)
sval = commander.cmd_raises("sysctl -n " + variable).strip()
if is_list:
cur_val = [int(x) for x in sval.split()]
else:
cur_val = int(sval)
set_value = False
if is_list:
for i, v in enumerate(cur_val):
if v < min_value[i]:
set_value = True
else:
min_value[i] = v
else:
if cur_val < min_value:
set_value = True
if set_value:
if is_list:
valstr = " ".join([str(x) for x in min_value])
else:
valstr = str(min_value)
logger.info("Increasing sysctl %s from %s to %s", variable, cur_val, valstr)
commander.cmd_raises("sysctl -w {}=\"{}\"\n".format(variable, valstr))
def set_sysctl(node, sysctl, value):
"Set a sysctl value and return None on success or an error string"
valuestr = "{}".format(value)
command = "sysctl {0}={1}".format(sysctl, valuestr)
cmdret = node.cmd(command)
def _sysctl_assure(commander, variable, value):
if isinstance(value, tuple):
value = list(value)
is_list = isinstance(value, list)
matches = re.search(r"([^ ]+) = ([^\s]+)", cmdret)
if matches is None:
return cmdret
if matches.group(1) != sysctl:
return cmdret
if matches.group(2) != valuestr:
return cmdret
sval = commander.cmd_raises("sysctl -n " + variable).strip()
if is_list:
cur_val = [int(x) for x in sval.split()]
else:
cur_val = sval
return None
set_value = False
if is_list:
for i, v in enumerate(cur_val):
if v != value[i]:
set_value = True
else:
value[i] = v
else:
if cur_val != str(value):
set_value = True
if set_value:
if is_list:
valstr = " ".join([str(x) for x in value])
else:
valstr = str(value)
logger.info("Changing sysctl %s from %s to %s", variable, cur_val, valstr)
commander.cmd_raises("sysctl -w {}=\"{}\"\n".format(variable, valstr))
def assert_sysctl(node, sysctl, value):
"Set and assert that the sysctl is set with the specified value."
assert set_sysctl(node, sysctl, value) is None
def sysctl_atleast(commander, variable, min_value, raises=False):
    """Best-effort: ensure sysctl `variable` is at least `min_value`.

    When `commander` is None the sysctl is applied on the host itself.
    Failures are logged as warnings and re-raised only when `raises` is
    True, so callers can treat the adjustment as best-effort.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")

        return _sysctl_atleast(commander, variable, min_value)
    except subprocess.CalledProcessError:
        # Include the traceback, matching sysctl_assure's warning.
        logger.warning(
            "%s: Failed to assure sysctl min value %s = %s",
            commander, variable, min_value, exc_info=True
        )
        if raises:
            raise
def sysctl_assure(commander, variable, value, raises=False):
    """Best-effort: ensure sysctl `variable` is set to `value`.

    When `commander` is None the sysctl is applied on the host itself.
    Failures are logged as warnings (with traceback) and re-raised only
    when `raises` is True, so callers can treat this as best-effort.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")

        return _sysctl_assure(commander, variable, value)
    except subprocess.CalledProcessError:
        logger.warning(
            "%s: Failed to assure sysctl value %s = %s",
            commander, variable, value, exc_info=True
        )
        if raises:
            raise
def rlimit_atleast(rname, min_value, raises=False):
    """Best-effort: raise the soft limit of rlimit `rname` to `min_value`.

    The hard limit is raised too when it is below `min_value` (this only
    succeeds with sufficient privileges).  Failures are logged as warnings
    and re-raised only when `raises` is True.
    """
    try:
        cval = resource.getrlimit(rname)
        soft, hard = cval
        if soft < min_value:
            nval = (min_value, hard if min_value < hard else min_value)
            logger.info("Increasing rlimit %s from %s to %s", rname, cval, nval)
            resource.setrlimit(rname, nval)
    except (ValueError, resource.error):
        # BUG FIX: resource.getrlimit/setrlimit raise ValueError or
        # OSError (resource.error) -- not subprocess.CalledProcessError,
        # which the previous code caught and thus never intercepted a
        # real failure (e.g. EPERM raising the hard limit).
        logger.warning(
            "Failed to assure rlimit [%s] = %s",
            rname, min_value, exc_info=True
        )
        if raises:
            raise
def fix_netns_limits(ns):
    """Apply networking sysctl settings inside the namespace node `ns`.

    Tunes socket buffers, enables IPv4/IPv6 forwarding, relaxes rp_filter,
    adjusts ARP behavior, and sets routing-related knobs that topotests
    depend on.  Values are applied via sysctl_atleast/sysctl_assure, which
    log-and-continue on failure (best-effort).
    """
    # Maximum read and write socket buffer sizes
    sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10*1024, 87380, 16*2**20])
    sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10*1024, 87380, 16*2**20])

    # Disable reverse-path filtering so asymmetric test topologies work.
    sysctl_assure(ns, "net.ipv4.conf.all.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.default.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.lo.rp_filter", 0)

    sysctl_assure(ns, "net.ipv4.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv4.conf.default.forwarding", 1)

    # XXX if things fail look here as this wasn't done previously
    sysctl_assure(ns, "net.ipv6.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv6.conf.default.forwarding", 1)

    # ARP
    sysctl_assure(ns, "net.ipv4.conf.default.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.default.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.default.arp_ignore", 0)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.all.arp_ignore", 0)

    sysctl_assure(ns, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)

    # Keep ipv6 permanent addresses on an admin down
    sysctl_assure(ns, "net.ipv6.conf.all.keep_addr_on_down", 1)
    # skip_notify_on_dev_down only exists on kernels >= 4.20.
    if version_cmp(platform.release(), "4.20") >= 0:
        sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)

    sysctl_assure(ns, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
    sysctl_assure(ns, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)

    # igmp
    sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)

    # Use neigh information on selection of nexthop for multipath hops
    sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)
def fix_host_limits():
    """Increase host-wide system limits needed to run many routers.

    Raises process/file-descriptor rlimits, enables coredumps with a
    namespace-safe core pattern, and bumps network backlog, socket buffer,
    neighbor-table, and routing-table limits.  All adjustments are
    best-effort (failures are logged, not raised).
    """
    rlimit_atleast(resource.RLIMIT_NPROC, 8*1024)
    rlimit_atleast(resource.RLIMIT_NOFILE, 16*1024)
    sysctl_atleast(None, "fs.file-max", 16*1024)
    sysctl_atleast(None, "kernel.pty.max", 16*1024)

    # Enable coredumps
    # Original on ubuntu 17.x, but apport won't save as in namespace
    # |/usr/share/apport/apport %p %s %c %d %P
    sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
    sysctl_assure(None, "kernel.core_uses_pid", 1)
    sysctl_assure(None, "fs.suid_dumpable", 1)

    # Maximum connection backlog
    sysctl_atleast(None, "net.core.netdev_max_backlog", 4*1024)

    # Maximum read and write socket buffer sizes
    sysctl_atleast(None, "net.core.rmem_max", 16 * 2**20)
    sysctl_atleast(None, "net.core.wmem_max", 16 * 2**20)

    # Garbage Collection Settings for ARP and Neighbors
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4*1024)
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8*1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4*1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8*1024)
    # Hold entries for 10 minutes
    sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
    sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)

    # Neighbor solicitation retries (NOTE(review): labeled "igmp" upstream,
    # but mcast_solicit governs ARP/ND multicast solicit attempts).
    sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)

    # MLD
    sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)

    # Increase routing table size to 128K
    sysctl_atleast(None, "net.ipv4.route.max_size", 128*1024)
    sysctl_atleast(None, "net.ipv6.route.max_size", 128*1024)
def setup_node_tmpdir(logdir, name):
    """Prepare the per-node log directory for node `name` under `logdir`.

    Removes stale valgrind/ASAN artifacts and the old per-node directory
    from previous runs, recreates ``logdir/name`` world-writable (1777)
    so unprivileged daemons can write logs there, and returns the path of
    the node's main logfile (``logdir/name.log``).

    Raises subprocess.CalledProcessError if the shell commands fail.
    """
    # Cleanup old log, valgrind, and core files.
    # BUG FIX: the ASAN glob was previously "{1}.*.asan" (relative to the
    # CWD); anchor it under logdir like the other patterns.
    subprocess.check_call(
        "rm -rf {0}/{1}.valgrind.* {0}/{1}.*.asan {0}/{1}/".format(
            logdir, name
        ),
        shell=True
    )

    # Setup the per node directory.
    nodelogdir = "{}/{}".format(logdir, name)
    subprocess.check_call("mkdir -p {0} && chmod 1777 {0}".format(nodelogdir), shell=True)
    logfile = "{0}/{1}.log".format(logdir, name)
    return logfile
class Router(Node):
"A Node with IPv4/IPv6 forwarding enabled"
def __init__(self, name, **params):
super(Router, self).__init__(name, **params)
self.logdir = params.get("logdir")
# Backward compatibility:
# Load configuration defaults like topogen.
@ -1105,25 +1278,24 @@ class Router(Node):
"memleak_path": "",
}
)
self.config_defaults.read(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
)
# If this topology is using old API and doesn't have logdir
# specified, then attempt to generate an unique logdir.
self.logdir = params.get("logdir")
if self.logdir is None:
cur_test = os.environ["PYTEST_CURRENT_TEST"]
self.logdir = "/tmp/topotests/" + cur_test[
cur_test.find("/") + 1 : cur_test.find(".py")
].replace("/", ".")
self.logdir = get_logs_path(g_extra_config["rundir"])
# If the logdir is not created, then create it and set the
# appropriated permissions.
if not os.path.isdir(self.logdir):
os.system("mkdir -p " + self.logdir + "/" + name)
os.system("chmod -R go+rw /tmp/topotests")
# Erase logs of previous run
os.system("rm -rf " + self.logdir + "/" + name)
if not params.get("logger"):
# If logger is present topogen has already set this up
logfile = setup_node_tmpdir(self.logdir, name)
l = topolog.get_logger(name, log_level="debug", target=logfile)
params["logger"] = l
super(Router, self).__init__(name, **params)
self.daemondir = None
self.hasmpls = False
@ -1152,7 +1324,7 @@ class Router(Node):
self.reportCores = True
self.version = None
self.ns_cmd = "sudo nsenter -m -n -t {} ".format(self.pid)
self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
try:
# Allow escaping from running inside docker
cgroup = open("/proc/1/cgroup").read()
@ -1202,118 +1374,87 @@ class Router(Node):
def terminate(self):
# Stop running FRR daemons
self.stopRouter()
# Disable forwarding
set_sysctl(self, "net.ipv4.ip_forward", 0)
set_sysctl(self, "net.ipv6.conf.all.forwarding", 0)
super(Router, self).terminate()
os.system("chmod -R go+rw /tmp/topotests")
os.system("chmod -R go+rw " + self.logdir)
# Return count of running daemons
def listDaemons(self):
ret = []
rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
errors = ""
if re.search(r"No such file or directory", rundaemons):
return 0
if rundaemons is not None:
bet = rundaemons.split("\n")
for d in bet[:-1]:
daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
if daemonpid.isdigit() and pid_exists(int(daemonpid)):
ret.append(os.path.basename(d.rstrip().rsplit(".", 1)[0]))
rc, stdout, _ = self.cmd_status("ls -1 /var/run/%s/*.pid" % self.routertype, warn=False)
if rc:
return ret
for d in stdout.strip().split("\n"):
pidfile = d.strip()
try:
pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
name = os.path.basename(pidfile[:-4])
# probably not compatible with bsd.
rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
if rc:
logger.warning("%s: %s exited leaving pidfile %s (%s)", self.name, name, pidfile, pid)
self.cmd("rm -- " + pidfile)
else:
ret.append((name, pid))
except (subprocess.CalledProcessError, ValueError):
pass
return ret
def stopRouter(self, wait=True, assertOnError=True, minErrorVersion="5.1"):
def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
# Stop Running FRR Daemons
rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
errors = ""
if re.search(r"No such file or directory", rundaemons):
return errors
if rundaemons is not None:
dmns = rundaemons.split("\n")
# Exclude empty string at end of list
for d in dmns[:-1]:
# Only check if daemonfilepath starts with /
# Avoids hang on "-> Connection closed" in above self.cmd()
if d[0] == '/':
daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
if daemonpid.isdigit() and pid_exists(int(daemonpid)):
daemonname = os.path.basename(d.rstrip().rsplit(".", 1)[0])
logger.info("{}: stopping {}".format(self.name, daemonname))
try:
os.kill(int(daemonpid), signal.SIGTERM)
except OSError as err:
if err.errno == errno.ESRCH:
logger.error(
"{}: {} left a dead pidfile (pid={})".format(
self.name, daemonname, daemonpid
)
)
else:
logger.info(
"{}: {} could not kill pid {}: {}".format(
self.name, daemonname, daemonpid, str(err)
)
)
running = self.listDaemons()
if not running:
return ""
if not wait:
return errors
logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
for name, pid in running:
logger.info("{}: sending SIGTERM to {}".format(self.name, name))
try:
os.kill(pid, signal.SIGTERM)
except OSError as err:
logger.info("%s: could not kill %s (%s): %s", self.name, name, pid, str(err))
running = self.listDaemons()
if running:
running = self.listDaemons()
if running:
for _ in range(0, 5):
sleep(
0.1,
0.5,
"{}: waiting for daemons stopping: {}".format(
self.name, ", ".join(running)
self.name, ", ".join([x[0] for x in running])
),
)
running = self.listDaemons()
if not running:
break
counter = 20
while counter > 0 and running:
sleep(
0.5,
"{}: waiting for daemons stopping: {}".format(
self.name, ", ".join(running)
),
)
running = self.listDaemons()
counter -= 1
if not running:
return ""
if running:
# 2nd round of kill if daemons didn't exit
dmns = rundaemons.split("\n")
# Exclude empty string at end of list
for d in dmns[:-1]:
daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
if daemonpid.isdigit() and pid_exists(int(daemonpid)):
logger.info(
"{}: killing {}".format(
self.name,
os.path.basename(d.rstrip().rsplit(".", 1)[0]),
)
)
self.cmd("kill -7 %s" % daemonpid)
self.waitOutput()
self.cmd("rm -- {}".format(d.rstrip()))
logger.warning("%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running]))
for name, pid in running:
pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
logger.info("%s: killing %s", self.name, name)
self.cmd("kill -SIGBUS %d" % pid)
self.cmd("rm -- " + pidfile)
if not wait:
return errors
sleep(0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name)
errors = self.checkRouterCores(reportOnce=True)
if self.checkRouterVersion("<", minErrorVersion):
# ignore errors in old versions
errors = ""
if assertOnError and errors is not None and len(errors) > 0:
if assertOnError and (errors is not None) and len(errors) > 0:
assert "Errors found - details follow:" == 0, errors
return errors
def removeIPs(self):
for interface in self.intfNames():
self.cmd("ip address flush", interface)
try:
self.intf_ip_cmd(interface, "ip address flush " + interface)
except Exception as ex:
logger.error("%s can't remove IPs %s", self, str(ex))
# pdb.set_trace()
# assert False, "can't remove IPs %s" % str(ex)
def checkCapability(self, daemon, param):
if param is not None:
@ -1327,29 +1468,32 @@ class Router(Node):
return True
def loadConf(self, daemon, source=None, param=None):
# Unfortunately this API allowsfor source to not exist for any and all routers.
# print "Daemons before:", self.daemons
if daemon in self.daemons.keys():
self.daemons[daemon] = 1
if param is not None:
self.daemons_options[daemon] = param
if source is None:
self.cmd("touch /etc/%s/%s.conf" % (self.routertype, daemon))
self.waitOutput()
conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
if source is None or not os.path.exists(source):
self.cmd_raises("touch " + conf_file)
else:
self.cmd("cp %s /etc/%s/%s.conf" % (source, self.routertype, daemon))
self.waitOutput()
self.cmd("chmod 640 /etc/%s/%s.conf" % (self.routertype, daemon))
self.waitOutput()
self.cmd(
"chown %s:%s /etc/%s/%s.conf"
% (self.routertype, self.routertype, self.routertype, daemon)
)
self.waitOutput()
self.cmd_raises("cp {} {}".format(source, conf_file))
self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
self.cmd_raises("chmod 664 {}".format(conf_file))
if (daemon == "snmpd") and (self.routertype == "frr"):
# /etc/snmp is private mount now
self.cmd('echo "agentXSocket /etc/frr/agentx" > /etc/snmp/frr.conf')
self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')
if (daemon == "zebra") and (self.daemons["staticd"] == 0):
# Add staticd with zebra - if it exists
staticd_path = os.path.join(self.daemondir, "staticd")
try:
staticd_path = os.path.join(self.daemondir, "staticd")
except:
pdb.set_trace()
if os.path.isfile(staticd_path):
self.daemons["staticd"] = 1
self.daemons_options["staticd"] = ""
@ -1358,27 +1502,8 @@ class Router(Node):
logger.info("No daemon {} known".format(daemon))
# print "Daemons after:", self.daemons
# Run a command in a new window (gnome-terminal, screen, tmux, xterm)
def runInWindow(self, cmd, title=None):
topo_terminal = os.getenv("FRR_TOPO_TERMINAL")
if topo_terminal or ("TMUX" not in os.environ and "STY" not in os.environ):
term = topo_terminal if topo_terminal else "xterm"
makeTerm(self, title=title if title else cmd, term=term, cmd=cmd)
else:
nscmd = self.ns_cmd + cmd
if "TMUX" in os.environ:
self.cmd("tmux select-layout main-horizontal")
wcmd = "tmux split-window -h"
cmd = "{} {}".format(wcmd, nscmd)
elif "STY" in os.environ:
if os.path.exists(
"/run/screen/S-{}/{}".format(os.environ["USER"], os.environ["STY"])
):
wcmd = "screen"
else:
wcmd = "sudo -u {} screen".format(os.environ["SUDO_USER"])
cmd = "{} {}".format(wcmd, nscmd)
self.cmd(cmd)
return self.run_in_window(cmd, title)
def startRouter(self, tgen=None):
# Disable integrated-vtysh-config
@ -1430,15 +1555,18 @@ class Router(Node):
self.hasmpls = True
if self.hasmpls != True:
return "LDP/MPLS Tests need mpls kernel modules"
# Really want to use sysctl_atleast here, but only when MPLS is actually being
# used
self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")
shell_routers = g_extra_config["shell"]
if "all" in shell_routers or self.name in shell_routers:
self.runInWindow(os.getenv("SHELL", "bash"))
self.run_in_window(os.getenv("SHELL", "bash"))
vtysh_routers = g_extra_config["vtysh"]
if "all" in vtysh_routers or self.name in vtysh_routers:
self.runInWindow("vtysh")
self.run_in_window("vtysh")
if self.daemons["eigrpd"] == 1:
eigrpd_path = os.path.join(self.daemondir, "eigrpd")
@ -1464,7 +1592,7 @@ class Router(Node):
return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))
def startRouterDaemons(self, daemons=None, tgen=None):
"Starts all FRR daemons for this router."
"Starts FRR daemons for this router."
asan_abort = g_extra_config["asan_abort"]
gdb_breakpoints = g_extra_config["gdb_breakpoints"]
@ -1474,20 +1602,22 @@ class Router(Node):
valgrind_memleaks = g_extra_config["valgrind_memleaks"]
strace_daemons = g_extra_config["strace_daemons"]
bundle_data = ""
if os.path.exists("/etc/frr/support_bundle_commands.conf"):
bundle_data = subprocess.check_output(
["cat /etc/frr/support_bundle_commands.conf"], shell=True
# Get global bundle data
if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
# Copy global value if was covered by namespace mount
bundle_data = ""
if os.path.exists("/etc/frr/support_bundle_commands.conf"):
with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
bundle_data = rf.read()
self.cmd_raises(
"cat > /etc/frr/support_bundle_commands.conf",
stdin=bundle_data,
)
self.cmd(
"echo '{}' > /etc/frr/support_bundle_commands.conf".format(bundle_data)
)
# Starts actual daemons without init (ie restart)
# cd to per node directory
self.cmd("install -d {}/{}".format(self.logdir, self.name))
self.cmd("cd {}/{}".format(self.logdir, self.name))
self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
self.set_cwd("{}/{}".format(self.logdir, self.name))
self.cmd("umask 000")
# Re-enable to allow for report per run
@ -1560,13 +1690,25 @@ class Router(Node):
gdbcmd += " -ex 'b {}'".format(bp)
gdbcmd += " -ex 'run {}'".format(cmdopt)
self.runInWindow(gdbcmd, daemon)
self.run_in_window(gdbcmd, daemon)
logger.info("%s: %s %s launched in gdb window", self, self.routertype, daemon)
else:
if daemon != "snmpd":
cmdopt += " -d "
cmdopt += rediropt
self.cmd(" ".join([cmdenv, binary, cmdopt]))
logger.info("{}: {} {} started".format(self, self.routertype, daemon))
try:
self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
except subprocess.CalledProcessError as error:
self.logger.error(
'%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
self, daemon, error.returncode, error.cmd,
'\n:stdout: "{}"'.format(error.stdout.strip()) if error.stdout else "",
'\n:stderr: "{}"'.format(error.stderr.strip()) if error.stderr else "",
)
else:
logger.info("%s: %s %s started", self, self.routertype, daemon)
# Start Zebra first
if "zebra" in daemons_list:
@ -1581,15 +1723,22 @@ class Router(Node):
daemons_list.remove("staticd")
if "snmpd" in daemons_list:
# Give zerbra a chance to configure interface addresses that snmpd daemon
# may then use.
time.sleep(2)
start_daemon("snmpd")
while "snmpd" in daemons_list:
daemons_list.remove("snmpd")
# Fix Link-Local Addresses
# Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
self.cmd(
"for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done"
)
if daemons is None:
# Fix Link-Local Addresses on initial startup
# Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
_, output, _ = self.cmd_status(
"for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
stderr=subprocess.STDOUT
)
logger.debug("Set MACs:\n%s", output)
# Now start all the other daemons
for daemon in daemons_list:
@ -1602,6 +1751,10 @@ class Router(Node):
if re.search(r"No such file or directory", rundaemons):
return "Daemons are not running"
# Update the permissions on the log files
self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))
return ""
def killRouterDaemons(
@ -1630,7 +1783,6 @@ class Router(Node):
)
)
self.cmd("kill -9 %s" % daemonpid)
self.waitOutput()
if pid_exists(int(daemonpid)):
numRunning += 1
if wait and numRunning > 0:
@ -1657,7 +1809,6 @@ class Router(Node):
)
)
self.cmd("kill -9 %s" % daemonpid)
self.waitOutput()
self.cmd("rm -- {}".format(d.rstrip()))
if wait:
errors = self.checkRouterCores(reportOnce=True)
@ -1914,53 +2065,9 @@ class Router(Node):
leakfile.close()
class LinuxRouter(Router):
"A Linux Router Node with IPv4/IPv6 forwarding enabled."
def __init__(self, name, **params):
Router.__init__(self, name, **params)
def config(self, **params):
Router.config(self, **params)
# Enable forwarding on the router
assert_sysctl(self, "net.ipv4.ip_forward", 1)
assert_sysctl(self, "net.ipv6.conf.all.forwarding", 1)
# Enable coredumps
assert_sysctl(self, "kernel.core_uses_pid", 1)
assert_sysctl(self, "fs.suid_dumpable", 1)
# this applies to the kernel not the namespace...
# original on ubuntu 17.x, but apport won't save as in namespace
# |/usr/share/apport/apport %p %s %c %d %P
corefile = "%e_core-sig_%s-pid_%p.dmp"
assert_sysctl(self, "kernel.core_pattern", corefile)
def terminate(self):
"""
Terminate generic LinuxRouter Mininet instance
"""
set_sysctl(self, "net.ipv4.ip_forward", 0)
set_sysctl(self, "net.ipv6.conf.all.forwarding", 0)
Router.terminate(self)
class FreeBSDRouter(Router):
    "A FreeBSD Router Node with IPv4/IPv6 forwarding enabled."

    # Thin subclass: no FreeBSD-specific behavior here; it simply tags the
    # node type and defers everything to Router.
    def __init__(self, name, **params):
        Router.__init__(self, name, **params)
class LegacySwitch(OVSSwitch):
    "A Legacy Switch without OpenFlow"

    def __init__(self, name, **params):
        # failMode="standalone" makes OVS act as a plain learning switch
        # when no OpenFlow controller is present.
        OVSSwitch.__init__(self, name, failMode="standalone", **params)
        # IP address of the switch itself; unset until configured.
        self.switchIP = None
def frr_unicode(s):
    """Convert string to unicode, depending on python version.

    On Python 3, `str` is already unicode so `s` is returned unchanged;
    on Python 2 it is converted with the builtin `unicode`.
    """
    if sys.version_info[0] > 2:
        return s
    else:
        # BUG FIX: removed a duplicated, unreachable `return unicode(s)`
        # line left over from a botched merge/diff.
        return unicode(s)  # pylint: disable=E0602

View File

@ -156,14 +156,6 @@ from lib.pim import (
pytestmark = [pytest.mark.pimd, pytest.mark.staticd]
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/multicast_pim_static_rp.json".format(CWD)
try:
with open(jsonFile, "r") as topoJson:
TOPO = json.load(topoJson)
except IOError:
logger.info("Could not read file: %s", jsonFile)
# Global variables
GROUP_RANGE_ALL = "224.0.0.0/4"
GROUP_RANGE = "225.1.1.1/32"
@ -241,7 +233,10 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
tgen = Topogen(CreateTopo, mod.__name__)
json_file = "{}/multicast_pim_static_rp.json".format(CWD)
tgen = Topogen(json_file, mod.__name__)
global TOPO
TOPO = tgen.json_topo
# ... and here it calls Mininet initialization functions.
@ -1269,7 +1264,7 @@ def test_send_join_on_higher_preffered_rp_p1(request):
shutdown_bringup_interface(tgen, dut, intf, False)
dut = "r1"
intf = "r1-r3-eth1"
intf = "r1-r3-eth2"
shutdown_bringup_interface(tgen, dut, intf, False)
step("r1 : Verify joinTx count before sending join")