
Commit 200808e

jmreicha and kaelemc authored
feat(asav): add support for asav (#2945)
Adds support for the Cisco ASAv firewall. This change registers the new node type and implements its initialization logic, allowing users to deploy Cisco ASAv nodes within their container-based network labs. It also adds basic documentation and an example lab showing usage. I tested a few things locally with the provided sample lab; I'm not sure what else needs to be checked or verified for this to be accepted. --------- Co-authored-by: Kaelem Chandra <[email protected]>
1 parent 97db6e1 commit 200808e

File tree

8 files changed: +416 -0 lines changed

core/register.go

Lines changed: 2 additions & 0 deletions
```diff
@@ -11,6 +11,7 @@ import (
     clabnodesc8000 "github.com/srl-labs/containerlab/nodes/c8000"
     clabnodesceos "github.com/srl-labs/containerlab/nodes/ceos"
     clabnodescheckpoint_cloudguard "github.com/srl-labs/containerlab/nodes/checkpoint_cloudguard"
+    clabnodescisco_asav "github.com/srl-labs/containerlab/nodes/cisco_asav"
     clabnodescisco_sdwan "github.com/srl-labs/containerlab/nodes/cisco_sdwan"
     clabnodescjunosevolved "github.com/srl-labs/containerlab/nodes/cjunosevolved"
     clabnodescrpd "github.com/srl-labs/containerlab/nodes/crpd"
@@ -77,6 +78,7 @@ func (c *CLab) RegisterNodes() { //nolint:funlen
     clabnodessrl.Register(c.Reg)
     clabnodessros.Register(c.Reg)
     clabnodesvr_aoscx.Register(c.Reg)
+    clabnodescisco_asav.Register(c.Reg)
     clabnodesvr_csr.Register(c.Reg)
     clabnodesvr_c8000v.Register(c.Reg)
     clabnodesvr_freebsd.Register(c.Reg)
```

docs/lab-examples/asav01.md

Lines changed: 85 additions & 0 deletions
| | |
| ----------------------------- | --------------------------------------------------------------------- |
| **Description**               | A Cisco ASAv connected to two Alpine Linux hosts                       |
| **Components**                | [Cisco ASAv][asav], [Multitool Alpine Linux][client]                   |
| **Resource requirements**[^1] | :fontawesome-solid-microchip: 1 <br/>:fontawesome-solid-memory: 2 GB   |
| **Topology file**             | [asav01.clab.yml][topofile]                                            |
| **Name**                      | asav01                                                                 |
| **Version information**[^2]   | `asav9-23-1.qcow2`, `docker:24.0.6`                                    |
## Description

This lab consists of one Cisco ASAv firewall connected to two Alpine Linux nodes.

```
client1<---->ASAv<---->client2
```

## Configuration
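To bring the lab up, deploy the topology with containerlab (a typical invocation, assuming containerlab is installed and the ASAv image referenced in the topology has already been built with vrnetlab):

```bash
containerlab deploy -t asav01.clab.yml
```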
The ASAv node takes about 5-7 minutes to complete its startup. Check with `docker container ls` and `docker logs -f clab-asav01-asav1` until the ASAv container is reported as `healthy`.
```
# docker container ls
CONTAINER ID   IMAGE                                  COMMAND                  CREATED         STATUS                   PORTS                                                NAMES
5682d73984d1   vrnetlab/vr-asav:9.23.1                "/launch.py --userna…"   5 minutes ago   Up 5 minutes (healthy)   22/tcp, 80/tcp, 443/tcp, 5000/tcp, 10000-10099/tcp   clab-asav01-asav1
1ebe3dae6846   wbitt/network-multitool:alpine-extra   "/bin/sh /docker-ent…"   5 minutes ago   Up 5 minutes             80/tcp, 443/tcp, 1180/tcp, 11443/tcp                 clab-asav01-client1
9726c9bb9e21   wbitt/network-multitool:alpine-extra   "/bin/sh /docker-ent…"   5 minutes ago   Up 5 minutes             80/tcp, 443/tcp, 1180/tcp, 11443/tcp                 clab-asav01-client2
```
### asav1

Log into the ASAv node using SSH; the password is `CiscoAsa1!`:

```bash
ssh admin@clab-asav01-asav1
```

Optionally configure the ASA with any additional settings as needed.
### client1

The two clients should be configured with the correct IP addresses and a route to the other client via the ASAv node.
First attach to the container: `docker exec -it clab-asav01-client1 bash`

```
docker exec -it clab-asav01-client1 bash

# ip -br a show dev eth0
eth0@if7 UP 172.20.20.4/24 3fff:172:20:20::4/64 fe80::a4ea:64ff:fe33:c15c/64

# ip route
default via 172.20.20.1 dev eth0
172.20.20.0/24 dev eth0 proto kernel scope link src 172.20.20.4

# ping 172.20.20.2
PING 172.20.20.2 (172.20.20.2) 56(84) bytes of data.
64 bytes from 172.20.20.2: icmp_seq=1 ttl=64 time=0.163 ms
64 bytes from 172.20.20.2: icmp_seq=2 ttl=64 time=0.047 ms
64 bytes from 172.20.20.2: icmp_seq=3 ttl=64 time=0.053 ms
```
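The addresses shown above belong to the management network on `eth0`; for traffic to actually flow through the ASAv, each client's `eth1` data interface still needs an address and a route. A minimal sketch (the subnets and next hop are illustrative, not part of the shipped lab):

```bash
# inside clab-asav01-client1; 10.0.1.0/24 and 10.0.2.0/24 are example subnets
ip addr add 10.0.1.10/24 dev eth1
ip route add 10.0.2.0/24 via 10.0.1.1  # next hop = ASAv data interface facing client1
```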
### client2

Similarly for client2, verify connectivity:

```
docker exec -it clab-asav01-client2 bash

# ip -br a show dev eth0
eth0@if5 UP 172.20.20.2/24 3fff:172:20:20::2/64 fe80::b86b:51ff:fed8:1c85/64

# ping 172.20.20.4
PING 172.20.20.4 (172.20.20.4) 56(84) bytes of data.
64 bytes from 172.20.20.4: icmp_seq=1 ttl=64 time=0.055 ms
64 bytes from 172.20.20.4: icmp_seq=2 ttl=64 time=0.035 ms
64 bytes from 172.20.20.4: icmp_seq=3 ttl=64 time=0.065 ms

# ping 172.20.20.6
PING 172.20.20.6 (172.20.20.6) 56(84) bytes of data.
From 172.20.20.2 icmp_seq=1 Destination Host Unreachable
From 172.20.20.2 icmp_seq=2 Destination Host Unreachable
From 172.20.20.2 icmp_seq=3 Destination Host Unreachable
```

[asav]: https://www.cisco.com/c/en/us/products/collateral/security/adaptive-security-virtual-appliance-asav/adapt-security-virtual-appliance-ds.html
[client]: https://github.com/wbitt/Network-MultiTool
[topofile]: https://github.com/srl-labs/containerlab/tree/main/lab-examples/asav01/asav01.clab.yml

docs/manual/kinds/cisco_asav.md

Lines changed: 95 additions & 0 deletions
---
search:
  boost: 4
kind_code_name: cisco_asav
kind_display_name: Cisco ASAv
---

# Cisco ASAv

[Cisco ASAv](https://www.cisco.com/c/en/us/products/collateral/security/adaptive-security-virtual-appliance-asav/adapt-security-virtual-appliance-ds.html) is identified with the `cisco_asav` kind in the [topology file](../topo-def-file.md). It is built using the [vrnetlab](../vrnetlab.md) project and is essentially a QEMU VM packaged in a Docker container format.
## Managing ASAv nodes

/// note
Containers with Cisco ASAv inside will take ~5-7 minutes to fully boot.
You can monitor the progress with `docker logs -f <container-name>`.
///

To connect to a `bash` shell of a running ASAv container:

```bash
docker exec -it <container-name/id> bash
```

To connect to the ASAv CLI (password `CiscoAsa1!`):

```bash
ssh admin@<container-name>
```

To connect to the serial port (console) exposed over TCP port 5000:

```bash
# from container host
telnet <container-name> 5000
```

If telnet is not available on your container host, you can instead connect to the container and run `telnet localhost 5000` there.

/// note
Default user credentials (non-standard because of the ASA's password complexity and length requirements): `admin:CiscoAsa1!`
///
## Interface naming

You can use [interface names](../topo-def-file.md#interface-naming) in the topology file as they appear in -{{ kind_display_name }}-.

The interface naming convention is: `GigabitEthernet0/X` (or `Gi0/X`), where `X` is the port number.

With that naming convention in mind:

- `Gi0/0` - first data port available
- `Gi0/1` - second data port, and so on...

/// note
Data port numbering starts at `0`.
///
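For illustration, a topology could reference these ports either by their ASA-style names or by the underlying Linux interface names; a sketch with placeholder node names:

```yaml
topology:
  links:
    # both endpoints below address data ports of the ASAv node
    - endpoints: ["asav1:Gi0/0", "client1:eth1"]
    - endpoints: ["asav1:GigabitEthernet0/1", "client2:eth1"]
```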
The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM:

- `eth0` - management interface connected to the containerlab management network (rendered as `Management0/0` in the CLI)
- `eth1` - first data interface, mapped to the first data port of the VM (rendered as `GigabitEthernet0/0`)
- `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `GigabitEthernet0/1` and so on)

When containerlab launches a -{{ kind_display_name }}- node, the `Management0/0` interface of the VM is assigned the `10.0.0.15/24` address by the QEMU DHCP server. This interface is transparently stitched to the container's `eth0` interface, so users can reach the management plane of the -{{ kind_display_name }}- via containerlab's assigned IP.

Data interfaces `GigabitEthernet0/0+` need to be configured with IP addressing manually, using the CLI or another available management interface.
## Features and options

### Node configuration

Cisco ASAv nodes come up with a basic configuration where only the management interface and the default `admin` user are provisioned.

#### User defined startup config

It is possible to make ASAv nodes boot up with a user-defined startup config instead of the built-in one. With the [`startup-config`](../nodes.md#startup-config) property of the node/kind, the user sets the path to the config file that will be mounted to the container and used as a startup config:

```yaml
topology:
  nodes:
    asav:
      kind: cisco_asav
      startup-config: myconfig.txt
```
With this knob containerlab is instructed to take the `myconfig.txt` file from the directory that hosts the topology file and copy it to the lab directory of that specific node under the name `/config/startup-config.cfg`. The directory containing the startup config is then mounted into the container, which results in this config being applied at startup by the node.

Configuration is applied after the node is started, so it can contain partial configuration snippets that you wish to add on top of the default config the node boots up with.

## Lab examples

The following simple lab consists of two Linux hosts connected via one ASAv firewall node:

- [Cisco ASAv](../../lab-examples/asav01.md)
lab-examples/asav01/asav01.clab.yml

Lines changed: 20 additions & 0 deletions
```yaml
name: asav01

topology:
  nodes:
    # ASAv runs as a VM and is slower to boot. Use 'docker logs -f clab-asav01-asav1' to view progress
    asav1:
      kind: cisco_asav
      image: vrnetlab/cisco_asav:9-23-1

    client1:
      kind: linux
      image: wbitt/network-multitool:alpine-extra

    client2:
      kind: linux
      image: wbitt/network-multitool:alpine-extra

  links:
    - endpoints: ["client1:eth1", "asav1:eth1"]
    - endpoints: ["client2:eth1", "asav1:eth2"]
```

mkdocs.yml

Lines changed: 2 additions & 0 deletions
```diff
@@ -27,6 +27,7 @@ nav:
 - Cisco SD-WAN: manual/kinds/cisco_sdwan.md
 - Cisco Catalyst 9000v: manual/kinds/vr-cat9kv.md
 - Cisco IOL: manual/kinds/cisco_iol.md
+- Cisco ASAv: manual/kinds/cisco_asav.md
 - Cisco FTDv: manual/kinds/vr-ftdv.md
 - Juniper:
 - Juniper cRPD: manual/kinds/crpd.md
@@ -169,6 +170,7 @@ nav:
 - RARE/freeRtr: lab-examples/rare-freertr.md
 - Juniper vSRX: lab-examples/vsrx01.md
 - OpenBSD: lab-examples/openbsd01.md
+- Cisco ASAv: lab-examples/asav01.md
 - Cisco FTDv: lab-examples/ftdv01.md
 - Templated labs:
 - Leaf-spine topology: lab-examples/templated01.md
```

nodes/cisco_asav/cisco_asav.go

Lines changed: 91 additions & 0 deletions
```go
// Copyright 2020 Nokia
// Licensed under the BSD 3-Clause License.
// SPDX-License-Identifier: BSD-3-Clause

package cisco_asav

import (
    "fmt"
    "path"
    "regexp"

    clabnodes "github.com/srl-labs/containerlab/nodes"
    clabtypes "github.com/srl-labs/containerlab/types"
    clabutils "github.com/srl-labs/containerlab/utils"
)

var (
    kindNames          = []string{"cisco_asav"}
    defaultCredentials = clabnodes.NewCredentials("admin", "CiscoAsa1!")

    InterfaceRegexp = regexp.MustCompile(`(?:GigabitEthernet|Gi)\s?0/(?P<port>\d+)`)
    InterfaceOffset = 0
    InterfaceHelp   = "GigabitEthernet0/X or Gi0/X (where X >= 0) or ethX (where X >= 1)"
)

const (
    scrapliPlatformName = "cisco_asa"
)

// Register registers the node in the NodeRegistry.
func Register(r *clabnodes.NodeRegistry) {
    platformAttrs := &clabnodes.PlatformAttrs{
        ScrapliPlatformName: scrapliPlatformName,
    }

    nrea := clabnodes.NewNodeRegistryEntryAttributes(defaultCredentials, nil, platformAttrs)

    r.Register(kindNames, func() clabnodes.Node {
        return new(asav)
    }, nrea)
}

type asav struct {
    clabnodes.VRNode
}

func (n *asav) Init(cfg *clabtypes.NodeConfig, opts ...clabnodes.NodeOption) error {
    // Init VRNode
    n.VRNode = *clabnodes.NewVRNode(n, defaultCredentials, scrapliPlatformName)
    // set virtualization requirement
    n.HostRequirements.VirtRequired = true

    n.Cfg = cfg
    for _, o := range opts {
        o(n)
    }
    // env vars are used to set launch.py arguments in vrnetlab container
    defEnv := map[string]string{
        "CONNECTION_MODE":    clabnodes.VrDefConnMode,
        "USERNAME":           defaultCredentials.GetUsername(),
        "PASSWORD":           defaultCredentials.GetPassword(),
        "DOCKER_NET_V4_ADDR": n.Mgmt.IPv4Subnet,
        "DOCKER_NET_V6_ADDR": n.Mgmt.IPv6Subnet,
    }
    n.Cfg.Env = clabutils.MergeStringMaps(defEnv, n.Cfg.Env)

    // mount config dir to support startup-config functionality
    n.Cfg.Binds = append(
        n.Cfg.Binds,
        fmt.Sprint(path.Join(n.Cfg.LabDir, n.ConfigDirName), ":/config"),
    )

    if n.Cfg.Env["CONNECTION_MODE"] == "macvtap" {
        // mount dev dir to enable macvtap
        n.Cfg.Binds = append(n.Cfg.Binds, "/dev:/dev")
    }

    n.Cfg.Cmd = fmt.Sprintf(
        "--username %s --password %s --hostname %s --connection-mode %s --trace",
        n.Cfg.Env["USERNAME"],
        n.Cfg.Env["PASSWORD"],
        n.Cfg.ShortName,
        n.Cfg.Env["CONNECTION_MODE"],
    )

    n.InterfaceRegexp = InterfaceRegexp
    n.InterfaceOffset = InterfaceOffset
    n.InterfaceHelp = InterfaceHelp

    return nil
}
```
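For a quick sanity check of the interface alias handling, the `InterfaceRegexp` defined above can be exercised in a small standalone program (not part of the commit; the sample names are arbitrary):

```go
package main

import (
    "fmt"
    "regexp"
)

func main() {
    // same pattern as InterfaceRegexp in nodes/cisco_asav/cisco_asav.go
    re := regexp.MustCompile(`(?:GigabitEthernet|Gi)\s?0/(?P<port>\d+)`)

    for _, name := range []string{"GigabitEthernet0/0", "Gi0/1", "Gi 0/7"} {
        m := re.FindStringSubmatch(name)
        // the named "port" capture group carries the data-port index
        fmt.Printf("%-20s -> port %s\n", name, m[re.SubexpIndex("port")])
    }
}
```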
