Netopeer2

This post is messier than the clixon_backend_restconf one: it ranges from reading the Netopeer2 code at the start to RESTCONF implementation details later on, which are quite different topics. It also has more useful content, because the RESTCONF server I am implementing uses components much closer to what Netopeer2-server uses.

As before, the parts related to the RESTCONF implementation may later be split out into their own post.

libyang data structures

/**
 * @brief Generic structure for a data node, directly applicable to the data nodes defined as #LYS_CONTAINER, #LYS_LIST
 * and #LYS_CHOICE.
 *
 * Completely fits to containers and choices and is compatible (can be used interchangeably except the #child member)
 * with all other lyd_node_* structures. All data nodes are provided as ::lyd_node structure by default.
 * According to the schema's ::lys_node#nodetype member, the specific object is supposed to be cast to
 * ::lyd_node_leaf_list or ::lyd_node_anydata structures. This structure fits only to #LYS_CONTAINER, #LYS_LIST and
 * #LYS_CHOICE values.
 *
 * To traverse all the child elements or attributes, use #LY_TREE_FOR or #LY_TREE_FOR_SAFE macro. To traverse
 * the whole subtree, use #LY_TREE_DFS_BEGIN macro.
 */
struct lyd_node {
    struct lys_node *schema;    /**< pointer to the schema definition of this node */
    uint8_t validity;           /**< [validity flags](@ref validityflags) */
    uint8_t dflt:1;             /**< flag for implicit default node */
    uint8_t when_status:3;      /**< bit for checking if the when-stmt condition is resolved - internal use only,
                                     do not use this value! */
    struct lyd_attr *attr;      /**< pointer to the list of attributes of this node */
    struct lyd_node *next;      /**< pointer to the next sibling node (NULL if there is no one) */
    struct lyd_node *prev;      /**< pointer to the previous sibling node (last sibling if there is none) */
    struct lyd_node *parent;    /**< pointer to the parent node, NULL in case of root node */
    struct lyd_node *child;     /**< pointer to the first child node */
};
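The doc comment above mentions the traversal macros; a minimal sketch of how they are used, assuming the libyang 1.x API that Netopeer2 0.7 builds against (root is a placeholder lyd_node pointer):

/* iterate over the direct children of a container/list node */
struct lyd_node *child;
LY_TREE_FOR(root->child, child) {
    printf("child: %s\n", child->schema->name);
}

/* depth-first walk over the whole subtree */
struct lyd_node *elem, *next;
LY_TREE_DFS_BEGIN(root, next, elem) {
    /* ... inspect elem here ... */
    LY_TREE_DFS_END(root, next, elem);
}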
  • libyang, libnetconf2
  • sysrepo depends on libyang
// the flow inside main: server_init()
/* connect to the sysrepo */
rc = sr_connect("netopeer2", SR_CONN_DAEMON_REQUIRED | SR_CONN_DAEMON_START, &np2srv.sr_conn);
/* server session */
np2srv.sr_sess.ds = SR_DS_STARTUP;
np2srv.sr_sess.opts = SR_SESS_DEFAULT;
rc = sr_session_start(np2srv.sr_conn, np2srv.sr_sess.ds, np2srv.sr_sess.opts, &np2srv.sr_sess.srs);
/* init libyang context with schemas */ // libyang, TODO
np2srv_init_schemas()
/* init libnetconf2 */
nc_server_init(np2srv.ly_ctx)

/* set NETCONF operations callbacks */
snode = ly_ctx_get_node(np2srv.ly_ctx, NULL, "/ietf-netconf:get-config", 0);
nc_set_rpc_callback(snode, op_get);

snode = ly_ctx_get_node(np2srv.ly_ctx, NULL, "/ietf-netconf:edit-config", 0);
nc_set_rpc_callback(snode, op_editconfig);
  • np2srv_sr_session_refresh(srs, NULL); is used all over the place; make sure to understand it (a sketch of this wrapper pattern follows the np2srv_sr_get_item excerpt below)
// Example: grep for sr_ in netopeer2/server/op_get_config.c to see every sysrepo-related call
// np2srv_ : netopeer2 to sysrepo (presumably)
// op_sr2ly_ : operation, sysrepo to libyang
struct nc_server_reply *
op_get(struct lyd_node *rpc, struct nc_session *ncs)
// first a pile of hard-to-follow libyang/libnetconf2 operations (that is most of the function)
// plus some dsupdate handling and the like
// filters
// the point where the sysrepo API starts being used is this loop:
/*
 * create the data tree for the data reply
 */
for (i = 0; (signed)i < filter_count; i++) {
    /* create the subtree */
    if (op_sr2ly_subtree(sessions->srs, &root, filters[i], &ereply)) {
        goto error;
    }
}
// after this it is libyang and libnetconf2 again
// continuing from the above,
int
op_sr2ly_subtree(sr_session_ctx_t *srs, struct lyd_node **root, const char *subtree_xpath, struct nc_server_reply **ereply)
// everything touching sysrepo goes through np2srv_sr_* functions, re-wrapped in netopeer2/server/operations.c (a 3000+ line file)
rc = np2srv_sr_get_items_iter(srs, full_subtree_xpath, &sriter, NULL);
while ((!np2srv_sr_get_item_next(srs, sriter, &value, NULL))) {
    if (op_sr2ly(*root, value, &node, &cache)) {
        sr_free_val(value);
        sr_free_val_iter(sriter);
        goto error;
    }

    if (!(*root)) {
        *root = node;
    }
    sr_free_val(value);
}
sr_free_val_iter(sriter);
// grep for sr_get_item in netopeer2/server/operations.c
int np2srv_sr_get_item(sr_session_ctx_t *srs, const char *xpath,
                       sr_val_t **value, struct nc_server_reply **ereply)
pthread_rwlock_rdlock(&sr_lock); // sr_lock
if (!np2srv.disconnected) {
    rc = sr_get_item(srs, xpath, value);
}
// followed by some error handling
pthread_rwlock_unlock(&sr_lock);
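The np2srv_sr_session_refresh mentioned earlier follows this same wrapper pattern. A hedged sketch of what such a wrapper looks like, modeled on np2srv_sr_get_item above; the real function in operations.c differs in its error handling, and sr_session_refresh is the underlying sysrepo 0.7 call:

int
np2srv_sr_session_refresh(sr_session_ctx_t *srs, struct nc_server_reply **ereply)
{
    int rc = SR_ERR_OK;

    pthread_rwlock_rdlock(&sr_lock);
    if (!np2srv.disconnected) {
        /* re-read the session so it sees the current datastore content */
        rc = sr_session_refresh(srs);
    }
    pthread_rwlock_unlock(&sr_lock);

    /* on failure the real code converts rc into an <rpc-error> stored in *ereply */
    return (rc == SR_ERR_OK) ? 0 : -1;
}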

schema

  • np2srv_init_schemas(void)

  • fetches the YANG schemas from sysrepo

  • build libyang context

  • use modules from sysrepo

/**
* @brief Structure that contains information about a module installed in sysrepo.
*/
typedef struct sr_schema_s {
/**
* Memory context used internally by Sysrepo for efficient storage
* and conversion of this structure.
*/
sr_mem_ctx_t *_sr_mem;

const char *module_name; /**< Name of the module. */
const char *ns; /**< Namespace of the module used in @ref xp_page "XPath". */
const char *prefix; /**< Prefix of the module. */
bool installed; /**< TRUE if the module was explicitly installed. */
bool implemented; /**< TRUE if the module is implemented (does not have to be installed),
not just imported. */

sr_sch_revision_t revision; /**< Revision the module. */

sr_sch_submodule_t *submodules; /**< Array of all installed submodules of the module. */
size_t submodule_count; /**< Number of module's submodules. */

char **enabled_features; /**< Array of enabled features */
size_t enabled_feature_cnt; /**< Number of enabled feature */
} sr_schema_t;

libyang context ly_ctx

Important data structures

/* NETCONF - SYSREPO connections */
struct np2_sessions {
struct nc_session *ncs; /* NETCONF session */
sr_session_ctx_t *srs; /* SYSREPO session */
sr_datastore_t ds; /* current SYSREPO datastore */
sr_sess_options_t opts; /* current SYSREPO session options */

int flags; /* various flags */
#define NP2S_CAND_CHANGED 0x01
};
/* Netopeer server internal data */
struct np2srv {
sr_conn_ctx_t *sr_conn; /**< sysrepo connection */
int disconnected; /**< flag marking that server is currently not connected to sysrepo */
struct np2_sessions sr_sess; /**< Netopeer's sysrepo sessions */
sr_subscription_ctx_t *sr_subscr; /**< sysrepo subscription context */

struct nc_pollsession *nc_ps; /**< libnetconf2 pollsession structure */
uint16_t nc_max_sessions; /**< maximum number of running sessions */
pthread_t workers[NP2SRV_THREAD_COUNT]; /**< worker threads handling sessions */

struct ly_ctx *ly_ctx; /**< libyang's context */
#ifdef NP2SRV_ENABLED_LY_CTX_INFO_CACHE
uint16_t cached_ly_ctx_module_set_id; /**< module-set-id at the time ly_ctx_info was last cached */
struct lyd_node *ly_ctx_info_cache; /**< a cache of calling ly_ctx_info on the ly_ctx */
#endif
pthread_rwlock_t ly_ctx_lock; /**< libyang's context rwlock */
};
struct ly_ctx {
struct dict_table dict;
struct ly_modules_list models;
ly_module_imp_clb imp_clb;
void *imp_clb_data;
ly_module_data_clb data_clb;
void *data_clb_data;
#ifdef LY_ENABLED_LYD_PRIV
void *(*priv_dup_clb)(const void *priv);
#endif
pthread_key_t errlist_key;
uint8_t internal_module_count;
};
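For orientation, a minimal sketch of creating and populating such a context with the libyang 1.x API; the search path and module name are placeholders, and Netopeer2 itself instead builds its context from the schemas stored in sysrepo (see np2srv_init_schemas above):

#include <libyang/libyang.h>

struct ly_ctx *ctx = ly_ctx_new("/usr/local/share/yang", 0);
if (!ctx) {
    /* handle error */
}
/* load a module by name (latest revision found in the search dir) */
const struct lys_module *mod = ly_ctx_load_module(ctx, "ietf-interfaces", NULL);
if (!mod) {
    /* handle error */
}
/* ... parse and print data trees against ctx ... */
ly_ctx_destroy(ctx, NULL);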

libyang

/**
* @brief Structure describing an element in an XML tree.
*
* If the name item is NULL, then the content is part of the mixed content.
*
* Children elements are connected in a half ring doubly linked list:
* - first's prev pointer points to the last children
* - last's next pointer is NULL
*/
struct lyxml_elem {
char flags; /**< special flags */
#define LYXML_ELEM_MIXED 0x01 /* element contains mixed content */
/* 0x80 is reserved and cannot be set! */

struct lyxml_elem *parent; /**< parent node */
struct lyxml_attr *attr; /**< first attribute declared in the element */
struct lyxml_elem *child; /**< first children element */
struct lyxml_elem *next; /**< next sibling node */
struct lyxml_elem *prev; /**< previous sibling node */

const char *name; /**< name of the element */
const struct lyxml_ns *ns; /**< namespace of the element */
const char *content; /**< text content of the node if any */
};
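A small sketch of what the half-ring layout means in practice, based purely on the struct above (elem is assumed to be an already-parsed lyxml_elem):

struct lyxml_elem *iter;

/* iterate all children: follow next until NULL */
for (iter = elem->child; iter; iter = iter->next) {
    /* iter->name, iter->content, iter->attr ... */
}

/* the last child is reachable in O(1): the first child's prev points to it */
struct lyxml_elem *last = elem->child ? elem->child->prev : NULL;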

The parsing process

in file libnetconf2/src/io.c

worker_thread(void *arg)
/* listen for incoming requests on active NETCONF sessions */
rc = nc_ps_poll(np2srv.nc_ps, 0, &ncs);
nc_ps_poll_session_io(cur_session, NC_SESSION_LOCK_TIMEOUT, ts_cur.tv_sec, msg);
nc_server_recv_rpc_io(cur_session, timeout, &rpc);
msgtype = nc_read_msg_io(session, io_timeout, &xml, 0);
/* build XML tree */
*data = lyxml_parse_mem(session->ctx, msg, 0);
/* see the code for the details */
root = lyxml_parse_elem(ctx, c, &len, NULL, options);

RPC comparison

clixon

  • the clixon project does not use query options!!!
// the code that generates the rpc is in clixon/lib/src/clixon_proto_client.c, clicon_rpc_get(h, path, &xret)
if ((cb = cbuf_new()) == NULL)
    goto done;
cprintf(cb, "<rpc");
if ((username = clicon_username_get(h)) != NULL)
    cprintf(cb, " username=\"%s\"", username);
cprintf(cb, "><get>");
if (xpath && strlen(xpath))
    cprintf(cb, "<filter type=\"xpath\" select=\"%s\"/>", xpath);
cprintf(cb, "</get></rpc>");

// for this get RPC example the only thing filled in is the select option, and its value is an xpath; since I am requesting /restconf/data it is effectively the root
// what it looks like in the log
clicon_rpc_msg request:<rpc username="none"><get><filter type="xpath" select="/"/></get></rpc>

netopeer2

Related data structures
// defined in libnetconf2/src/messages_client.h
/**
* @brief Enumeration of RPC types
*
* Note that NC_RPC_CLOSE is not defined since sending \<close-session\> is done implicitly by nc_session_free()
*/
typedef enum {
NC_RPC_UNKNOWN = 0, /**< invalid RPC. */
NC_RPC_ACT_GENERIC, /**< user-defined generic RPC/action. */

/* ietf-netconf */
NC_RPC_GETCONFIG, /**< \<get-config\> RPC. */
NC_RPC_EDIT, /**< \<edit-config\> RPC. */
NC_RPC_COPY, /**< \<copy-config\> RPC. */
NC_RPC_DELETE, /**< \<delete-config\> RPC. */
NC_RPC_LOCK, /**< \<lock\> RPC. */
NC_RPC_UNLOCK, /**< \<unlock\> RPC. */
NC_RPC_GET, /**< \<get\> RPC. */
NC_RPC_KILL, /**< \<kill-session\> RPC. */
NC_RPC_COMMIT, /**< \<commit\> RPC. */
NC_RPC_DISCARD, /**< \<discard-changes\> RPC. */
NC_RPC_CANCEL, /**< \<cancel-commit\> RPC. */
NC_RPC_VALIDATE, /**< \<validate\> RPC. */

/* ietf-netconf-monitoring */
NC_RPC_GETSCHEMA, /**< \<get-schema\> RPC. */

/* notifications */
NC_RPC_SUBSCRIBE /**< \<create-subscription\> RPC. */
} NC_RPC_TYPE;

// defined in libnetconf2/src/messages_p.h
// the point of this layout is to have a generic nc_rpc type that is cast to the concrete type when needed, a bit like C++ subclassing emulated in C
struct nc_rpc {
NC_RPC_TYPE type;
};
struct nc_rpc_get {
NC_RPC_TYPE type; /**< NC_RPC_GET */
char *filter; /**< either XML subtree (starts with '<') or an XPath (starts with '/' or an alpha) */
NC_WD_MODE wd_mode;
char free;
};
struct nc_rpc_edit {
NC_RPC_TYPE type; /**< NC_RPC_EDIT */
NC_DATASTORE target;
NC_RPC_EDIT_DFLTOP default_op;
NC_RPC_EDIT_TESTOPT test_opt;
NC_RPC_EDIT_ERROPT error_opt;
char *edit_cont; /**< either URL (starts with aplha) or config (starts with '<') */
char free;
};
cli
  • struct nc_rpc rpc -> lyd_node data -> send from cli to server
  • <rpc> -> lyxml_elem -> lyd_node (using lyd_parse_xml)
  • or maybe we can use lyd_parse_mem to do parsing job directly.
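A hedged sketch of the lyd_parse_mem route mentioned in the last bullet, using the libyang 1.x signature; the XML string and options are illustrative, and LYD_OPT_RPC expects the operation element itself, i.e. what sits inside <rpc>:

/* ctx is the loaded struct ly_ctx *; the string is a placeholder <get> with an xpath filter */
const char *op_xml =
    "<get xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">"
    "<filter type=\"xpath\" select=\"/\"/>"
    "</get>";

struct lyd_node *op = lyd_parse_mem(ctx, op_xml, LYD_XML, LYD_OPT_RPC, NULL);
if (!op) {
    fprintf(stderr, "parse failed: %s\n", ly_errmsg(ctx));
}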
// create the rpc; this returns a struct nc_rpc object. Note that inside nc_rpc_get the concrete struct nc_rpc_get format is used, and the return value decays back to the generic form (it is cast back to the concrete form later)
// at this point filter already holds the string that followed --filter-xpath on the CLI, as a char*
rpc = nc_rpc_get(filter, wd, NC_PARAMTYPE_CONST);
// send using this object
ret = cli_send_recv(rpc, output, wd);
// which ends up in this function
msgtype = nc_send_rpc(session, rpc, 1000, &msgid);
// in more detail, this is the switch case inside the function above
/* an example rpc:
<rpc xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\" message-id=\"39\">
  <get xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">
    <filter type=\"xpath\" xmlns:bld=\"urn:building:test\"
            select=\"/bld:*\"/>
  </get>
</rpc>
*/
/* so, having read this part, we can see that:
- a data node carries <name xmlns="namespace">, where the namespace is the one given when the node is created
- attributes can be inserted into a node, which produces an attr=xxx fragment inside it
One question: when I created the filter I used the ietf-netconf namespace, so why does it later become urn:building:test, and why is the marker xmlns:bld, with a prefix? */
case NC_RPC_GET:
    rpc_g = (struct nc_rpc_get *)rpc; // cast the previously created rpc object to the specific struct (here: get)
    /* create a new lyd_node; the lys_module is ietf-netconf and the name is the schema node name */
    data = lyd_new(NULL, ietfnc, "get");
    if (rpc_g->filter) {
        if (!rpc_g->filter[0] || (rpc_g->filter[0] == '<')) {
            node = lyd_new_anydata(data, ietfnc, "filter", rpc_g->filter, LYD_ANYDATA_SXML);
            lyd_insert_attr(node, NULL, "type", "subtree");
        } else {
            /* we use xpath: first create an anydata data node named filter,
               leave it empty, type CONSTSTRING */
            node = lyd_new_anydata(data, ietfnc, "filter", NULL, LYD_ANYDATA_CONSTSTRING);
            /* then insert two attributes into it */
            lyd_insert_attr(node, NULL, "type", "xpath");
            lyd_insert_attr(node, NULL, "select", rpc_g->filter);
        }
        if (!node) {
            lyd_free(data);
            return NC_MSG_ERROR;
        }
    }
// gdb debug info for the lyd_node produced by the steps above:
// comparing with the server's parse result, the only difference is that the server-side one has the priv field filled in
/*
(gdb) p *data
$10 = {schema = 0x697440, validity = 0 '\000', dflt = 0 '\000', when_status = 0 '\000',
attr = 0x0, next = 0x0, prev = 0x6927d0, parent = 0x0, hash = 1772939101, ht = 0x0,
child = 0x691420}
(gdb) p *data
$11 = {name = 0x6717a0 "get",
dsc = 0x685800 "Retrieve running configuration and device state information.",
ref = 0x670ed0 "RFC 6241, Section 7.7", flags = 0, ext_size = 0 '\000',
iffeature_size = 0 '\000', padding = "\000\000\000", ext = 0x0, iffeature = 0x0,
module = 0x689150, nodetype = LYS_RPC, parent = 0x0, child = 0x6975c0,
next = 0x697540, prev = 0x68e460, priv = 0x0, hash = "\000\000\000"}
*/
/* after lyd_validate, the lyd_node validity field becomes '\000' */
lyd_validate(&data,...)
/* send RPC, store its message ID; at this point the rpc has been converted into data of type lyd_node */
r = nc_send_msg_io(session, timeout, data);
nc_write_msg_io(session, io_timeout, NC_MSG_RPC, op, NULL); // data is passed in as op
/* below is the NC_MSG_RPC branch selected by the switch case */
content = va_arg(ap, struct lyd_node *);
attrs = va_arg(ap, const char *); // empty for the test case
/* the rpc header, built via buf first */
count = asprintf(&buf, "<rpc xmlns=\"%s\" message-id=\"%"PRIu64"\"%s>",
                 NC_NS_BASE, session->opts.client.msgid + 1, attrs ? attrs : "");
if (count == -1) {
    ERRMEM;
    ret = NC_MSG_ERROR;
    goto cleanup;
}
/* send the header */
nc_write_clb((void *)&arg, buf, count, 0);
free(buf);
/* call the libyang printer to emit the lyd_node as XML and send it */
if (lyd_print_clb(nc_write_xmlclb, (void *)&arg, content, LYD_XML, LYP_WITHSIBLINGS | LYP_NETCONF)) {
    ret = NC_MSG_ERROR;
    goto cleanup;
}
/* send the trailer */
nc_write_clb((void *)&arg, "</rpc>", 6, 0);
server

In brief: on the server side the XML-formatted rpc is first parsed into a lyxml_elem, and then into a lyd_node.

worker_thread(void *arg)
/* listen for incoming requests on active NETCONF sessions */
rc = nc_ps_poll(np2srv.nc_ps, 0, &ncs);
ret = nc_ps_poll_session_io(cur_session, NC_SESSION_LOCK_TIMEOUT,
ts_cur.tv_sec, msg);
ret = nc_server_recv_rpc_io(cur_session, timeout, &rpc);
msgtype = nc_read_msg_io(session, io_timeout, &xml, 0);
*data = lyxml_parse_mem(session->ctx, msg, 0);
/* to see the rpc, set a breakpoint on lyxml_parse_mem */
/* $3 = 0x7fffd4007c50 "
<rpc xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\" message-id=\"39\">
<get xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">
<filter type=\"xpath\" xmlns:bld=\"urn:building:test\"
select=\"/bld:*\"/>
</get>
</rpc>"...
*/
(*rpc)->tree = lyd_parse_xml(server_opts.ctx, &xml->child,
LYD_OPT_RPC | LYD_OPT_DESTRUCT | LYD_OPT_NOEXTDEPS |
LYD_OPT_STRICT, NULL);
# building.yang from the netopeer2 test example above
# note that the select=\"/bld:*\" in the filter refers to this module's prefix
module building {
  yang-version 1;
  namespace "urn:building:test";

  prefix bld;

  organization "building";
  contact "building address";
  description "yang model for buildings";
  revision "2018-01-22" {
    description "initial revision";
  }

  container rooms {
    list room {
      key room-number;
      leaf room-number {
        type uint16;
      }
      leaf size {
        type uint32;
      }
    }
  }
}

initiate

  • what is the monitoring part?
    • netconf_monitoring.c, @brief netopeer2-server ietf-netconf-monitoring statistics and counters
    • how to reconcile the differences between restconf and netconf monitoring
    • initial capability setup
  • poll session?
  • what is a capability?
  • what loading a yang module involves
    • sysrepo subscriptions via np2srv_sr_subtree_change_subscribe
    • features, e.g. feature_change_ietf_system(np2srv.sr_sess.srs, "local-users", 1)
server_init(void)
// connect sysrepo
rc = sr_connect("netopeer2", SR_CONN_DAEMON_REQUIRED |
SR_CONN_DAEMON_START, &np2srv.sr_conn);
rc = sr_session_start(np2srv.sr_conn, np2srv.sr_sess.ds,
np2srv.sr_sess.opts, &np2srv.sr_sess.srs);
/* init libyang context with schemas */
if (np2srv_init_schemas()) {
goto error;
}

/* init monitoring */
ncm_init();
stats.netconf_start_time = time(NULL);
pthread_mutex_init(&stats.lock, NULL);

/* init libnetconf2 */
nc_server_init(np2srv.ly_ctx);
nc_init();
/* set default <get-schema> callback if not specified */
rpc = ly_ctx_get_node(ctx, NULL, "/ietf-netconf-monitoring:get-schema", 0);
if (rpc && !rpc->priv) {
lys_set_private(rpc, nc_clb_default_get_schema);
}
/* set default <close-session> callback if not specified */
rpc = ly_ctx_get_node(ctx, NULL, "/ietf-netconf:close-session", 0);
/* ... */

/* prepare poll session structure for libnetconf2 */
np2srv.nc_ps = nc_ps_new();

/* set with-defaults capability basic-mode */
nc_server_set_capab_withdefaults(NC_WD_EXPLICIT, NC_WD_ALL | NC_WD_ALL_TAG | NC_WD_TRIM | NC_WD_EXPLICIT);

/* set capabilities for the NETCONF Notifications */
nc_server_set_capability("urn:ietf:params:netconf:capability:notification:1.0");
nc_server_set_capability("urn:ietf:params:netconf:capability:interleave:1.0");

/* set NETCONF operations callbacks */
snode = ly_ctx_get_node(np2srv.ly_ctx, NULL, "/ietf-netconf:get-config", 0);
nc_set_rpc_callback(snode, op_get);

/* set Notifications subscription callback */
snode = ly_ctx_get_node(np2srv.ly_ctx, NULL, "/notifications:create-subscription", 0);
nc_set_rpc_callback(snode, op_ntf_subscribe);

/* set server options */
mod = ly_ctx_get_module(np2srv.ly_ctx, "ietf-netconf-server", NULL, 1);
if (mod && strcmp(NP2SRV_KEYSTORED_DIR, "none")) {
if (ietf_netconf_server_init(mod)) {
goto error;
}

mod = ly_ctx_get_module(np2srv.ly_ctx, "ietf-system", NULL, 1);
if (mod) {
if (ietf_system_init(mod)) {
goto error;
}
} else {
WRN("Sysrepo does not implement the \"ietf-system\" module, SSH publickey authentication will not work.");
}
} else {
WRN("Sysrepo does not have the \"ietf-netconf-server\" module or keystored keys dir unknown, using default NETCONF server options.");
nc_server_ssh_set_hostkey_clb(np2srv_default_hostkey_clb, NULL, NULL);
if (nc_server_add_endpt("main", NC_TI_LIBSSH)) {
goto error;
}
if (nc_server_endpt_set_address("main", "0.0.0.0")) {
goto error;
}
if (nc_server_endpt_set_port("main", 830)) {
goto error;
}
if (nc_server_ssh_endpt_add_hostkey("main", "default", -1)) {
goto error;
}
}

POST implementation

restconf

  • there is no concept of a datastore
  • clixon flow:
    • edit-config: target = candidate; default-operation = none
    • commit
    • if netopeer has the startup feature
      • copy-config to startup
  • an important difference: compared with netopeer2's edit-config, restconf may carry part of the path in the URL (the distinction between a datastore resource and a data resource).
edit-config [--help] --target running|candidate (--config[=<file>] | --url <url>) [--defop merge|replace|none] [--test set|test-only|test-then-set] [--error stop|continue|rollback]

netconf: edit-config

  • operation: Elements in the <config> subtree MAY contain an “operation” attribute, which belongs to the NETCONF namespace

  • netconf defines the operation attribute values as merge | replace | create | delete | remove

    • in the yang only typedef edit-operation-type { is defined; I have not seen it actually used -- still unresolved
    • why does it say it belongs to the NETCONF namespace? -- this can be answered now: the data XML below has to declare the nc prefix
    • <interface xc:operation="replace">
    • A: it seems this attribute is written by hand and has to be added yourself when performing the edit, on the top-level(?) node
    • AA: the first half is right, it is written by hand, but it goes inside the <config> that is sent. It looks like this:
# delete test
<rooms xmlns="urn:building:test" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
  <room nc:operation="delete">
    <room-number>12</room-number>
    <room-name>dabc</room-name>
  </room>
</rooms>
# note: the nc prefix has to be declared up front: xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0"
# otherwise the error below is reported, because without declaring nc the parser does not know whose operation attribute it is
> edit-config --target running --config
ERROR
type: protocol
tag: unknown-attribute
severity: error
message: Invalid attribute "operation".
bad-attr #1: operation
bad-elem #1: /building:rooms/room

# netopeer2
(gdb) p *sessions->srs
$44 = {conn_ctx = 0x6492d0, id = 1891619541, lock = {__data = {__lock = 0, __count = 0,
__owner = 0, __nusers = 0, __kind = 0, __spins = 0, __elision = 0, __list = {__prev = 0x0,
__next = 0x0}}, __size = '\000' <repeats 39 times>, __align = 0}, last_error = SR_ERR_OK,
error_info = 0x0, error_info_size = 0, error_cnt = 0, notif_session = false, commit_id = 0}
(gdb) p data
$7 = 0x725ea0 "
<rpc xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\" message-id=\"22\">
<edit-config xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">
<target>
<running/>
</target>
<config> // the part I submitted is inside config
<rooms xmlns=\"urn:building:test\">\n
<room>\n
<room-number>2></room-number>\n
<room-name>abcd</room-name>\n
<size>100</size>\n
</room>\n
</rooms>\n
</config>
</edit-config>
</rpc>"

implement

  • what is implemented now converts the partial path in the URL into the data, but I worry my understanding may be off, because the spec says: "If the target resource type is a datastore or data resource, then the POST is treated as a request to create a top-level resource or child resource, respectively."
# datastore
curl -s -i -X POST -H "Accept: application/yang-data+xml" -d '<rooms xmlns="urn:building:test"><room><room-number>2333</room-number><room-name>deabc</room-name><size>100</size></room></rooms>' http://localhost/restconf/data
# json
curl -s -i -X POST -H "Accept: application/yang-data+json" -d '{"building:rooms":{"room": [{"room-number":2333,"room-name":"deabc","size":100}]}}' http://localhost/restconf/data

{
"building:rooms":{
"room": [{
"room-number":2333,
"room-name":"deabc",
"size":100
}]
}
}
{"building:rooms":{"room": [{"room-number":2333,"room-name":"deabc","size":100}]}}

# data resource
# part of the path is in the url
curl -s -i -X POST -H "Accept: application/yang-data+xml" -d '<room xmlns="urn:building:test"><room-number>2333</room-number><room-name>deabc</room-name><size>100</size></room>' http://localhost/restconf/data/building:rooms
# json
curl -s -i -X POST -H "Accept: application/yang-data+json" -d '{"building:room": [{"room-number":2333,"room-name":"deabc","size":100}]}' http://localhost/restconf/data/building:rooms

# in other words, I need to use /building:rooms to generate <rooms xmlns="urn:building:test"></rooms>
# and splice it together with the original data into the complete payload (see the sketch after this block)

# multi-level container test
curl -s -i -X POST -H "Accept: application/yang-data+xml" -d '<storages xmlns="urn:building:test"><boxs><toys><toy-number>1</toy-number><toy-owner>tom</toy-owner></toys></boxs></storages>' http://localhost/restconf/data/

curl -s -i -X POST -H "Accept: application/yang-data+xml" -d '<toys xmlns="urn:building:test"><toy-number>1</toy-number><toy-owner>tom</toy-owner></toys>' http://localhost/restconf/data/building:storages/boxs

# jukebox test for multi-key
# datastore
curl -s -i -X POST -H "Accept: application/yang-data+xml" -d '<jukebox xmlns="http://example.com/ns/example-jukebox"><library><artist><name>Foo Fighters</name><album><name>One by One</name><year>2012</year><song><name>song1</name><location>/home/music</location><format>MP3</format><length>286</length></song></album></artist></library></jukebox>' http://localhost/restconf/data

# data resource
curl -s -i -X POST -H "Accept: application/yang-data+xml" -d '<song xmlns="http://example.com/ns/example-jukebox"><name>song1</name><location>music</location><format>MP3</format><length>286</length></song>' http://localhost/restconf/data/example-jukebox:jukebox/library/artist=Foo%20Fighters/album=Wasting%20Light


<jukebox xmlns="http://example.com/ns/example-jukebox">
  <library>
    <artist>
      <name>Foo Fighters</name>
      <album>
        <name>One by One</name>
        <year>2012</year>
        <song>
          <name>song1</name>
          <location>/home/music</location>
          <format>MP3</format>
          <length>286</length>
        </song>
      </album>
    </artist>
  </library>
</jukebox>
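A hypothetical sketch of the wrapping step referred to above: turning the container segments of the api-path into an XML envelope around the request body. The function and variable names (wrap_api_path, segs) are illustrative, list-key segments such as artist=Foo%20Fighters are not handled here, and the real implementation builds this with its own buffer routines.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* wrap `body` in nested elements segs[0]..segs[nsegs-1];
 * only the outermost element gets the xmlns declaration */
static char *
wrap_api_path(const char *ns, const char **segs, int nsegs, const char *body)
{
    size_t len = strlen(body) + strlen(ns) + 32;
    int i;

    for (i = 0; i < nsegs; i++) {
        len += 2 * strlen(segs[i]) + 8;
    }
    char *out = malloc(len);
    if (!out) {
        return NULL;
    }
    char *p = out;
    for (i = 0; i < nsegs; i++) {
        if (i == 0) {
            p += sprintf(p, "<%s xmlns=\"%s\">", segs[i], ns);
        } else {
            p += sprintf(p, "<%s>", segs[i]);
        }
    }
    p += sprintf(p, "%s", body);
    for (i = nsegs - 1; i >= 0; i--) {
        p += sprintf(p, "</%s>", segs[i]);
    }
    return out; /* caller frees */
}

/* e.g. wrap_api_path("urn:building:test", (const char *[]){"rooms"}, 1, "<room>...</room>")
 * yields <rooms xmlns="urn:building:test"><room>...</room></rooms> */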

double POST error

# this part makes netopeer2 report an error
# the edit content is:
<rooms xmlns="urn:building:test" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
  <room nc:operation="create">
    <room-number>12</room-number>
    <room-name>dabc</room-name>
    <size>200</size>
  </room>
</rooms>

> edit-config --target running --config
ERROR
type: application
tag: data-exists
severity: error
path: /building:rooms/room[room-number='2333'][room-name='deabc']
message: Request could not be completed because the relevant data model content already exists.

Coexistence with NETCONF

  • If the NETCONF server supports :writable-running, all edits to configuration nodes in {+restconf}/data are performed in the running configuration datastore.
  • Otherwise, if the device supports :candidate, all edits to configuration nodes in {+restconf}/data are performed in the candidate configuration datastore. The candidate MUST be automatically committed to running immediately after each successful edit.
  • my current implementation roughly follows these two rules; since netconf has writable-running enabled, I assume that once the first rule applies the second is ignored, which means writing directly into running is correct.

PUT

  • the PUT mappings written at the start of chapter 4 (of the RFC):
    • | PUT | <copy-config> (PUT on datastore); ignore this odd one for now
    • | PUT | <edit-config> (nc:operation="create/replace"); implement this one first
  • Both the POST and PUT methods can be used to create data resources.
    • The difference is that for POST, the client does not provide the resource identifier for the resource that will be created.
    • The target resource for the POST method for resource creation is the parent of the new resource.
    • The target resource for the PUT method for resource creation is the new resource.
  • so the stated differences between the two are:
    • POST can only create something new; the target is the parent of the object being created
    • PUT can be either create or replace depending on the situation, because the PUT target is the object itself
      • first check whether the object exists; if not, this maps to create
        • open question: how to check?
      • if the object does exist, replace the existing object with the new one
        • open question: delete-then-add, or modify in place? need to check what sysrepo and edit-config support
        • A: replace in edit-config works like this: delete first, then set anew.
  • to summarize: the PUT target is determined by the url, its content by the data
    • one question: must the PUT content be complete?
    • it does not seem to be required; there is also the matter of default values
# replace test
<rooms xmlns="urn:building:test" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
  <room nc:operation="replace">
    <room-number>12</room-number>
    <room-name>dabc</room-name>
    <size>200</size>
  </room>
</rooms>
  • testing shows that an edit-config op=replace on an object that does not exist creates it by default, which means I do not need to check manually: replace alone is enough, no separate create
# PUT can also change just a single value
# Q: how should such a target be located? I am afraid of deleting other siblings at the same level too.
new "restconf PUT change type to eth0 (non-key sub-element to list)"
expectfn 'curl -s -X PUT -d {"example:type":"eth0"} http://localhost/restconf/data/example:cont1/interface=local0/type' 0 ""
# test 
curl -s -i -X PUT -H "Accept: application/yang-data+xml" -d '<album xmlns="http://example.com/ns/example-jukebox"><name>Wasting Light</name><year>2031</year></album>' http://localhost/restconf/data/example-jukebox:jukebox/library/artist=Foo%20Fighters/album=Wasting%20Light
# compare with POST
curl -s -i -X POST -H "Accept: application/yang-data+xml" -d '<song xmlns="http://example.com/ns/example-jukebox"><name>song1</name><location>music</location><format>MP3</format><length>286</length></song>' http://localhost/restconf/data/example-jukebox:jukebox/library/artist=Foo%20Fighters/album=Wasting%20Light

# input
<album xmlns="http://example.com/ns/example-jukebox"><name>Wasting Light</name><genre>jbox:alternative</genre><year>2011</year></album>

# first strip the head and tail
<name>Wasting Light</name><genre>jbox:alternative</genre><year>2011</year>

# then complete it according to the path
<jukebox xmlns="http://example.com/ns/example-jukebox">
  <library>
    <artist>
      <name>Foo Fighters</name>
      <album>
        <name>One by One</name>
        <year>2012</year>
        <song>
          <name>song1</name>
          <location>/home/music</location>
          <format>MP3</format>
          <length>286</length>
        </song>
      </album>
    </artist>
  </library>
</jukebox>

[INF]: api_path2xml striped data: [<name>Wasting Light</name><genre>jbox:alternative</genre><year>2011</year>]
[INF]: api_path2xml cxml: [<jukebox xmlns="http://example.com/ns/example-jukebox"><library><artist><name>Foo Fighters</name><album><name>Wasting Light</name><genre>jbox:alternative</genre><year>2011</year></album></artist></library></jukebox>]
################################
# json test
# the full datastore version of the test
curl -s -i -X PATCH -H "Accept: application/yang-data+json" -d '{ "example-jukebox:jukebox" : { "library" : { "artist" : [ { "name" : "Fighters", "album" : [ { "name" : "Light", "genre" : "example-jukebox:alternative", "year" : 2011, "song" : [ { "name" : "Light", "location" : "/media/foo/a7/wasting-light.mp3", "format" : "MP3", "length" : 286 } ] } ] } ] } } }' http://localhost/restconf/data

# the data resource version
curl -s -i -X PATCH -H "Accept: application/yang-data+json" -d '{ "example-jukebox:album" : [ { "name" : "Light", "genre" : "example-jukebox:alternative", "year" : 2011, "song" : [ { "name" : "Light", "location" : "/media/foo/a7/wasting light.mp3", "format" : "MP3", "length" : 286 } ] } ] }' http://localhost/restconf/data/example-jukebox:jukebox/library/artist=Fighters/album=Light
# pretty-printed version of the input
{
  "example-jukebox:album" : [
    {
      "name" : "Light",
      "genre" : "example-jukebox:alternative",
      "year" : 2011,
      "song" : [
        {
          "name" : "Light",
          "location" : "/media/foo/a7/wasting-light.mp3",
          "format" : "MP3",
          "length" : 286
        }
      ]
    }
  ]
}
# trimmed
{
  "name" : "Light",
  "genre" : "example-jukebox:alternative",
  "year" : 2011,
  "song" : [
    {
      "name" : "Light",
      "location" : "/media/foo/a7/wasting-light.mp3",
      "format" : "MP3",
      "length" : 286
    }
  ]
}
# first step: modname + top container // need cbtail
{
  "example-jukebox:jukebox" : {
# second step: container
{
  "example-jukebox:jukebox" : {
    "library" : { # new line
# third step: list instance with key
{
  "example-jukebox:jukebox" : {
    "library" : {
      "artist" : [ # new line
        { # new line
          "name" : "Fighters", # new line, key
# (too lazy to write the rest out)
{ "example-jukebox:jukebox" : { "library" : { "artist" : [ { "name" : "Fighters", "album" : [ { "name" : "Light", "name" : "Light", "genre" : "example-jukebox:alternative", "year" : 2011, "song" : [ { "name" : "Light", "location" : "/media/foo/a7/wasting light.mp3", "format" : "MP3", "length" : 286 } ] } ] } ] } } }


{ "example-jukebox:jukebox" : { "library" : { "artist" : [ { "name" : "Fighters", "album" : [ { "name" : "Light", "genre" : "example-jukebox:alternative", "year" : 2011, "song" : [ { "name" : "Light", "location" : "/media/foo/a7/wasting-light.mp3", "format" : "MP3", "length" : 286 } ] } ] } ] } } }
# the fully completed version
{
  "example-jukebox:jukebox" : {
    "library" : {
      "artist" : [
        {
          "name" : "Fighters",
          "album" : [
            {
              "name" : "Light",
              "genre" : "example-jukebox:alternative",
              "year" : 2011,
              "song" : [
                {
                  "name" : "Light",
                  "location" : "/media/foo/a7/wasting-light.mp3",
                  "format" : "MP3",
                  "length" : 286
                }
              ]
            }
          ]
        }
      ]
    }
  }
}

Error

  • so far only the error caused by POSTing the same CREATE a second time has been investigated
  • need to find out whether this error format is defined by the ietf-netconf yang
  • whether the error-related functions live in libnetconf2
  • also need to find out whether, for restconf, errors follow the yang or the http status codes in the protocol text
# test xml
<rooms xmlns="urn:building:test" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
  <room nc:operation="create">
    <room-number>12</room-number>
    <room-name>dabc</room-name>
    <size>200</size>
  </room>
</rooms>

# gdb breakpoint locations
1 breakpoint keep y 0x000000000041a49c in op_editconfig
    at /home/sparta/source/Netopeer2-0.7-r1/server/op_editconfig.c:166
    breakpoint already hit 1 time
2 breakpoint keep y 0x000000000041157b in np2srv_sr_set_item
    at /home/sparta/source/Netopeer2-0.7-r1/server/operations.c:136
    breakpoint already hit 1 time
# trigger condition: POST the same content a second time
# trigger location:
inside np2srv_sr_set_item:
rc = sr_set_item(srs, xpath, value, opts);
# rc returns 14, which in the subsequent switch case leads to
case SR_ERR_DATA_EXISTS:
    e = nc_err(NC_ERR_DATA_EXISTS, NC_ERR_TYPE_PROT);
    nc_err_set_path(e, xpath);
    if (*ereply) {
        nc_server_reply_add_err(*ereply, e);
    } else {
        *ereply = nc_server_reply_err(e);
    }
    break;

# error on the server side
[ERR]: (cl_request_process:543) Error by processing of the set-item request (session id=616876644): The item already exists.
[ERR]: (sr_set_item:1832) Error by processing of the request.
# error on the client side
ERROR
type: application
tag: data-exists
severity: error
path: /building:rooms/room[room-number='12'][room-name='dabc']
message: Request could not be completed because the relevant data model content already exists.

# handling of the error
session_server.c, nc_server_send_reply_io function:
r = nc_write_msg_io(session, io_timeout, NC_MSG_REPLY, rpc->root, reply);
case NC_MSG_REPLY:
case NC_RPL_ERROR:
    error_rpl = (struct nc_server_reply_error *)reply;
    for (i = 0; i < error_rpl->count; ++i) {
        nc_write_error(&arg, error_rpl->err[i], base_prefix);
    }
    break;

Setting up the error structure

/* an error is mainly type, tag, message and path; app-tag is added with nc_err_set_app_tag */
case SR_ERR_DATA_EXISTS:
    /* nc_err sets type, tag and message */
    e = nc_err(NC_ERR_DATA_EXISTS, NC_ERR_TYPE_PROT);
    /* the path is set here */
    nc_err_set_path(e, xpath);
    if (*ereply) {
        nc_server_reply_add_err(*ereply, e);
    } else {
        *ereply = nc_server_reply_err(e);
    }
    break;
// listing them again:
nc_err: create an error from a type and a tag; internally it also calls set_msg
nc_err_set_path
nc_err_set_app_tag
nc_err_set_msg
nc_err_set_sid
// each of these has a corresponding get (replace set with get) for reading the value back

// freeing the memory
nc_err_free
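A small usage sketch of that API when building an error reply by hand; the tag/type, message, app-tag and path values here are made up, and the structure mirrors the SR_ERR_DATA_EXISTS branch above:

struct nc_server_error *e;
struct nc_server_reply *reply;

e = nc_err(NC_ERR_OP_FAILED, NC_ERR_TYPE_APP);   /* sets type, tag and a default message */
nc_err_set_msg(e, "backend rejected the request", "en");
nc_err_set_app_tag(e, "example-app-tag");
nc_err_set_path(e, "/building:rooms/room");

reply = nc_server_reply_err(e);                  /* wrap it in an <rpc-reply>/<rpc-error> */
/* ... hand reply back to libnetconf2; nc_server_reply_free(reply) releases it (and e) */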

YANGs

https://github.com/YangModels/yang/tree/master/standard/ietf/RFC
# basics
ietf-ip
ietf-interfaces
ietf-inet-types
ietf-yang-types
ietf-yang-metadata
ietf-yang-library
# netconf including:
ietf-netconf
ietf-netconf-notifications
ietf-netconf-acm
ietf-netconf-with-defaults
ietf-netconf-server
ietf-netconf-monitoring
ietf-ssh-server
ietf-tls-server
# restconf
ietf-restconf
ietf-restconf-monitoring
ietf-restconf-server #draft
ietf-restconf-transactions@2018-06-11.yang # extension draft
ietf-restconf-subscribed-notifications@2019-01-11.yang # extension draft
# ietf-restconf-monitoring@2017-01-26.yang 
module: ietf-restconf-monitoring
+--ro restconf-state
+--ro capabilities
| +--ro capability* ietf-inet-types:uri
+--ro streams
+--ro stream* [name]
+--ro name string
+--ro description? string
+--ro replay-support? boolean <false>
+--ro replay-log-creation-time? ietf-yang-types:date-and-time
+--ro access* [encoding]
+--ro encoding string
+--ro location ietf-inet-types:uri
# ietf-netconf-monitoring.yang 
module: ietf-netconf-monitoring
+--ro netconf-state
+--ro capabilities
| +--ro capability* ietf-inet-types:uri
+--ro datastores
| +--ro datastore* [name]
| +--ro name netconf-datastore-type
| +--ro locks!
| +--ro (lock-type)?
| +--:(global-lock)
| | +--ro global-lock
| | +--ro locked-by-session uint32
| | +--ro locked-time ietf-yang-types:date-and-time
| +--:(partial-lock)
| +--ro partial-lock* [lock-id]
| +--ro lock-id uint32
| +--ro locked-by-session uint32
| +--ro locked-time ietf-yang-types:date-and-time
| +--ro select* ietf-yang-types:xpath1.0
| +--ro locked-node* instance-identifier
+--ro schemas
| +--ro schema* [identifier version format]
| +--ro identifier string
| +--ro version string
| +--ro format identityref
| +--ro namespace ietf-inet-types:uri
| +--ro location* union
+--ro sessions
| +--ro session* [session-id]
| +--ro session-id uint32
| +--ro transport identityref
| +--ro username string
| +--ro source-host? ietf-inet-types:host
| +--ro login-time ietf-yang-types:date-and-time
| +--ro in-rpcs? ietf-yang-types:zero-based-counter32
| +--ro in-bad-rpcs? ietf-yang-types:zero-based-counter32
| +--ro out-rpc-errors? ietf-yang-types:zero-based-counter32
| +--ro out-notifications? ietf-yang-types:zero-based-counter32
+--ro statistics
+--ro netconf-start-time? ietf-yang-types:date-and-time
+--ro in-bad-hellos? ietf-yang-types:zero-based-counter32
+--ro in-sessions? ietf-yang-types:zero-based-counter32
+--ro dropped-sessions? ietf-yang-types:zero-based-counter32
+--ro in-rpcs? ietf-yang-types:zero-based-counter32
+--ro in-bad-rpcs? ietf-yang-types:zero-based-counter32
+--ro out-rpc-errors? ietf-yang-types:zero-based-counter32
+--ro out-notifications? ietf-yang-types:zero-based-counter32

rpcs:
+---x get-schema
+---- input
| +---w identifier string
| +---w version? string
| +---w format? identityref
+---- output
+--ro data? anyxml
module: ietf-netconf

rpcs:
+---x get-config
| +---- input
| | +---w source
| | | +---w (config-source)
| | | +--:(running)
| | | +---w running? empty
| | +---w filter? anyxml
| +---- output
| +--ro data? anyxml
+---x edit-config
| +---- input
| +---w target
| | +---w (config-target)
| +---w default-operation? enumeration <merge>
| +---w error-option? enumeration <stop-on-error>
| +---w (edit-content)
| +--:(config)
| +---w config? anyxml
+---x copy-config
| +---- input
| +---w target
| | +---w (config-target)
| +---w source
| +---w (config-source)
| +--:(running)
| | +---w running? empty
| +--:(config)
| +---w config? anyxml
+---x delete-config
| +---- input
| +---w target
| +---w (config-target)
+---x lock
| +---- input
| +---w target
| +---w (config-target)
| +--:(running)
| +---w running? empty
+---x unlock
| +---- input
| +---w target
| +---w (config-target)
| +--:(running)
| +---w running? empty
+---x get
| +---- input
| | +---w filter? anyxml
| +---- output
| +--ro data? anyxml
+---x close-session
+---x kill-session
+---- input
+---w session-id session-id-type

ietf-netconf features:
writable-running (off)
candidate (off)
confirmed-commit (off)
rollback-on-error (off)
validate (off)
startup (off)
url (off)
xpath (off)
# ietf-netconf-server.yang
module: ietf-netconf-server
+--rw netconf-server
+--rw session-options
+--rw hello-timeout? uint16 <600>
module: ietf-netconf-server
+--rw netconf-server
+--rw session-options
| +--rw hello-timeout? uint16
+--rw listen {(ssh-listen or tls-listen)}?
| +--rw max-sessions? uint16
| +--rw idle-timeout? uint16
| +--rw endpoint* [name]
| +--rw name string
| +--rw (transport)
| +--:(ssh) {ssh-listen}?
| | +--rw ssh
| | +--rw address? inet:ip-address
| | +--rw port inet:port-number
| | +--rw host-keys
| | | +--rw host-key* [name]
| | | +--rw name string
| | | +--rw (type)?
| | | +--:(public-key)
| | | | +--rw public-key? -> /kc:keychain/p
rivate-keys/private-key/name
| | | +--:(certificate)
| | | +--rw certificate? -> /kc:keychain/p
rivate-keys/private-key/certificate-chains/certificate-chain/certificat
e {ssh-x509-certs}?
| | +--rw client-cert-auth {ssh-x509-certs}?
| | +--rw trusted-ca-certs? -> /kc:keychain/t
rusted-certificates/name
| | +--rw trusted-client-certs? -> /kc:keychain/t
rusted-certificates/name
| +--:(tls) {tls-listen}?
| +--rw tls
| +--rw address? inet:ip-address
| +--rw port inet:port-number
| +--rw certificates
| | +--rw certificate* [name]
| | +--rw name -> /kc:keychain/private-keys/p
rivate-key/certificate-chains/certificate-chain/certificate
| +--rw client-auth
| +--rw trusted-ca-certs? -> /kc:keychain/t
rusted-certificates/name
| +--rw trusted-client-certs? -> /kc:keychain/t
rusted-certificates/name
| +--rw cert-maps
| +--rw cert-to-name* [id]
| +--rw id uint32
| +--rw fingerprint x509c2n:tls-fingerpr
int
| +--rw map-type identityref
| +--rw name string
+--rw call-home {(ssh-call-home or tls-call-home)}?
+--rw netconf-client* [name]
+--rw name string
+--rw (transport)
| +--:(ssh) {ssh-call-home}?
| | +--rw ssh
| | +--rw endpoints
| | | +--rw endpoint* [name]
| | | +--rw name string
| | | +--rw address inet:host
| | | +--rw port? inet:port-number
| | +--rw host-keys
| | | +--rw host-key* [name]
| | | +--rw name string
| | | +--rw (type)?
| | | +--:(public-key)
| | | | +--rw public-key? -> /kc:keychain/p
rivate-keys/private-key/name
| | | +--:(certificate)
| | | +--rw certificate? -> /kc:keychain/p
rivate-keys/private-key/certificate-chains/certificate-chain/certificat
e {ssh-x509-certs}?
| | +--rw client-cert-auth {ssh-x509-certs}?
| | +--rw trusted-ca-certs? -> /kc:keychain/t
rusted-certificates/name
| | +--rw trusted-client-certs? -> /kc:keychain/t
rusted-certificates/name
| +--:(tls) {tls-call-home}?
| +--rw tls
| +--rw endpoints
| | +--rw endpoint* [name]
| | +--rw name string
| | +--rw address inet:host
| | +--rw port? inet:port-number
| +--rw certificates
| | +--rw certificate* [name]
| | +--rw name -> /kc:keychain/private-keys/p
rivate-key/certificate-chains/certificate-chain/certificate
| +--rw client-auth
| +--rw trusted-ca-certs? -> /kc:keychain/t
rusted-certificates/name
| +--rw trusted-client-certs? -> /kc:keychain/t
rusted-certificates/name
| +--rw cert-maps
| +--rw cert-to-name* [id]
| +--rw id uint32
| +--rw fingerprint x509c2n:tls-fingerpr
int
| +--rw map-type identityref
| +--rw name string
+--rw connection-type
| +--rw (connection-type)?
| +--:(persistent-connection)
| | +--rw persistent!
| | +--rw idle-timeout? uint32
| | +--rw keep-alives
| | +--rw max-wait? uint16
| | +--rw max-attempts? uint8
| +--:(periodic-connection)
| +--rw periodic!
| +--rw idle-timeout? uint16
| +--rw reconnect_timeout? uint16
+--rw reconnect-strategy
+--rw start-with? enumeration
+--rw max-attempts? uint8
##########################
ietf-netconf-server features:
listen (off)
ssh-listen (off)
tls-listen (off)
call-home (off)
ssh-call-home (off)
tls-call-home (off)
module: ietf-restconf-server
+--rw restconf-server
+---u restconf-server-app-grouping

grouping restconf-server-grouping
+-- client-identification
+-- cert-maps
+---u x509c2n:cert-to-name
grouping restconf-server-listen-stack-grouping
+-- (transport)
+--:(http) {http-listen}?
| +-- http
| +-- external-endpoint
| | +-- address inet:ip-address
| | +-- port? inet:port-number
| +-- tcp-server-parameters
| | +---u tcps:tcp-server-grouping
| +-- http-server-parameters
| | +---u https:http-server-grouping
| +-- restconf-server-parameters
| +---u rcs:restconf-server-grouping
+--:(https) {https-listen}?
+-- https
+-- tcp-server-parameters
| +---u tcps:tcp-server-grouping
+-- tls-server-parameters
| +---u tlss:tls-server-grouping
+-- http-server-parameters
| +---u https:http-server-grouping
+-- restconf-server-parameters
+---u rcs:restconf-server-grouping
grouping restconf-server-callhome-stack-grouping
+-- (transport)
+--:(https) {https-listen}?
+-- https
+-- tcp-client-parameters
| +---u tcpc:tcp-client-grouping
+-- tls-server-parameters
| +---u tlss:tls-server-grouping
+-- http-server-parameters
| +---u https:http-server-grouping
+-- restconf-server-parameters
+---u rcs:restconf-server-grouping
grouping restconf-server-app-grouping
+-- listen! {https-listen}?
| +-- endpoint* [name]
| +-- name? string
| +---u restconf-server-listen-stack-grouping
+-- call-home! {https-call-home}?
+-- restconf-client* [name]
+-- name? string
+-- endpoints
| +-- endpoint* [name]
| +-- name? string
| +---u restconf-server-callhome-stack-grouping
+-- connection-type
| +-- (connection-type)
| +--:(persistent-connection)
| | +-- persistent!
| +--:(periodic-connection)
| +-- periodic!
| +-- period? uint16
| +-- anchor-time? yang:date-and-time
| +-- idle-timeout? uint16
+-- reconnect-strategy
+-- start-with? enumeration
+-- max-attempts? uint8
module: ietf-netconf-notifications

notifications:
+---n netconf-config-change
| +--ro changed-by
| | +--ro (server-or-user)
| | +--:(server)
| | | +--ro server? empty
| | +--:(by-user)
| | +--ro username string
| | +--ro session-id ietf-netconf:session-id-or-zero-type
| | +--ro source-host? ietf-inet-types:ip-address
| +--ro datastore? enumeration <running>
| +--ro edit*
| +--ro target? instance-identifier
| +--ro operation? ietf-netconf:edit-operation-type
+---n netconf-capability-change
| +--ro changed-by
| | +--ro (server-or-user)
| | +--:(server)
| | | +--ro server? empty
| | +--:(by-user)
| | +--ro username string
| | +--ro session-id ietf-netconf:session-id-or-zero-type
| | +--ro source-host? ietf-inet-types:ip-address
| +--ro added-capability* ietf-inet-types:uri
| +--ro deleted-capability* ietf-inet-types:uri
| +--ro modified-capability* ietf-inet-types:uri
+---n netconf-session-start
| +--ro username string
| +--ro session-id ietf-netconf:session-id-or-zero-type
| +--ro source-host? ietf-inet-types:ip-address
+---n netconf-session-end
| +--ro username string
| +--ro session-id ietf-netconf:session-id-or-zero-type
| +--ro source-host? ietf-inet-types:ip-address
| +--ro killed-by? ietf-netconf:session-id-type
| +--ro termination-reason enumeration
+---n netconf-confirmed-commit
+--ro username string
+--ro session-id ietf-netconf:session-id-or-zero-type
+--ro source-host? ietf-inet-types:ip-address
+--ro confirm-event enumeration
+--ro timeout? uint32
module: ietf-system
+--rw system
| +--rw contact? string
| +--rw hostname? ietf-inet-types:domain-name
| +--rw location? string
| +--rw clock
| | +--rw (timezone)?
| | +--:(timezone-utc-offset)
| | +--rw timezone-utc-offset? int16
| +--rw dns-resolver
| +--rw search* ietf-inet-types:domain-name
| +--rw server* [name]
| | +--rw name string
| | +--rw (transport)
| | +--:(udp-and-tcp)
| | +--rw udp-and-tcp
| | +--rw address ietf-inet-types:ip-address
| +--rw options
| +--rw timeout? uint8 <5>
| +--rw attempts? uint8 <2>
+--ro system-state
+--ro platform
| +--ro os-name? string
| +--ro os-release? string
| +--ro os-version? string
| +--ro machine? string
+--ro clock
+--ro current-datetime? ietf-yang-types:date-and-time
+--ro boot-datetime? ietf-yang-types:date-and-time

rpcs:
+---x set-current-datetime
| +---- input
| +---w current-datetime ietf-yang-types:date-and-time
+---x system-restart
+---x system-shutdown
> feature ietf-system
ietf-system features:
radius (off)
authentication (off)
local-users (off)
radius-authentication (off)
ntp (off)
ntp-udp-port (off)
timezone-name (off)
dns-udp-tcp-port (off)
