Message-Id: <1510089416-5945-3-git-send-email-dave.taht@gmail.com>
Date: Tue, 7 Nov 2017 13:16:54 -0800
From: Dave Taht <dave.taht@...il.com>
To: netdev@...r.kernel.org
Cc: Dave Taht <dave.taht@...il.com>
Subject: [PATCH iproute2 2/4] q_netem: utilize 64 bit nanosecond API for delay and jitter

This starts to obsolete the old "ticks" API in favor of well-defined
nanoseconds for the kernel/userspace netem interface.

Signed-off-by: Dave Taht <dave.taht@...il.com>
---
tc/q_netem.c | 68 +++++++++++++++++++++++++++++++++++++++---------------------
1 file changed, 44 insertions(+), 24 deletions(-)
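
For reference, below is a minimal, self-contained sketch of the behaviour
this patch assumes from the get_time64()/sprint_time64() helpers
(presumably added earlier in this series): a time string with the usual tc
suffixes is parsed into a signed 64-bit nanosecond count, instead of being
rounded to psched ticks as get_ticks() did. This is an illustration only,
not the iproute2 implementation; the helper names, accepted suffixes and
default-unit handling are assumptions.

/*
 * Illustration only -- not the iproute2 implementation.  A minimal
 * sketch of a 64-bit nanosecond time parser: accept the usual tc
 * time suffixes and return the value as signed nanoseconds, free of
 * the psched tick granularity that get_ticks() imposed.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NSEC_PER_USEC 1000LL
#define NSEC_PER_MSEC 1000000LL
#define NSEC_PER_SEC  1000000000LL

static int parse_time64_ns(long long *ns, const char *str)
{
	char *unit;
	double t;

	errno = 0;
	t = strtod(str, &unit);
	if (unit == str || errno)
		return -1;

	/* Require an explicit unit; what the real helper does with a
	 * bare number is not assumed by this sketch. */
	if (!strcmp(unit, "s") || !strcmp(unit, "sec"))
		t *= NSEC_PER_SEC;
	else if (!strcmp(unit, "ms") || !strcmp(unit, "msec"))
		t *= NSEC_PER_MSEC;
	else if (!strcmp(unit, "us") || !strcmp(unit, "usec"))
		t *= NSEC_PER_USEC;
	else if (!strcmp(unit, "ns") || !strcmp(unit, "nsec"))
		;	/* already in nanoseconds */
	else
		return -1;	/* unknown or missing unit */

	*ns = (long long)t;
	return 0;
}

int main(int argc, char **argv)
{
	long long ns;

	if (argc < 2 || parse_time64_ns(&ns, argv[1])) {
		fprintf(stderr, "usage: %s TIME (e.g. 100ms, 20us, 1.5ms)\n",
			argv[0]);
		return 1;
	}
	printf("%s = %lld ns\n", argv[1], ns);
	return 0;
}

On the command line nothing changes (delay and jitter are still given as
e.g. 100ms or 20us); only the netlink encoding does, with the values now
carried in the new TCA_NETEM_LATENCY64/TCA_NETEM_JITTER64 attributes
rather than the tick-based latency/jitter fields of struct tc_netem_qopt,
which this version of the patch leaves at zero.
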
diff --git a/tc/q_netem.c b/tc/q_netem.c
index cdaddce..22a5b94 100644
--- a/tc/q_netem.c
+++ b/tc/q_netem.c
@@ -151,24 +151,6 @@ static int get_distribution(const char *type, __s16 *data, int maxdata)
#define NEXT_IS_SIGNED_NUMBER() \
(NEXT_ARG_OK() && (isdigit(argv[1][0]) || argv[1][0] == '-'))
-/* Adjust for the fact that psched_ticks aren't always usecs
- (based on kernel PSCHED_CLOCK configuration */
-static int get_ticks(__u32 *ticks, const char *str)
-{
- unsigned int t;
-
- if (get_time(&t, str))
- return -1;
-
- if (tc_core_time2big(t)) {
- fprintf(stderr, "Illegal %u time (too large)\n", t);
- return -1;
- }
-
- *ticks = tc_core_time2tick(t);
- return 0;
-}
-
static int netem_parse_opt(struct qdisc_util *qu, int argc, char **argv,
struct nlmsghdr *n)
{
@@ -185,6 +167,8 @@ static int netem_parse_opt(struct qdisc_util *qu, int argc, char **argv,
__u16 loss_type = NETEM_LOSS_UNSPEC;
int present[__TCA_NETEM_MAX] = {};
__u64 rate64 = 0;
+ __s64 latency64 = 0;
+ __s64 jitter64 = 0;
for ( ; argc > 0; --argc, ++argv) {
if (matches(*argv, "limit") == 0) {
@@ -196,14 +180,16 @@ static int netem_parse_opt(struct qdisc_util *qu, int argc, char **argv,
} else if (matches(*argv, "latency") == 0 ||
matches(*argv, "delay") == 0) {
NEXT_ARG();
- if (get_ticks(&opt.latency, *argv)) {
+ present[TCA_NETEM_LATENCY64] = 1;
+ if (get_time64(&latency64, *argv)) {
explain1("latency");
return -1;
}
if (NEXT_IS_NUMBER()) {
NEXT_ARG();
- if (get_ticks(&opt.jitter, *argv)) {
+ present[TCA_NETEM_JITTER64] = 1;
+ if (get_time64(&jitter64, *argv)) {
explain1("latency");
return -1;
}
@@ -437,7 +423,7 @@ static int netem_parse_opt(struct qdisc_util *qu, int argc, char **argv,
tail = NLMSG_TAIL(n);
if (reorder.probability) {
- if (opt.latency == 0) {
+ if (latency64 == 0) {
fprintf(stderr, "reordering not possible without specifying some delay\n");
explain();
return -1;
@@ -458,7 +444,7 @@ static int netem_parse_opt(struct qdisc_util *qu, int argc, char **argv,
}
}
- if (dist_data && (opt.latency == 0 || opt.jitter == 0)) {
+ if (dist_data && (latency64 == 0 || jitter64 == 0)) {
fprintf(stderr, "distribution specified but no latency and jitter values\n");
explain();
return -1;
@@ -467,6 +453,16 @@ static int netem_parse_opt(struct qdisc_util *qu, int argc, char **argv,
if (addattr_l(n, 1024, TCA_OPTIONS, &opt, sizeof(opt)) < 0)
return -1;
+ if (present[TCA_NETEM_LATENCY64] &&
+ addattr_l(n, 1024, TCA_NETEM_LATENCY64, &latency64,
+ sizeof(latency64)) < 0)
+ return -1;
+
+ if (present[TCA_NETEM_JITTER64] &&
+ addattr_l(n, 1024, TCA_NETEM_JITTER64, &jitter64,
+ sizeof(jitter64)) < 0)
+ return -1;
+
if (present[TCA_NETEM_CORR] &&
addattr_l(n, 1024, TCA_NETEM_CORR, &cor, sizeof(cor)) < 0)
return -1;
@@ -540,6 +536,8 @@ static int netem_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
const struct tc_netem_rate *rate = NULL;
int len;
__u64 rate64 = 0;
+ __s64 latency64 = 0;
+ __s64 jitter64 = 0;
SPRINT_BUF(b1);
@@ -559,6 +557,18 @@ static int netem_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
parse_rtattr(tb, TCA_NETEM_MAX, RTA_DATA(opt) + sizeof(qopt),
len);
+ if (tb[TCA_NETEM_LATENCY64]) {
+ if (RTA_PAYLOAD(tb[TCA_NETEM_LATENCY64]) <
+ sizeof(latency64))
+ return -1;
+ latency64 = rta_getattr_u64(tb[TCA_NETEM_LATENCY64]);
+ }
+ if (tb[TCA_NETEM_JITTER64]) {
+ if (RTA_PAYLOAD(tb[TCA_NETEM_JITTER64]) <
+ sizeof(jitter64))
+ return -1;
+ jitter64 = rta_getattr_u64(tb[TCA_NETEM_JITTER64]);
+ }
if (tb[TCA_NETEM_CORR]) {
if (RTA_PAYLOAD(tb[TCA_NETEM_CORR]) < sizeof(*cor))
return -1;
@@ -602,13 +612,23 @@ static int netem_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
fprintf(f, "limit %d", qopt.limit);
- if (qopt.latency) {
+ if (latency64) {
+ fprintf(f, " delay %s", sprint_time64(latency64, b1));
+
+ if (jitter64) {
+ fprintf(f, " %s", sprint_time64(jitter64, b1));
+ if (cor && cor->delay_corr)
+ fprintf(f, " %s",
+ sprint_percent(cor->delay_corr, b1));
+ }
+ } else if (qopt.latency) {
fprintf(f, " delay %s", sprint_ticks(qopt.latency, b1));
if (qopt.jitter) {
fprintf(f, " %s", sprint_ticks(qopt.jitter, b1));
if (cor && cor->delay_corr)
- fprintf(f, " %s", sprint_percent(cor->delay_corr, b1));
+ fprintf(f, " %s",
+ sprint_percent(cor->delay_corr, b1));
}
}
--
2.7.4