-
Notifications
You must be signed in to change notification settings - Fork 752
Open
Description
我们正在基于dpvs做一些原型验证,在使用参考配置的fullnat模式对http的latency进行测试的时候,发现latency比纯nginx的似乎还要慢,这里的配置基本是根据例子中简单改了一下,配置如下:
! global config
global_defs {
log_level ERROR
log_file /var/log/dpvs.log
! <init> log_async_mode on
! <init> kni on
lldp on
}
! netif config
netif_defs {
<init> pktpool_size 524287
<init> pktpool_cache 256
!<init> device enP6574s1 {
<init> device eth0 {
rx {
queue_number 1
descriptor_number 1024
rss all
}
tx {
queue_number 1
descriptor_number 1024
}
! mtu 1500
! promisc_mode
! allmulticast
!kni_name enP6574s1.kni
kni_name eth0.kni
addr 10.2.0.119/24
}
}
! worker config (lcores)
worker_defs {
<init> worker cpu0 {
type master
cpu_id 0
}
<init> worker cpu1 {
type slave
cpu_id 1
!port enP6574s1 {
port eth0 {
rx_queue_ids 0
tx_queue_ids 0
! isol_rx_cpu_ids 9
! isol_rxq_ring_sz 1048576
}
}
}
! timer config
timer_defs {
# cpu job loops to schedule dpdk timer management
schedule_interval 500
}
! dpvs neighbor config
neigh_defs {
<init> unres_queue_length 128
timeout 60
}
! dpvs ipset config
ipset_defs {
<init> ipset_hash_pool_size 131072
}
! dpvs ipv4 config
ipv4_defs {
forwarding off
<init> default_ttl 64
fragment {
<init> bucket_number 4096
<init> bucket_entries 16
<init> max_entries 4096
<init> ttl 1
}
}
! dpvs ipv6 config
ipv6_defs {
disable off
forwarding off
route6 {
<init> method hlist
recycle_time 10
}
}
! control plane config
ctrl_defs {
lcore_msg {
<init> ring_size 4096
sync_msg_timeout_us 20000
priority_level low
}
tcp_sock {
enabled on
bind 10.2.1.118:9090
}
}
! ipvs config
ipvs_defs {
conn {
<init> conn_pool_size 2097152
<init> conn_pool_cache 256
conn_init_timeout 3
! expire_quiescent_template
! <init> fast_xmit_close
! <init> redirect off
}
udp {
! defence_udp_drop
uoa_mode opp
uoa_max_trail 3
timeout {
oneway 60
normal 300
last 3
}
}
tcp {
! defence_tcp_drop
timeout {
none 2
established 90
syn_sent 3
syn_recv 30
fin_wait 7
time_wait 7
close 3
close_wait 7
last_ack 7
listen 120
synack 30
last 2
}
synproxy {
synack_options {
mss 1452
ttl 63
sack
! wscale 0
! timestamp
}
close_client_window
! defer_rs_syn
rs_syn_max_retry 3
ack_storm_thresh 10
max_ack_saved 3
conn_reuse_state {
close
time_wait
! fin_wait
! close_wait
! last_ack
}
}
}
}
! sa_pool config
sa_pool {
<init> pool_hash_size 16
<init> flow_enable on
}
配置脚本如下:
VIP=10.2.0.119
LIP=10.2.0.117
RS=10.2.0.105
sudo ./bin/dpip addr add ${VIP}/24 dev eth0
sudo ./bin/ipvsadm -A -t ${VIP}:80 -s rr
sudo ./bin/ipvsadm -a -t ${VIP}:80 -r ${RS}:80 -b
sudo ./bin/ipvsadm --add-laddr -z ${LIP} -t ${VIP}:80 -F eth0
Realserver是一台返回静态页面的服务器,响应大小约1k字节,测试效果如下:
dpdk-box-3:/mnt/ramdisk$ wrk -c 1 -t 1 --duration 30s -s load.lua --latency http://10.2.0.119/
Running 30s test @ http://10.2.0.119/
1 threads and 1 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 836.17us 5.26ms 80.55ms 98.45%
Req/Sec 4.25k 563.83 4.92k 92.03%
Latency Distribution
P0.0000 158.00us
P50.0000 202.00us
P75.0000 219.00us
P90.0000 310.00us
P95.0000 530.00us
P99.0000 27.89ms
P99.9000 69.19ms
P99.9900 77.48ms
P99.9990 80.31ms
P99.9999 80.55ms
P100.0000 80.55ms
127332 requests in 30.10s, 94.84MB read
Requests/sec: 4230.34
Transfer/sec: 3.15MB
这里的延时到 99% 分位之后就大幅恶化了(P95 约 530us,而 P99 达到 27.89ms),这和纯 nginx 的延时差距过大了,请帮忙看一下,大致的配置有没有明显的问题。
机器使用的是mlx5的网卡,性能上不是问题。
Metadata
Metadata
Assignees
Labels
No labels