Message-Id: <32989F47-00DD-4211-A570-229DA46A8FDC@syseleven.de>
Date:	Tue, 21 Aug 2012 16:03:53 +0200
From:	Andrew Holway <a.holway@syseleven.de>
To:	kvm@vger.kernel.org
Subject: Getting VLANs to the VM: ping / UDP working but TCP not

Hi,

I am trying out a couple of methods for getting VLANs to the VM. In both cases the VM can ping Google et al. without problems and DNS works fine, but no TCP connection succeeds. I thought this might be a frame-size problem, but even telnet (which, as I understand it, sends tiny packets) fails to connect.

Why would UDP and ping work fine while TCP fails?
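
To rule the frame-size theory in or out more directly, this is the kind of DF-flagged ping sweep I can run from inside the VM (just a sketch; 8.8.8.8 is only an example target):

# probe path MTU from the guest: 1472 bytes of ICMP payload plus
# 28 bytes of ICMP/IP headers fills a 1500-byte frame exactly
ping -M do -s 1472 -c 3 8.8.8.8
# if that fails, step the payload size down until it gets through
ping -M do -s 1400 -c 3 8.8.8.8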

I saw some kind of weird packet on the bridge while I was telnetting to a web server running on the VM:

15:11:47.464656 01:00:00:0e:00:24 (oui Unknown) > 00:00:01:00:00:00 (oui Unknown), ethertype Unknown (0xdcd9), length 66: 
	0x0000:  84a8 0800 4510 0030 cd23 4000 4006 527b  ....E..0.#@.@.R{
	0x0010:  257b 6811 257b 6812 f487 0050 da75 1e54  %{h.%{h....P.u.T
	0x0020:  0000 0000 7002 ffff 7b65 0000 0204 05b4  ....p...{e......
	0x0030:  0402 0000

But they are hard to reproduce.
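
In case it helps, I will try to capture whole frames to a file so they can be inspected offline (a sketch; br1 is the bridge carrying the tagged traffic):

# capture full frames on the bridge and write them to a file;
# reading the file back shows the link-level headers too
tcpdump -i br1 -s 0 -w br1.pcap
tcpdump -e -vv -r br1.pcap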

Any ideas?

Thanks,

Andrew


a) vm001 is on node002 and has the following XML:

[root@node002 ~]# virsh dumpxml vm001
...
   <interface type='bridge'>
     <mac address='00:00:00:00:00:0e'/>
     <source bridge='br0'/>
     <target dev='vnet0'/>
     <alias name='net0'/>
     <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
   </interface>
   <interface type='bridge'>
     <mac address='00:00:01:00:00:0e'/>
     <source bridge='br1'/>
     <target dev='vnet1'/>
     <model type='e1000'/>
     <alias name='net1'/>
     <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
   </interface>
…

[root@vm001 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
   link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
   inet 127.0.0.1/8 scope host lo
   inet6 ::1/128 scope host 
      valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
   link/ether 00:00:00:00:00:0e brd ff:ff:ff:ff:ff:ff
   inet 10.141.100.1/16 brd 10.141.255.255 scope global eth0
   inet6 fe80::200:ff:fe00:e/64 scope link 
      valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
   link/ether 00:00:01:00:00:0e brd ff:ff:ff:ff:ff:ff

###
4: eth1.4@eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP
   link/ether 00:00:01:00:00:0e brd ff:ff:ff:ff:ff:ff
   inet 37.123.104.18/29 brd 37.123.104.23 scope global eth1.4
   inet6 fe80::200:1ff:fe00:e/64 scope link 
      valid_lft forever preferred_lft forever
###
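
(For context, eth1.4 in the guest corresponds to roughly the following iproute2 configuration; this is a sketch, and the real setup may be done through the distro network scripts:)

# tag VLAN 4 on top of eth1 inside the guest
ip link add link eth1 name eth1.4 type vlan id 4
ip addr add 37.123.104.18/29 dev eth1.4
ip link set eth1.4 up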


[root@node002 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
   link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
   inet 127.0.0.1/8 scope host lo
   inet6 ::1/128 scope host 
      valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP> mtu 1500 qdisc mq state UNKNOWN qlen 1000
   link/ether 00:02:c9:34:67:31 brd ff:ff:ff:ff:ff:ff
   inet 192.168.0.1/24 scope global eth0
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
   link/ether 00:9c:02:24:1a:e0 brd ff:ff:ff:ff:ff:ff
   inet6 fe80::29c:2ff:fe24:1ae0/64 scope link 
      valid_lft forever preferred_lft forever
4: eth2: <BROADCAST,MULTICAST,PROMISC,UP,LOWER_UP> mtu 1522 qdisc mq state UP qlen 1000
   link/ether 00:9c:02:24:1a:e4 brd ff:ff:ff:ff:ff:ff
   inet6 fe80::29c:2ff:fe24:1ae4/64 scope link 
      valid_lft forever preferred_lft forever
5: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN 
   link/ether 00:9c:02:24:1a:e0 brd ff:ff:ff:ff:ff:ff
   inet 10.141.0.2/16 brd 10.141.255.255 scope global br0
   inet6 fe80::29c:2ff:fe24:1ae0/64 scope link 
      valid_lft forever preferred_lft forever
7: br1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1522 qdisc noqueue state UNKNOWN 
   link/ether 00:9c:02:24:1a:e4 brd ff:ff:ff:ff:ff:ff
   inet6 fe80::29c:2ff:fe24:1ae4/64 scope link 
      valid_lft forever preferred_lft forever
8: virbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN 
   link/ether 52:54:00:81:84:9f brd ff:ff:ff:ff:ff:ff
   inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
9: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN qlen 500
   link/ether 52:54:00:81:84:9f brd ff:ff:ff:ff:ff:ff
33: vnet0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
   link/ether fe:00:00:00:00:0e brd ff:ff:ff:ff:ff:ff
   inet6 fe80::fc00:ff:fe00:e/64 scope link 
      valid_lft forever preferred_lft forever
34: vnet1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1522 qdisc pfifo_fast state UNKNOWN qlen 500
   link/ether fe:00:01:00:00:0e brd ff:ff:ff:ff:ff:ff
   inet6 fe80::fc00:1ff:fe00:e/64 scope link 
      valid_lft forever preferred_lft forever
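
Another thing I plan to rule out is checksum/segmentation offload, since offload-related checksum corruption tends to hit TCP first. A sketch of what I would try, first inside the VM on the e1000 NIC, then on the host tap device (whether the tap honours these toggles may depend on the driver):

# inside the VM: show, then disable, TX offloads on eth1
ethtool -k eth1
ethtool -K eth1 tx off tso off gso off
# on node002: the same for the tap device behind br1
ethtool -K vnet1 tx off tso off gso off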

[root@node002 ~]# sysctl -a | grep tcp
fs.nfs.nlm_tcpport = 0
fs.nfs.nfs_callback_tcpport = 0
net.netfilter.nf_conntrack_tcp_timeout_syn_sent = 120
net.netfilter.nf_conntrack_tcp_timeout_syn_recv = 60
net.netfilter.nf_conntrack_tcp_timeout_established = 432000
net.netfilter.nf_conntrack_tcp_timeout_fin_wait = 120
net.netfilter.nf_conntrack_tcp_timeout_close_wait = 60
net.netfilter.nf_conntrack_tcp_timeout_last_ack = 30
net.netfilter.nf_conntrack_tcp_timeout_time_wait = 120
net.netfilter.nf_conntrack_tcp_timeout_close = 10
net.netfilter.nf_conntrack_tcp_timeout_max_retrans = 300
net.netfilter.nf_conntrack_tcp_timeout_unacknowledged = 300
net.netfilter.nf_conntrack_tcp_loose = 1
net.netfilter.nf_conntrack_tcp_be_liberal = 0
net.netfilter.nf_conntrack_tcp_max_retrans = 3
net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_sack = 1
net.ipv4.tcp_retrans_collapse = 1
net.ipv4.tcp_syn_retries = 5
net.ipv4.tcp_synack_retries = 5
net.ipv4.tcp_max_orphans = 262144
net.ipv4.tcp_max_tw_buckets = 262144
net.ipv4.tcp_keepalive_time = 1800
net.ipv4.tcp_keepalive_probes = 9
net.ipv4.tcp_keepalive_intvl = 75
net.ipv4.tcp_retries1 = 3
net.ipv4.tcp_retries2 = 15
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_syncookies = 0
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_abort_on_overflow = 0
net.ipv4.tcp_stdurg = 0
net.ipv4.tcp_rfc1337 = 0
net.ipv4.tcp_max_syn_backlog = 2048
net.ipv4.tcp_orphan_retries = 0
net.ipv4.tcp_fack = 1
net.ipv4.tcp_reordering = 3
net.ipv4.tcp_ecn = 2
net.ipv4.tcp_dsack = 1
net.ipv4.tcp_mem = 12397248	16529664	24794496
net.ipv4.tcp_wmem = 4096	16384	4194304
net.ipv4.tcp_rmem = 4096	87380	4194304
net.ipv4.tcp_app_win = 31
net.ipv4.tcp_adv_win_scale = 2
net.ipv4.tcp_tw_reuse = 0
net.ipv4.tcp_frto = 2
net.ipv4.tcp_frto_response = 0
net.ipv4.tcp_low_latency = 0
net.ipv4.tcp_no_metrics_save = 0
net.ipv4.tcp_moderate_rcvbuf = 1
net.ipv4.tcp_tso_win_divisor = 3
net.ipv4.tcp_congestion_control = cubic
net.ipv4.tcp_abc = 0
net.ipv4.tcp_mtu_probing = 0
net.ipv4.tcp_base_mss = 512
net.ipv4.tcp_workaround_signed_windows = 0
net.ipv4.tcp_dma_copybreak = 262144
net.ipv4.tcp_slow_start_after_idle = 1
net.ipv4.tcp_available_congestion_control = cubic reno
net.ipv4.tcp_allowed_congestion_control = cubic reno
net.ipv4.tcp_max_ssthresh = 0
net.ipv4.tcp_thin_linear_timeouts = 0
net.ipv4.tcp_thin_dupack = 0
sunrpc.transports = tcp 1048576
sunrpc.transports = tcp-bc 1048576
sunrpc.tcp_slot_table_entries = 2
sunrpc.tcp_max_slot_table_entries = 65536
sunrpc.tcp_fin_timeout = 15


