Bug Summary

File: ozproto.c
Location: line 322, column 2
Description: Access to field 'next' results in a dereference of a null pointer (in oz_add_farewell(), within the expansion of the macro 'list_for_each_entry')
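
How the defect arises (condensed from the path notes in the listing below): oz_pkt_recv() passes the frame to oz_rx_frame(), where pd stays NULL because oz_pd_find() finds no PD for the source MAC address and the frame carries no OZ_ELT_CONNECT_REQ element to create one. The element loop then reaches the OZ_ELT_FAREWELL_REQ case (step 14) and calls oz_add_farewell() with the NULL pd (step 15); list_for_each_entry() at line 322 reads (&pd->farewell_list)->next, which is the reported null-pointer dereference (step 17).

One possible caller-side guard is sketched below. It assumes a farewell element arriving for an unknown PD can simply be dropped; this is an illustration only, not necessarily the change applied upstream.

    /* Sketch: in the OZ_ELT_FAREWELL_REQ case of oz_rx_frame(), only hand
     * the element to oz_add_farewell() when a PD is known for the sender.
     */
    case OZ_ELT_FAREWELL_REQ: {
            struct oz_elt_farewell *body =
                    (struct oz_elt_farewell *)(elt + 1);
            oz_trace("RX: OZ_ELT_FAREWELL_REQ\n");
            if (!pd)                /* assumption: drop farewell from unknown PD */
                    break;
            oz_add_farewell(pd, body->ep_num, body->index, body->report,
                            elt->length + 1 - sizeof(*body));
            }
            break;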

Annotated Source Code

1/* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6#include <linux/init.h>
7#include <linux/module.h>
8#include <linux/timer.h>
9#include <linux/sched.h>
10#include <linux/netdevice.h>
11#include <linux/errno.h>
12#include <linux/ieee80211.h>
13#include "ozconfig.h"
14#include "ozprotocol.h"
15#include "ozeltbuf.h"
16#include "ozpd.h"
17#include "ozproto.h"
18#include "ozusbsvc.h"
19#include "oztrace.h"
20#include "ozappif.h"
21#include "ozevent.h"
22#include <asm/unaligned.h>
23#include <linux/uaccess.h>
24#include <net/psnap.h>
25/*------------------------------------------------------------------------------
26 */
27#define OZ_CF_CONN_SUCCESS 1
28#define OZ_CF_CONN_FAILURE 2
29
30#define OZ_DO_STOP 1
31#define OZ_DO_SLEEP 2
32
33/* States of the timer.
34 */
35#define OZ_TIMER_IDLE 0
36#define OZ_TIMER_SET 1
37#define OZ_TIMER_IN_HANDLER 2
38
39#define OZ_MAX_TIMER_POOL_SIZE 16
40
41/*------------------------------------------------------------------------------
42 */
43struct oz_binding {
44 struct packet_type ptype;
45 char name[OZ_MAX_BINDING_LEN];
46 struct oz_binding *next;
47};
48
49struct oz_timer {
50 struct list_head link;
51 struct oz_pd *pd;
52 unsigned long due_time;
53 int type;
54};
55/*------------------------------------------------------------------------------
56 * Static external variables.
57 */
58static DEFINE_SPINLOCK(g_polling_lock);
59static LIST_HEAD(g_pd_list);
60static struct oz_binding *g_binding ;
61static DEFINE_SPINLOCK(g_binding_lock);
62static struct sk_buff_head g_rx_queue;
63static u8 g_session_id;
64static u16 g_apps = 0x1;
65static int g_processing_rx;
66static struct timer_list g_timer;
67static struct oz_timer *g_cur_timer;
68static struct list_head *g_timer_pool;
69static int g_timer_pool_count;
70static int g_timer_state = OZ_TIMER_IDLE;
71static LIST_HEAD(g_timer_list);
72/*------------------------------------------------------------------------------
73 */
74static void oz_protocol_timer_start(void);
75/*------------------------------------------------------------------------------
76 * Context: softirq-serialized
77 */
78static u8 oz_get_new_session_id(u8 exclude)
79{
80 if (++g_session_id == 0)
81 g_session_id = 1;
82 if (g_session_id == exclude) {
83 if (++g_session_id == 0)
84 g_session_id = 1;
85 }
86 return g_session_id;
87}
88/*------------------------------------------------------------------------------
89 * Context: softirq-serialized
90 */
91static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
92{
93 struct sk_buff *skb;
94 struct net_device *dev = pd->net_dev;
95 struct oz_hdr *oz_hdr;
96 struct oz_elt *elt;
97 struct oz_elt_connect_rsp *body;
98 int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
99 sizeof(struct oz_elt_connect_rsp);
100 skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
101 if (skb == NULL)
102 return;
103 skb_reserve(skb, LL_RESERVED_SPACE(dev));
104 skb_reset_network_header(skb);
105 oz_hdr = (struct oz_hdr *)skb_put(skb, sz);
106 elt = (struct oz_elt *)(oz_hdr+1);
107 body = (struct oz_elt_connect_rsp *)(elt+1);
108 skb->dev = dev;
109 skb->protocol = htons(OZ_ETHERTYPE);
110 /* Fill in device header */
111 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
112 dev->dev_addr, skb->len) < 0) {
113 kfree_skb(skb);
114 return;
115 }
116 oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT);
117 oz_hdr->last_pkt_num = 0;
118 put_unaligned(0, &oz_hdr->pkt_num);
119 oz_event_log(OZ_EVT_CONNECT_RSP, 0, 0, NULL, 0);
120 elt->type = OZ_ELT_CONNECT_RSP;
121 elt->length = sizeof(struct oz_elt_connect_rsp);
122 memset(body, 0, sizeof(struct oz_elt_connect_rsp));
123 body->status = status;
124 if (status == 0) {
125 body->mode = pd->mode;
126 body->session_id = pd->session_id;
127 put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
128 }
129 oz_trace("TX: OZ_ELT_CONNECT_RSP %d", status);
130 dev_queue_xmit(skb);
131 return;
132}
133/*------------------------------------------------------------------------------
134 * Context: softirq-serialized
135 */
136static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
137{
138 unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK;
139
140 switch (kalive & OZ_KALIVE_TYPE_MASK) {
141 case OZ_KALIVE_SPECIAL:
142 pd->keep_alive_j =
143 oz_ms_to_jiffies(keep_alive * 1000*60*60*24*20);
144 break;
145 case OZ_KALIVE_SECS:
146 pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000);
147 break;
148 case OZ_KALIVE_MINS:
149 pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60);
150 break;
151 case OZ_KALIVE_HOURS:
152 pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60*60);
154 default:
155 pd->keep_alive_j = 0;
156 }
157 oz_trace("Keepalive = %lu jiffies\n", pd->keep_alive_j);
158}
159/*------------------------------------------------------------------------------
160 * Context: softirq-serialized
161 */
162static void pd_set_presleep(struct oz_pd *pd, u8 presleep)
163{
164 if (presleep)
165 pd->presleep_j = oz_ms_to_jiffies(presleep*100);
166 else
167 pd->presleep_j = OZ_PRESLEEP_TOUT_J;
168 oz_trace("Presleep time = %lu jiffies\n", pd->presleep_j);
169}
170/*------------------------------------------------------------------------------
171 * Context: softirq-serialized
172 */
173static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
174 u8 *pd_addr, struct net_device *net_dev)
175{
176 struct oz_pd *pd;
177 struct oz_elt_connect_req *body =
178 (struct oz_elt_connect_req *)(elt+1);
179 u8 rsp_status = OZ_STATUS_SUCCESS;
180 u8 stop_needed = 0;
181 u16 new_apps = g_apps;
182 struct net_device *old_net_dev = NULL;
183 struct oz_pd *free_pd = NULL;
184 if (cur_pd) {
185 pd = cur_pd;
186 spin_lock_bh(&g_polling_lock);
187 } else {
188 struct oz_pd *pd2 = NULL;
189 struct list_head *e;
190 pd = oz_pd_alloc(pd_addr);
191 if (pd == NULL)
192 return NULL;
193 pd->last_rx_time_j = jiffies;
194 spin_lock_bh(&g_polling_lock);
195 list_for_each(e, &g_pd_list) {
196 pd2 = container_of(e, struct oz_pd, link);
197 if (memcmp(pd2->mac_addr, pd_addr, ETH_ALEN) == 0) {
198 free_pd = pd;
199 pd = pd2;
200 break;
201 }
202 }
203 if (pd != pd2)
204 list_add_tail(&pd->link, &g_pd_list);
205 }
206 if (pd == NULL) {
207 spin_unlock_bh(&g_polling_lock);
208 return NULL;
209 }
210 if (pd->net_dev != net_dev) {
211 old_net_dev = pd->net_dev;
212 dev_hold(net_dev);
213 pd->net_dev = net_dev;
214 }
215 oz_trace("Host vendor: %d\n", body->host_vendor);
216 pd->max_tx_size = OZ_MAX_TX_SIZE;
217 pd->mode = body->mode;
218 pd->pd_info = body->pd_info;
219 if (pd->mode & OZ_F_ISOC_NO_ELTS) {
220 pd->ms_per_isoc = body->ms_per_isoc;
221 if (!pd->ms_per_isoc)
222 pd->ms_per_isoc = 4;
223
224 switch (body->ms_isoc_latency & OZ_LATENCY_MASK) {
225 case OZ_ONE_MS_LATENCY:
226 pd->isoc_latency = (body->ms_isoc_latency &
227 ~OZ_LATENCY_MASK) / pd->ms_per_isoc;
228 break;
229 case OZ_TEN_MS_LATENCY:
230 pd->isoc_latency = ((body->ms_isoc_latency &
231 ~OZ_LATENCY_MASK) * 10) / pd->ms_per_isoc;
232 break;
233 default:
234 pd->isoc_latency = OZ_MAX_TX_QUEUE_ISOC;
235 }
236 }
237 if (body->max_len_div16)
238 pd->max_tx_size = ((u16)body->max_len_div16)<<4;
239 oz_trace("Max frame:%u Ms per isoc:%u\n",
240 pd->max_tx_size, pd->ms_per_isoc);
241 pd->max_stream_buffering = 3*1024;
242 pd->timeout_time_j = jiffies + OZ_CONNECTION_TOUT_J;
243 pd->pulse_period_j = OZ_QUANTUM_J;
244 pd_set_presleep(pd, body->presleep);
245 pd_set_keepalive(pd, body->keep_alive);
246
247 new_apps &= le16_to_cpu(get_unaligned(&body->apps));
248 if ((new_apps & 0x1) && (body->session_id)) {
249 if (pd->session_id) {
250 if (pd->session_id != body->session_id) {
251 rsp_status = OZ_STATUS_SESSION_MISMATCH;
252 goto done;
253 }
254 } else {
255 new_apps &= ~0x1; /* Resume not permitted */
256 pd->session_id =
257 oz_get_new_session_id(body->session_id);
258 }
259 } else {
260 if (pd->session_id && !body->session_id) {
261 rsp_status = OZ_STATUS_SESSION_TEARDOWN;
262 stop_needed = 1;
263 } else {
264 new_apps &= ~0x1; /* Resume not permitted */
265 pd->session_id =
266 oz_get_new_session_id(body->session_id);
267 }
268 }
269done:
270 if (rsp_status == OZ_STATUS_SUCCESS) {
271 u16 start_apps = new_apps & ~pd->total_apps & ~0x1;
272 u16 stop_apps = pd->total_apps & ~new_apps & ~0x1;
273 u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
274 spin_unlock_bh(&g_polling_lock);
275 oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
276 oz_timer_delete(pd, OZ_TIMER_STOP);
277 oz_trace("new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
278 new_apps, pd->total_apps, pd->paused_apps);
279 if (start_apps) {
280 if (oz_services_start(pd, start_apps, 0))
281 rsp_status = OZ_STATUS_TOO_MANY_PDS;
282 }
283 if (resume_apps)
284 if (oz_services_start(pd, resume_apps, 1))
285 rsp_status = OZ_STATUS_TOO_MANY_PDS;
286 if (stop_apps)
287 oz_services_stop(pd, stop_apps, 0);
288 oz_pd_request_heartbeat(pd);
289 } else {
290 spin_unlock_bh(&g_polling_lock);
291 }
292 oz_send_conn_rsp(pd, rsp_status);
293 if (rsp_status != OZ_STATUS_SUCCESS) {
294 if (stop_needed)
295 oz_pd_stop(pd);
296 oz_pd_put(pd);
297 pd = NULL;
298 }
299 if (old_net_dev)
300 dev_put(old_net_dev);
301 if (free_pd)
302 oz_pd_destroy(free_pd);
303 return pd;
304}
305/*------------------------------------------------------------------------------
306 * Context: softirq-serialized
307 */
308static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
309 u8 *report, u8 len)
310{
311 struct oz_farewell *f;
312 struct oz_farewell *f2;
313 int found = 0;
314 f = kmalloc(sizeof(struct oz_farewell) + len - 1, GFP_ATOMIC);
315 if (!f)
    [16] Taking false branch
316 return;
317 f->ep_num = ep_num;
318 f->index = index;
319 memcpy(f->report, report, len);
320 oz_trace("RX: Adding farewell report\n");
321 spin_lock(&g_polling_lock);
322 list_for_each_entry(f2, &pd->farewell_list, link) {
    [17] Within the expansion of the macro 'list_for_each_entry':
         (a) Access to field 'next' results in a dereference of a null pointer
323 if ((f2->ep_num == ep_num) && (f2->index == index)) {
324 found = 1;
325 list_del(&f2->link);
326 break;
327 }
328 }
329 list_add_tail(&f->link, &pd->farewell_list);
330 spin_unlock(&g_polling_lock);
331 if (found)
332 kfree(f2);
333}
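
An equivalent guard could instead sit at the top of oz_add_farewell() itself, so the function tolerates a NULL pd regardless of the caller. Shown only as an alternative sketch, under the same assumption that the report may be dropped:

    static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
                                u8 *report, u8 len)
    {
            if (pd == NULL)         /* hypothetical guard, not in the listing above */
                    return;
            /* ... body as listed at lines 311-332 above ... */
    }
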
334/*------------------------------------------------------------------------------
335 * Context: softirq-serialized
336 */
337static void oz_rx_frame(struct sk_buff *skb)
338{
339 u8 *mac_hdr;
340 u8 *src_addr;
341 struct oz_elt *elt;
342 int length;
343 struct oz_pd *pd = NULL;
344 struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
345 int dup = 0;
346 u32 pkt_num;
347
348 oz_event_log(OZ_EVT_RX_PROCESS, 0,
349 (((u16)oz_hdr->control)<<8)|oz_hdr->last_pkt_num,
350 NULL, oz_hdr->pkt_num);
351 oz_trace2(OZ_TRACE_RX_FRAMES,
352 "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
353 oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
354 mac_hdr = skb_mac_header(skb);
355 src_addr = &mac_hdr[ETH_ALEN];
356 length = skb->len;
357
358 /* Check the version field */
359 if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
    [10] Taking false branch
360 oz_trace("Incorrect protocol version: %d\n",
361 oz_get_prot_ver(oz_hdr->control));
362 goto done;
363 }
364
365 pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));
366
367 pd = oz_pd_find(src_addr);
368 if (pd) {
    [11] Taking false branch
369 pd->last_rx_time_j = jiffies;
370 oz_timer_add(pd, OZ_TIMER_TOUT,
371 pd->last_rx_time_j + pd->presleep_j, 1);
372 if (pkt_num != pd->last_rx_pkt_num) {
373 pd->last_rx_pkt_num = pkt_num;
374 } else {
375 dup = 1;
376 oz_trace("Duplicate frame\n");
377 }
378 }
379
380 if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
381 oz_trace2(OZ_TRACE_RX_FRAMES, "Received TRIGGER Frame\n");
382 pd->last_sent_frame = &pd->tx_queue;
383 if (oz_hdr->control & OZ_F_ACK) {
384 /* Retire completed frames */
385 oz_retire_tx_frames(pd, oz_hdr->last_pkt_num);
386 }
387 if ((oz_hdr->control & OZ_F_ACK_REQUESTED) &&
388 (pd->state == OZ_PD_S_CONNECTED)) {
389 int backlog = pd->nb_queued_frames;
390 pd->trigger_pkt_num = pkt_num;
391 /* Send queued frames */
392 oz_send_queued_frames(pd, backlog);
393 }
394 }
395
396 length -= sizeof(struct oz_hdr);
397 elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));
398
399 while (length >= sizeof(struct oz_elt)) {
    [12] Loop condition is true. Entering loop body
400 length -= sizeof(struct oz_elt) + elt->length;
401 if (length < 0)
    [13] Taking false branch
402 break;
403 switch (elt->type) {
    [14] Control jumps to 'case 18:' at line 426
404 case OZ_ELT_CONNECT_REQ:
405 oz_event_log(OZ_EVT_CONNECT_REQ, 0, 0, NULL, 0);
406 oz_trace("RX: OZ_ELT_CONNECT_REQ\n");
407 pd = oz_connect_req(pd, elt, src_addr, skb->dev);
408 break;
409 case OZ_ELT_DISCONNECT:
410 oz_trace("RX: OZ_ELT_DISCONNECT\n");
411 if (pd)
412 oz_pd_sleep(pd);
413 break;
414 case OZ_ELT_UPDATE_PARAM_REQ: {
415 struct oz_elt_update_param *body =
416 (struct oz_elt_update_param *)(elt + 1);
417 oz_trace("RX: OZ_ELT_UPDATE_PARAM_REQ\n");
418 if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
419 spin_lock(&g_polling_lock);
420 pd_set_keepalive(pd, body->keepalive);
421 pd_set_presleep(pd, body->presleep);
422 spin_unlock(&g_polling_lock);
423 }
424 }
425 break;
426 case OZ_ELT_FAREWELL_REQ: {
427 struct oz_elt_farewell *body =
428 (struct oz_elt_farewell *)(elt + 1);
429 oz_trace("RX: OZ_ELT_FAREWELL_REQ\n");
430 oz_add_farewell(pd, body->ep_num,
    [15] Calling 'oz_add_farewell'
431 body->index, body->report,
432 elt->length + 1 - sizeof(*body));
433 }
434 break;
435 case OZ_ELT_APP_DATA:
436 if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
437 struct oz_app_hdr *app_hdr =
438 (struct oz_app_hdr *)(elt+1);
439 if (dup)
440 break;
441 oz_handle_app_elt(pd, app_hdr->app_id, elt);
442 }
443 break;
444 default:
445 oz_trace("RX: Unknown elt %02x\n", elt->type);
446 }
447 elt = oz_next_elt(elt);
448 }
449done:
450 if (pd)
451 oz_pd_put(pd);
452 consume_skb(skb);
453}
454/*------------------------------------------------------------------------------
455 * Context: process
456 */
457void oz_protocol_term(void)
458{
459 struct list_head *chain = NULL;
460 del_timer_sync(&g_timer);
461 /* Walk the list of bindings and remove each one.
462 */
463 spin_lock_bh(&g_binding_lock);
464 while (g_binding) {
465 struct oz_binding *b = g_binding;
466 g_binding = b->next;
467 spin_unlock_bh(&g_binding_lock);
468 dev_remove_pack(&b->ptype);
469 if (b->ptype.dev)
470 dev_put(b->ptype.dev);
471 kfree(b);
472 spin_lock_bh(&g_binding_lock);
473 }
474 spin_unlock_bh(&g_binding_lock);
475 /* Walk the list of PDs and stop each one. This causes the PD to be
476 * removed from the list so we can just pull each one from the head
477 * of the list.
478 */
479 spin_lock_bh(&g_polling_lock);
480 while (!list_empty(&g_pd_list)) {
481 struct oz_pd *pd =
482 list_first_entry(&g_pd_list, struct oz_pd, link);
483 oz_pd_get(pd);
484 spin_unlock_bh(&g_polling_lock);
485 oz_pd_stop(pd);
486 oz_pd_put(pd);
487 spin_lock_bh(&g_polling_lock);
488 }
489 chain = g_timer_pool;
490 g_timer_pool = NULL;
491 spin_unlock_bh(&g_polling_lock);
492 while (chain) {
493 struct oz_timer *t = container_of(chain, struct oz_timer, link);
494 chain = chain->next;
495 kfree(t);
496 }
497 oz_trace("Protocol stopped\n");
498}
499/*------------------------------------------------------------------------------
500 * Context: softirq
501 */
502static void oz_pd_handle_timer(struct oz_pd *pd, int type)
503{
504 switch (type) {
505 case OZ_TIMER_TOUT:
506 oz_pd_sleep(pd);
507 break;
508 case OZ_TIMER_STOP:
509 oz_pd_stop(pd);
510 break;
511 case OZ_TIMER_HEARTBEAT: {
512 u16 apps = 0;
513 spin_lock_bh(&g_polling_lock);
514 pd->heartbeat_requested = 0;
515 if (pd->state & OZ_PD_S_CONNECTED)
516 apps = pd->total_apps;
517 spin_unlock_bh(&g_polling_lock);
518 if (apps)
519 oz_pd_heartbeat(pd, apps);
520 }
521 break;
522 }
523}
524/*------------------------------------------------------------------------------
525 * Context: softirq
526 */
527static void oz_protocol_timer(unsigned long arg)
528{
529 struct oz_timer *t;
530 struct oz_timer *t2;
531 struct oz_pd *pd;
532 spin_lock_bh(&g_polling_lock);
533 if (!g_cur_timer) {
534 /* This happens if we remove the current timer but can't stop
535 * the timer from firing. In this case just get out.
536 */
537 oz_event_log(OZ_EVT_TIMER, 0, 0, NULL, 0);
538 spin_unlock_bh(&g_polling_lock);
539 return;
540 }
541 g_timer_state = OZ_TIMER_IN_HANDLER;
542 t = g_cur_timer;
543 g_cur_timer = NULL;
544 list_del(&t->link);
545 spin_unlock_bh(&g_polling_lock);
546 do {
547 pd = t->pd;
548 oz_event_log(OZ_EVT_TIMER, 0, t->type, NULL, 0);
549 oz_pd_handle_timer(pd, t->type);
550 spin_lock_bh(&g_polling_lock);
551 if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
552 t->link.next = g_timer_pool;
553 g_timer_pool = &t->link;
554 g_timer_pool_count++;
555 t = NULL;
556 }
557 if (!list_empty(&g_timer_list)) {
558 t2 = container_of(g_timer_list.next,
559 struct oz_timer, link);
560 if (time_before_eq(t2->due_time, jiffies))
561 list_del(&t2->link);
562 else
563 t2 = NULL;
564 } else {
565 t2 = NULL;
566 }
567 spin_unlock_bh(&g_polling_lock);
568 oz_pd_put(pd);
569 kfree(t);
570 t = t2;
571 } while (t);
572 g_timer_state = OZ_TIMER_IDLE;
573 oz_protocol_timer_start();
574}
575/*------------------------------------------------------------------------------
576 * Context: softirq
577 */
578static void oz_protocol_timer_start(void)
579{
580 spin_lock_bh(&g_polling_lock);
581 if (!list_empty(&g_timer_list)) {
582 g_cur_timer =
583 container_of(g_timer_list.next, struct oz_timer, link);
584 if (g_timer_state == OZ_TIMER_SET) {
585 oz_event_log(OZ_EVT_TIMER_CTRL, 3,
586 (u16)g_cur_timer->type, NULL,
587 (unsigned)g_cur_timer->due_time);
588 mod_timer(&g_timer, g_cur_timer->due_time);
589 } else {
590 oz_event_log(OZ_EVT_TIMER_CTRL, 4,
591 (u16)g_cur_timer->type, NULL,
592 (unsigned)g_cur_timer->due_time);
593 g_timer.expires = g_cur_timer->due_time;
594 g_timer.function = oz_protocol_timer;
595 g_timer.data = 0;
596 add_timer(&g_timer);
597 }
598 g_timer_state = OZ_TIMER_SET;
599 } else {
600 oz_trace("No queued timers\n");
601 }
602 spin_unlock_bh(&g_polling_lock);
603}
604/*------------------------------------------------------------------------------
605 * Context: softirq or process
606 */
607void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
608 int remove)
609{
610 struct list_head *e;
611 struct oz_timer *t = NULL;
612 int restart_needed = 0;
613 oz_event_log(OZ_EVT_TIMER_CTRL, 1, (u16)type, NULL, (unsigned)due_time);
614 spin_lock(&g_polling_lock);
615 if (remove) {
616 list_for_each(e, &g_timer_list) {
617 t = container_of(e, struct oz_timer, link);
618 if ((t->pd == pd) && (t->type == type)) {
619 if (g_cur_timer == t) {
620 restart_needed = 1;
621 g_cur_timer = NULL;
622 }
623 list_del(e);
624 break;
625 }
626 t = NULL;
627 }
628 }
629 if (!t) {
630 if (g_timer_pool) {
631 t = container_of(g_timer_pool, struct oz_timer, link);
632 g_timer_pool = g_timer_pool->next;
633 g_timer_pool_count--;
634 } else {
635 t = kmalloc(sizeof(struct oz_timer), GFP_ATOMIC);
636 }
637 if (t) {
638 t->pd = pd;
639 t->type = type;
640 oz_pd_get(pd);
641 }
642 }
643 if (t) {
644 struct oz_timer *t2;
645 t->due_time = due_time;
646 list_for_each(e, &g_timer_list) {
647 t2 = container_of(e, struct oz_timer, link);
648 if (time_before(due_time, t2->due_time)) {
649 if (t2 == g_cur_timer) {
650 g_cur_timer = NULL;
651 restart_needed = 1;
652 }
653 break;
654 }
655 }
656 list_add_tail(&t->link, e);
657 }
658 if (g_timer_state == OZ_TIMER_IDLE)
659 restart_needed = 1;
660 else if (g_timer_state == OZ_TIMER_IN_HANDLER)
661 restart_needed = 0;
662 spin_unlock(&g_polling_lock);
663 if (restart_needed)
664 oz_protocol_timer_start();
665}
666/*------------------------------------------------------------------------------
667 * Context: softirq or process
668 */
669void oz_timer_delete(struct oz_pd *pd, int type)
670{
671 struct list_head *chain = NULL;
672 struct oz_timer *t;
673 struct oz_timer *n;
674 int restart_needed = 0;
675 int release = 0;
676 oz_event_log(OZ_EVT_TIMER_CTRL, 2, (u16)type, NULL, 0);
677 spin_lock(&g_polling_lock);
678 list_for_each_entry_safe(t, n, &g_timer_list, link) {
679 if ((t->pd == pd) && ((type == 0) || (t->type == type))) {
680 if (g_cur_timer == t) {
681 restart_needed = 1;
682 g_cur_timer = NULL;
683 del_timer(&g_timer);
684 }
685 list_del(&t->link);
686 release++;
687 if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
688 t->link.next = g_timer_pool;
689 g_timer_pool = &t->link;
690 g_timer_pool_count++;
691 } else {
692 t->link.next = chain;
693 chain = &t->link;
694 }
695 if (type)
696 break;
697 }
698 }
699 if (g_timer_state == OZ_TIMER_IN_HANDLER)
700 restart_needed = 0;
701 else if (restart_needed)
702 g_timer_state = OZ_TIMER_IDLE;
703 spin_unlock(&g_polling_lock);
704 if (restart_needed)
705 oz_protocol_timer_start();
706 while (release--)
707 oz_pd_put(pd);
708 while (chain) {
709 t = container_of(chain, struct oz_timer, link);
710 chain = chain->next;
711 kfree(t);
712 }
713}
714/*------------------------------------------------------------------------------
715 * Context: softirq or process
716 */
717void oz_pd_request_heartbeat(struct oz_pd *pd)
718{
719 unsigned long now = jiffies;
720 unsigned long t;
721 spin_lock(&g_polling_lock);
722 if (pd->heartbeat_requested) {
723 spin_unlock(&g_polling_lock);
724 return;
725 }
726 if (pd->pulse_period_j)
727 t = ((now / pd->pulse_period_j) + 1) * pd->pulse_period_j;
728 else
729 t = now + 1;
730 pd->heartbeat_requested = 1;
731 spin_unlock(&g_polling_lock);
732 oz_timer_add(pd, OZ_TIMER_HEARTBEAT, t, 0);
733}
734/*------------------------------------------------------------------------------
735 * Context: softirq or process
736 */
737struct oz_pd *oz_pd_find(u8 *mac_addr)
738{
739 struct oz_pd *pd;
740 struct list_head *e;
741 spin_lock_bh(&g_polling_lock);
742 list_for_each(e, &g_pd_list) {
743 pd = container_of(e, struct oz_pd, link);
744 if (memcmp(pd->mac_addr, mac_addr, ETH_ALEN) == 0) {
745 atomic_inc(&pd->ref_count);
746 spin_unlock_bh(&g_polling_lock);
747 return pd;
748 }
749 }
750 spin_unlock_bh(&g_polling_lock);
751 return NULL;
752}
753/*------------------------------------------------------------------------------
754 * Context: process
755 */
756void oz_app_enable(int app_id, int enable)
757{
758 if (app_id <= OZ_APPID_MAX) {
759 spin_lock_bh(&g_polling_lock);
760 if (enable)
761 g_apps |= (1<<app_id);
762 else
763 g_apps &= ~(1<<app_id);
764 spin_unlock_bh(&g_polling_lock);
765 }
766}
767/*------------------------------------------------------------------------------
768 * Context: softirq
769 */
770static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
771 struct packet_type *pt, struct net_device *orig_dev)
772{
773 oz_event_log(OZ_EVT_RX_FRAME, 0, 0, NULL, 0);
774 skb = skb_share_check(skb, GFP_ATOMIC);
775 if (skb == NULL)
    [1] Taking false branch
776 return 0;
777 spin_lock_bh(&g_rx_queue.lock);
778 if (g_processing_rx) {
    [2] Taking false branch
779 /* We already hold the lock so use __ variant.
780 */
781 __skb_queue_head(&g_rx_queue, skb);
782 spin_unlock_bh(&g_rx_queue.lock);
783 } else {
784 g_processing_rx = 1;
785 do {
    [4] Loop condition is true. Execution continues on line 787
    [6] Loop condition is true. Execution continues on line 787
    [8] Loop condition is true. Execution continues on line 787
786
787 spin_unlock_bh(&g_rx_queue.lock);
788 oz_rx_frame(skb);
    [9] Calling 'oz_rx_frame'
789 spin_lock_bh(&g_rx_queue.lock);
790 if (skb_queue_empty(&g_rx_queue)) {
    [3] Taking false branch
    [5] Taking false branch
    [7] Taking false branch
791 g_processing_rx = 0;
792 spin_unlock_bh(&g_rx_queue.lock);
793 break;
794 }
795 /* We already hold the lock so use __ variant.
796 */
797 skb = __skb_dequeue(&g_rx_queue);
798 } while (1);
799 }
800 return 0;
801}
802/*------------------------------------------------------------------------------
803 * Context: process
804 */
805void oz_binding_add(char *net_dev)
806{
807 struct oz_binding *binding;
808
809 binding = kmalloc(sizeof(struct oz_binding), GFP_KERNEL);
810 if (binding) {
811 binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
812 binding->ptype.func = oz_pkt_recv;
813 memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
814 if (net_dev && *net_dev) {
815 oz_trace("Adding binding: %s\n", net_dev);
816 binding->ptype.dev =
817 dev_get_by_name(&init_net, net_dev);
818 if (binding->ptype.dev == NULL) {
819 oz_trace("Netdev %s not found\n", net_dev);
820 kfree(binding);
821 binding = NULL;
822 }
823 } else {
824 oz_trace("Binding to all netcards\n");
825 binding->ptype.dev = NULL;
826 }
827 if (binding) {
828 dev_add_pack(&binding->ptype);
829 spin_lock_bh(&g_binding_lock);
830 binding->next = g_binding;
831 g_binding = binding;
832 spin_unlock_bh(&g_binding_lock);
833 }
834 }
835}
836/*------------------------------------------------------------------------------
837 * Context: process
838 */
839static int compare_binding_name(char *s1, char *s2)
840{
841 int i;
842 for (i = 0; i < OZ_MAX_BINDING_LEN; i++) {
843 if (*s1 != *s2)
844 return 0;
845 if (!*s1++)
846 return 1;
847 s2++;
848 }
849 return 1;
850}
851/*------------------------------------------------------------------------------
852 * Context: process
853 */
854static void pd_stop_all_for_device(struct net_device *net_dev)
855{
856 struct list_head h;
857 struct oz_pd *pd;
858 struct oz_pd *n;
859 INIT_LIST_HEAD(&h);
860 spin_lock_bh(&g_polling_lock);
861 list_for_each_entry_safe(pd, n, &g_pd_list, link) {
862 if (pd->net_dev == net_dev) {
863 list_move(&pd->link, &h);
864 oz_pd_get(pd);
865 }
866 }
867 spin_unlock_bh(&g_polling_lock);
868 while (!list_empty(&h)) {
869 pd = list_first_entry(&h, struct oz_pd, link);
870 oz_pd_stop(pd);
871 oz_pd_put(pd);
872 }
873}
874/*------------------------------------------------------------------------------
875 * Context: process
876 */
877void oz_binding_remove(char *net_dev)
878{
879 struct oz_binding *binding = NULL;
880 struct oz_binding **link;
881 oz_trace("Removing binding: %s\n", net_dev);
882 spin_lock_bh(&g_binding_lock);
883 binding = g_binding;
884 link = &g_binding;
885 while (binding) {
886 if (compare_binding_name(binding->name, net_dev)) {
887 oz_trace("Binding '%s' found\n", net_dev);
888 *link = binding->next;
889 break;
890 } else {
891 link = &binding;
892 binding = binding->next;
893 }
894 }
895 spin_unlock_bh(&g_binding_lock);
896 if (binding) {
897 dev_remove_pack(&binding->ptype);
898 if (binding->ptype.dev) {
899 dev_put(binding->ptype.dev);
900 pd_stop_all_for_device(binding->ptype.dev);
901 }
902 kfree(binding);
903 }
904}
905/*------------------------------------------------------------------------------
906 * Context: process
907 */
908static char *oz_get_next_device_name(char *s, char *dname, int max_size)
909{
910 while (*s == ',')
911 s++;
912 while (*s && (*s != ',') && max_size > 1) {
913 *dname++ = *s++;
914 max_size--;
915 }
916 *dname = 0;
917 return s;
918}
919/*------------------------------------------------------------------------------
920 * Context: process
921 */
922int oz_protocol_init(char *devs)
923{
924 skb_queue_head_init(&g_rx_queue);
925 if (devs && (devs[0] == '*')) {
926 oz_binding_add(NULL);
927 } else {
928 char d[32];
929 while (*devs) {
930 devs = oz_get_next_device_name(devs, d, sizeof(d));
931 if (d[0])
932 oz_binding_add(d);
933 }
934 }
935 init_timer(&g_timer);
936 return 0;
937}
938/*------------------------------------------------------------------------------
939 * Context: process
940 */
941int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
942{
943 struct oz_pd *pd;
944 struct list_head *e;
945 int count = 0;
946 spin_lock_bh(&g_polling_lock);
947 list_for_each(e, &g_pd_list) {
948 if (count >= max_count)
949 break;
950 pd = container_of(e, struct oz_pd, link);
951 memcpy(&addr[count++], pd->mac_addr, ETH_ALEN);
952 }
953 spin_unlock_bh(&g_polling_lock);
954 return count;
955}
956/*------------------------------------------------------------------------------
957*/
958void oz_polling_lock_bh(void)
959{
960 spin_lock_bh(&g_polling_lock);
961}
962/*------------------------------------------------------------------------------
963*/
964void oz_polling_unlock_bh(void)
965{
966 spin_unlock_bh(&g_polling_lock);
967}