// autogenerated by syzkaller (https://github.com/google/syzkaller)
//
// Reproducer: races KVM_CREATE_VCPU against KVM_CREATE_IRQCHIP across two
// VMs from 16 threads, and reports ("foobar") whenever exactly one of the
// two symmetric vcpu creations fails while the other succeeds.
//
// NOTE(review): the original #include list was mangled during extraction
// (the header names between '<' and '>' were stripped). The list below is
// reconstructed to cover every symbol this file uses — confirm against the
// original reproducer if byte-exact fidelity matters.

#define _GNU_SOURCE // must precede any #include to expose setpgrp/usleep/etc.

#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#include <linux/futex.h>
#include <linux/kvm.h>

// File descriptors >= 3 and < MAX_FDS are scrubbed between iterations.
#define MAX_FDS 1024

// Start a detached-ish worker with a small (128 KiB) stack.
// Retries up to 100 times on EAGAIN (thread limit); any other failure,
// or exhausting the retries, aborts the process.
static void thread_start(void* (*fn)(void*), void* arg)
{
  pthread_t th;
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setstacksize(&attr, 128 << 10);
  int i = 0;
  for (; i < 100; i++) {
    if (pthread_create(&th, &attr, fn, arg) == 0) {
      pthread_attr_destroy(&attr);
      return;
    }
    // pthread_create returns the error, but syzkaller checks errno here;
    // retry only when the kernel is temporarily out of resources.
    if (errno == EAGAIN) {
      usleep(50);
      continue;
    }
    break;
  }
  exit(1);
}

// Minimal one-shot event built directly on futex(2).
// state: 0 = unsignaled, 1 = signaled.
typedef struct {
  int state;
} event_t;

static void event_init(event_t* ev)
{
  ev->state = 0;
}

static void event_reset(event_t* ev)
{
  ev->state = 0;
}

// Signal the event and wake all waiters. Double-set is a logic error.
static void event_set(event_t* ev)
{
  if (ev->state)
    exit(1);
  __atomic_store_n(&ev->state, 1, __ATOMIC_RELEASE);
  syscall(SYS_futex, &ev->state, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1000000);
}

// Block until the event is signaled (tolerates spurious futex wakeups).
static void event_wait(event_t* ev)
{
  while (!__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
    syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, 0);
}

// Non-blocking poll of the event state.
static int event_isset(event_t* ev)
{
  return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE);
}

// Die with the parent (so orphaned repro processes don't linger) and
// become a process-group leader so the whole group can be signaled.
static void setup_test()
{
  prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
  setpgrp();
}

// Close every fd the test may have leaked, sparing stdio (0..2).
static void close_fds()
{
  for (int fd = 3; fd < MAX_FDS; fd++)
    close(fd);
}

// One repro attempt: build two VMs, give each an in-kernel irqchip, then
// create one vcpu in each. If exactly one of the two vcpu creations fails
// (the asymmetry the repro hunts for), dump diagnostic state to stderr.
// iter/i only label the log output. Always returns 0.
static long syz_func(int iter, int i)
{
  int kvm_fd1 = -1, vm_fd1 = -1, vcpu_fd1 = -1;
  int kvm_fd2 = -1, vm_fd2 = -1, vcpu_fd2 = -1;
  int kvm_check_fd;
  int err_irq1, err_irq2;
  int err_vcpu1, err_vcpu2;

  // Probe for /dev/kvm first; silently skip the test if KVM is absent.
  kvm_check_fd = open("/dev/kvm", O_RDWR);
  if (kvm_check_fd == -1) {
    return 0;
  }
  close(kvm_check_fd);

  kvm_fd1 = open("/dev/kvm", O_RDWR);
  if (kvm_fd1 == -1)
    goto cleanup;
  vm_fd1 = ioctl(kvm_fd1, KVM_CREATE_VM, 0);
  if (vm_fd1 == -1)
    goto cleanup;
  // Record errno from the irqchip ioctl; errno is cleared first because a
  // successful ioctl does not reset it.
  errno = 0;
  ioctl(vm_fd1, KVM_CREATE_IRQCHIP, 0);
  err_irq1 = errno;

  kvm_fd2 = open("/dev/kvm", O_RDWR);
  if (kvm_fd2 == -1)
    goto cleanup;
  vm_fd2 = ioctl(kvm_fd2, KVM_CREATE_VM, 0);
  if (vm_fd2 == -1)
    goto cleanup;
  errno = 0;
  ioctl(vm_fd2, KVM_CREATE_IRQCHIP, 0);
  err_irq2 = errno;

  // Only proceed when both irqchips were created cleanly.
  if (err_irq1 || err_irq2)
    goto cleanup;

  errno = 0;
  vcpu_fd1 = ioctl(vm_fd1, KVM_CREATE_VCPU, 0);
  err_vcpu1 = errno;
  errno = 0;
  vcpu_fd2 = ioctl(vm_fd2, KVM_CREATE_VCPU, 0);
  err_vcpu2 = errno;
  errno = 0;

  // XOR: exactly one of the two symmetric vcpu creations failed — the
  // inconsistent outcome this reproducer is looking for.
  if ((vcpu_fd1 == -1) ^ (vcpu_fd2 == -1)) {
    fprintf(stderr, "[pid %d] foobar %d %d!\n", getpid(), iter, i);
    fprintf(stderr, "kvm_fd1=%d\n", kvm_fd1);
    fprintf(stderr, "kvm_fd2=%d\n", kvm_fd2);
    fprintf(stderr, "vm_fd1=%d, err_irq1=%d\n", vm_fd1, err_irq1);
    fprintf(stderr, "vm_fd2=%d, err_irq2=%d\n", vm_fd2, err_irq2);
    fprintf(stderr, "vcpu_fd1=%d, err_vcpu1=%d\n", vcpu_fd1, err_vcpu1);
    fprintf(stderr, "vcpu_fd2=%d, err_vcpu2=%d\n", vcpu_fd2, err_vcpu2);
    // Retry with vcpu id 1 to see whether the failure persists.
    vcpu_fd1 = ioctl(vm_fd1, KVM_CREATE_VCPU, 1);
    err_vcpu1 = errno;
    errno = 0;
    fprintf(stderr, "trying again vcpu_fd1=%d, err_vcpu1=%d\n", vcpu_fd1,
            err_vcpu1);
    vcpu_fd2 = ioctl(vm_fd2, KVM_CREATE_VCPU, 1);
    err_vcpu2 = errno;
    errno = 0;
    fprintf(stderr, "trying again vcpu_fd2=%d, err_vcpu2=%d\n", vcpu_fd2,
            err_vcpu2);
    // Dump this process's fd table for post-mortem analysis.
    char f[64];
    sprintf(f, "ls -l /proc/%d/fd", getpid());
    system(f);
    fprintf(stderr, "[pid %d] foobar %d %d done!\n", getpid(), iter, i);
  }

cleanup:
  if (vcpu_fd1 != -1)
    close(vcpu_fd1);
  if (vcpu_fd2 != -1)
    close(vcpu_fd2);
  if (vm_fd1 != -1)
    close(vm_fd1);
  if (vm_fd2 != -1)
    close(vm_fd2);
  if (kvm_fd1 != -1)
    close(kvm_fd1);
  if (kvm_fd2 != -1)
    close(kvm_fd2);
  return 0;
}

// Per-worker bookkeeping: 'ready' is set by the driver to hand out work,
// 'done' is set by the worker when the batch finished.
struct thread_t {
  int created, iter;
  event_t ready, done;
};

static struct thread_t threads[16];
static int running; // count of workers currently executing a batch

// Worker loop: wait for a 'ready' signal, run 33 repro attempts, then
// report completion via 'done'.
static void* thr(void* arg)
{
  struct thread_t* th = (struct thread_t*)arg;
  for (;;) {
    event_wait(&th->ready);
    event_reset(&th->ready);
    for (int i = 0; i <= 32; i++) {
      syz_func(th->iter, i);
    }
    __atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
    event_set(&th->done);
  }
  return 0;
}

// One driver iteration: lazily spawn the 16 workers, kick every idle one,
// then wait up to ~100ms for them to finish (stragglers are simply left
// running into the next iteration).
static void execute_one(int iter)
{
  write(1, "executing program\n", sizeof("executing program\n") - 1);
  int i, thread;
  for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0]));
       thread++) {
    struct thread_t* th = &threads[thread];
    if (!th->created) {
      th->created = 1;
      th->iter = iter;
      event_init(&th->ready);
      event_init(&th->done);
      // Start in the 'done' state so the first pass hands out work.
      event_set(&th->done);
      thread_start(thr, th);
    }
    if (event_isset(&th->done)) {
      event_reset(&th->done);
      __atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
      event_set(&th->ready);
    }
  }
  for (i = 0; i < 100 && __atomic_load_n(&running, __ATOMIC_RELAXED); i++)
    usleep(1000);
}

int main(void)
{
  int iter = 0;
  setup_test();
  for (;;) {
    execute_one(iter++);
    close_fds();
  }
  return 0;
}