• Bug#1105129: Please backport ntsync driver from Linux-6.14 to trixie

    From Piotr Morgwai Kotarbinski@21:1/5 to All on Sun May 11 22:50:01 2025
    [continued from previous message]

    + prev_state = event->u.event.signaled;
    + event->u.event.signaled = true;
    + if (all)
    + try_wake_all_obj(dev, event);
    + try_wake_any_event(event);
    + if (pulse)
    + event->u.event.signaled = false;
    +
    + ntsync_unlock_obj(dev, event, all);
    +
    + if (put_user(prev_state, (__u32 __user *)argp))
    + return -EFAULT;
    +
    + return 0;
    +}
    +
    +static int ntsync_event_reset(struct ntsync_obj *event, void __user *argp)
    +{
    + struct ntsync_device *dev = event->dev;
    + __u32 prev_state;
    + bool all;
    +
    + if (event->type != NTSYNC_TYPE_EVENT)
    + return -EINVAL;
    +
    + all = ntsync_lock_obj(dev, event);
    +
    + prev_state = event->u.event.signaled;
    + event->u.event.signaled = false;
    +
    + ntsync_unlock_obj(dev, event, all);
    +
    + if (put_user(prev_state, (__u32 __user *)argp))
    + return -EFAULT;
    +
    + return 0;
    +}
    +
    +static int ntsync_sem_read(struct ntsync_obj *sem, void __user *argp)
    +{
    + struct ntsync_sem_args __user *user_args = argp;
    + struct ntsync_device *dev = sem->dev;
    + struct ntsync_sem_args args;
    + bool all;
    +
    + if (sem->type != NTSYNC_TYPE_SEM)
    + return -EINVAL;
    +
    + all = ntsync_lock_obj(dev, sem);

    + args.count = sem->u.sem.count;
    + args.max = sem->u.sem.max;
    +
    + ntsync_unlock_obj(dev, sem, all);
    +
    + if (copy_to_user(user_args, &args, sizeof(args)))
    + return -EFAULT;
    + return 0;
    +}
    +
    +static int ntsync_mutex_read(struct ntsync_obj *mutex, void __user *argp)
    +{
    + struct ntsync_mutex_args __user *user_args = argp;
    + struct ntsync_device *dev = mutex->dev;
    + struct ntsync_mutex_args args;
    + bool all;
    + int ret;
    +
    + if (mutex->type != NTSYNC_TYPE_MUTEX)
    + return -EINVAL;
    +
    + all = ntsync_lock_obj(dev, mutex);
    +
    + args.count = mutex->u.mutex.count;
    + args.owner = mutex->u.mutex.owner;
    + ret = mutex->u.mutex.ownerdead ? -EOWNERDEAD : 0;
    +
    + ntsync_unlock_obj(dev, mutex, all);
    +
    + if (copy_to_user(user_args, &args, sizeof(args)))
    + return -EFAULT;
    + return ret;
    +}
    +
    +static int ntsync_event_read(struct ntsync_obj *event, void __user *argp)
    +{
    + struct ntsync_event_args __user *user_args = argp;
    + struct ntsync_device *dev = event->dev;
    + struct ntsync_event_args args;
    + bool all;
    +
    + if (event->type != NTSYNC_TYPE_EVENT)
    + return -EINVAL;
    +
    + all = ntsync_lock_obj(dev, event);
    +
    + args.manual = event->u.event.manual;
    + args.signaled = event->u.event.signaled;
    +
    + ntsync_unlock_obj(dev, event, all);
    +
    + if (copy_to_user(user_args, &args, sizeof(args)))
    + return -EFAULT;
    + return 0;
    +}
    +
    +static void ntsync_free_obj(struct ntsync_obj *obj)
    +{
    fput(obj->dev->file);
    kfree(obj);
    +}

    +static int ntsync_obj_release(struct inode *inode, struct file *file)
    +{
    + ntsync_free_obj(file->private_data);
    return 0;
    }

    @@ -114,8 +670,24 @@ static long ntsync_obj_ioctl(struct file
    void __user *argp = (void __user *)parm;

    switch (cmd) {
    - case NTSYNC_IOC_SEM_POST:
    - return ntsync_sem_post(obj, argp);
    + case NTSYNC_IOC_SEM_RELEASE:
    + return ntsync_sem_release(obj, argp);
    + case NTSYNC_IOC_SEM_READ:
    + return ntsync_sem_read(obj, argp);
    + case NTSYNC_IOC_MUTEX_UNLOCK:
    + return ntsync_mutex_unlock(obj, argp);
    + case NTSYNC_IOC_MUTEX_KILL:
    + return ntsync_mutex_kill(obj, argp);
    + case NTSYNC_IOC_MUTEX_READ:
    + return ntsync_mutex_read(obj, argp);
    + case NTSYNC_IOC_EVENT_SET:
    + return ntsync_event_set(obj, argp, false);
    + case NTSYNC_IOC_EVENT_RESET:
    + return ntsync_event_reset(obj, argp);
    + case NTSYNC_IOC_EVENT_PULSE:
    + return ntsync_event_set(obj, argp, true);
    + case NTSYNC_IOC_EVENT_READ:
    + return ntsync_event_read(obj, argp);
    default:
    return -ENOIOCTLCMD;
    }
    @@ -140,6 +712,9 @@ static struct ntsync_obj *ntsync_alloc_o
    obj->dev = dev;
    get_file(dev->file);
    spin_lock_init(&obj->lock);
    + INIT_LIST_HEAD(&obj->any_waiters);
    + INIT_LIST_HEAD(&obj->all_waiters);
    + atomic_set(&obj->all_hint, 0);

    return obj;
    }
    @@ -165,7 +740,6 @@ static int ntsync_obj_get_fd(struct ntsy

    static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp)
    {
    - struct ntsync_sem_args __user *user_args = argp;
    struct ntsync_sem_args args;
    struct ntsync_obj *sem;
    int fd;
    @@ -182,12 +756,398 @@ static int ntsync_create_sem(struct ntsy
    sem->u.sem.count = args.count;
    sem->u.sem.max = args.max;
    fd = ntsync_obj_get_fd(sem);
    - if (fd < 0) {
    - kfree(sem);
    - return fd;
    + if (fd < 0)
    + ntsync_free_obj(sem);
    +
    + return fd;
    +}
    +
    +static int ntsync_create_mutex(struct ntsync_device *dev, void __user *argp)
    +{
    + struct ntsync_mutex_args args;
    + struct ntsync_obj *mutex;
    + int fd;
    +
    + if (copy_from_user(&args, argp, sizeof(args)))
    + return -EFAULT;
    +
    + if (!args.owner != !args.count)
    + return -EINVAL;
    +
    + mutex = ntsync_alloc_obj(dev, NTSYNC_TYPE_MUTEX);
    + if (!mutex)
    + return -ENOMEM;
    + mutex->u.mutex.count = args.count;
    + mutex->u.mutex.owner = args.owner;
    + fd = ntsync_obj_get_fd(mutex);
    + if (fd < 0)
    + ntsync_free_obj(mutex);
    +
    + return fd;
    +}
    +
    +static int ntsync_create_event(struct ntsync_device *dev, void __user *argp)
    +{
    + struct ntsync_event_args args;
    + struct ntsync_obj *event;
    + int fd;
    +
    + if (copy_from_user(&args, argp, sizeof(args)))
    + return -EFAULT;
    +
    + event = ntsync_alloc_obj(dev, NTSYNC_TYPE_EVENT);
    + if (!event)
    + return -ENOMEM;
    + event->u.event.manual = args.manual;
    + event->u.event.signaled = args.signaled;
    + fd = ntsync_obj_get_fd(event);
    + if (fd < 0)
    + ntsync_free_obj(event);
    +
    + return fd;
    +}
    +
    +static struct ntsync_obj *get_obj(struct ntsync_device *dev, int fd)
    +{
    + struct file *file = fget(fd);
    + struct ntsync_obj *obj;
    +
    + if (!file)
    + return NULL;
    +
    + if (file->f_op != &ntsync_obj_fops) {
    + fput(file);
    + return NULL;
    }

    - return put_user(fd, &user_args->sem);
    + obj = file->private_data;
    + if (obj->dev != dev) {
    + fput(file);
    + return NULL;
    + }
    +
    + return obj;
    +}
    +
    +static void put_obj(struct ntsync_obj *obj)
    +{
    + fput(obj->file);
    +}
    +
    +static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_args *args)
    +{
    + ktime_t timeout = ns_to_ktime(args->timeout);
    + clockid_t clock = CLOCK_MONOTONIC;
    + ktime_t *timeout_ptr;
    + int ret = 0;
    +
    + timeout_ptr = (args->timeout == U64_MAX ? NULL : &timeout);
    +
    + if (args->flags & NTSYNC_WAIT_REALTIME)
    + clock = CLOCK_REALTIME;
    +
    + do {
    + if (signal_pending(current)) {
    + ret = -ERESTARTSYS;
    + break;
    + }
    +
    + set_current_state(TASK_INTERRUPTIBLE);
    + if (atomic_read(&q->signaled) != -1) {
    + ret = 0;
    + break;
    + }
    + ret = schedule_hrtimeout_range_clock(timeout_ptr, 0, HRTIMER_MODE_ABS, clock);
    + } while (ret < 0);
    + __set_current_state(TASK_RUNNING);
    +
    + return ret;
    +}
    +
    +/*
    + * Allocate and initialize the ntsync_q structure, but do not queue us yet.
    + */
    +static int setup_wait(struct ntsync_device *dev,
    + const struct ntsync_wait_args *args, bool all,
    + struct ntsync_q **ret_q)
    +{
    + int fds[NTSYNC_MAX_WAIT_COUNT + 1];
    + const __u32 count = args->count;
    + size_t size = array_size(count, sizeof(fds[0]));
    + struct ntsync_q *q;
    + __u32 total_count;
    + __u32 i, j;
    +
    + if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME))
    + return -EINVAL;
    +
    + if (size >= sizeof(fds))
    + return -EINVAL;
    +
    + total_count = count;
    + if (args->alert)
    + total_count++;
    +
    + if (copy_from_user(fds, u64_to_user_ptr(args->objs), size))
    + return -EFAULT;
    + if (args->alert)
    + fds[count] = args->alert;
    +
    + q = kmalloc(struct_size(q, entries, total_count), GFP_KERNEL);
    + if (!q)
    + return -ENOMEM;
    + q->task = current;
    + q->owner = args->owner;
    + atomic_set(&q->signaled, -1);
    + q->all = all;
    + q->ownerdead = false;
    + q->count = count;
    +
    + for (i = 0; i < total_count; i++) {
    + struct ntsync_q_entry *entry = &q->entries[i];
    + struct ntsync_obj *obj = get_obj(dev, fds[i]);
    +
    + if (!obj)
    + goto err;
    +
    + if (all) {
    + /* Check that the objects are all distinct. */
    + for (j = 0; j < i; j++) {
    + if (obj == q->entries[j].obj) {
    + put_obj(obj);
    + goto err;
    + }
    + }
    + }
    +
    + entry->obj = obj;
    + entry->q = q;
    + entry->index = i;
    + }
    +
    + *ret_q = q;
    + return 0;
    +
    +err:
    + for (j = 0; j < i; j++)
    + put_obj(q->entries[j].obj);
    + kfree(q);
    + return -EINVAL;
    +}
    +
    +static void try_wake_any_obj(struct ntsync_obj *obj)
    +{
    + switch (obj->type) {
    + case NTSYNC_TYPE_SEM:
    + try_wake_any_sem(obj);
    + break;
    + case NTSYNC_TYPE_MUTEX:
    + try_wake_any_mutex(obj);
    + break;
    + case NTSYNC_TYPE_EVENT:
    + try_wake_any_event(obj);
    + break;
    + }
    +}
    +
    +static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp)
    +{
    + struct ntsync_wait_args args;
    + __u32 i, total_count;
    + struct ntsync_q *q;
    + int signaled;
    + bool all;
    + int ret;
    +
    + if (copy_from_user(&args, argp, sizeof(args)))
    + return -EFAULT;
    +
    + ret = setup_wait(dev, &args, false, &q);
    + if (ret < 0)
    + return ret;
    +
    + total_count = args.count;
    + if (args.alert)
    + total_count++;
    +
    + /* queue ourselves */
    +
    + for (i = 0; i < total_count; i++) {
    + struct ntsync_q_entry *entry = &q->entries[i];
    + struct ntsync_obj *obj = entry->obj;
    +
    + all = ntsync_lock_obj(dev, obj);
    + list_add_tail(&entry->node, &obj->any_waiters);
    + ntsync_unlock_obj(dev, obj, all);
    + }
    +
    + /*
    + * Check if we are already signaled.
    + *
    + * Note that the API requires that normal objects are checked before
    + * the alert event. Hence we queue the alert event last, and check
    + * objects in order.
    + */
    +
    + for (i = 0; i < total_count; i++) {
    + struct ntsync_obj *obj = q->entries[i].obj;
    +
    + if (atomic_read(&q->signaled) != -1)
    + break;
    +
    + all = ntsync_lock_obj(dev, obj);
    + try_wake_any_obj(obj);
    + ntsync_unlock_obj(dev, obj, all);
    + }
    +
    + /* sleep */
    +
    + ret = ntsync_schedule(q, &args);
    +
    + /* and finally, unqueue */
    +
    + for (i = 0; i < total_count; i++) {
    + struct ntsync_q_entry *entry = &q->entries[i];
    + struct ntsync_obj *obj = entry->obj;
    +
    + all = ntsync_lock_obj(dev, obj);
    + list_del(&entry->node);
    + ntsync_unlock_obj(dev, obj, all);
    +
    + put_obj(obj);
    + }
    +
    + signaled = atomic_read(&q->signaled);
    + if (signaled != -1) {
    + struct ntsync_wait_args __user *user_args = argp;
    +
    + /* even if we caught a signal, we need to communicate success */
    + ret = q->ownerdead ? -EOWNERDEAD : 0;
    +
    + if (put_user(signaled, &user_args->index))
    + ret = -EFAULT;
    + } else if (!ret) {
    + ret = -ETIMEDOUT;
    + }
    +
    + kfree(q);
    + return ret;
    +}
    +
    +static int ntsync_wait_all(struct ntsync_device *dev, void __user *argp)
    +{
    + struct ntsync_wait_args args;
    + struct ntsync_q *q;
    + int signaled;
    + __u32 i;
    + int ret;
    +
    + if (copy_from_user(&args, argp, sizeof(args)))
    + return -EFAULT;
    +
    + ret = setup_wait(dev, &args, true, &q);
    + if (ret < 0)
    + return ret;
    +
    + /* queue ourselves */
    +
    + mutex_lock(&dev->wait_all_lock);
    +
    + for (i = 0; i < args.count; i++) {
    + struct ntsync_q_entry *entry = &q->entries[i];
    + struct ntsync_obj *obj = entry->obj;
    +
    + atomic_inc(&obj->all_hint);
    +
    + /*
    + * obj->all_waiters is protected by dev->wait_all_lock rather
    + * than obj->lock, so there is no need to acquire obj->lock
    + * here.
    + */
    + list_add_tail(&entry->node, &obj->all_waiters);
    + }
    + if (args.alert) {
    + struct ntsync_q_entry *entry = &q->entries[args.count];
    + struct ntsync_obj *obj = entry->obj;
    +
    + dev_lock_obj(dev, obj);
    + list_add_tail(&entry->node, &obj->any_waiters);
    + dev_unlock_obj(dev, obj);
    + }
    +
    + /* check if we are already signaled */
    +
    + try_wake_all(dev, q, NULL);
    +
    + mutex_unlock(&dev->wait_all_lock);
    +
    + /*
    + * Check if the alert event is signaled, making sure to do so only
    + * after checking if the other objects are signaled.
    + */
    +
    + if (args.alert) {
    + struct ntsync_obj *obj = q->entries[args.count].obj;
    +
    + if (atomic_read(&q->signaled) == -1) {
    + bool all = ntsync_lock_obj(dev, obj);
    + try_wake_any_obj(obj);
    + ntsync_unlock_obj(dev, obj, all);
    + }
    + }
    +
    + /* sleep */
    +
    + ret = ntsync_schedule(q, &args);
    +
    + /* and finally, unqueue */
    +
    + mutex_lock(&dev->wait_all_lock);
    +
    + for (i = 0; i < args.count; i++) {
    + struct ntsync_q_entry *entry = &q->entries[i];
    + struct ntsync_obj *obj = entry->obj;
    +
    + /*
    + * obj->all_waiters is protected by dev->wait_all_lock rather
    + * than obj->lock, so there is no need to acquire it here.
    + */
    + list_del(&entry->node);
    +
    + atomic_dec(&obj->all_hint);
    +
    + put_obj(obj);
    + }
    +
    + mutex_unlock(&dev->wait_all_lock);
    +
    + if (args.alert) {
    + struct ntsync_q_entry *entry = &q->entries[args.count];
    + struct ntsync_obj *obj = entry->obj;
    + bool all;
    +
    + all = ntsync_lock_obj(dev, obj);
    + list_del(&entry->node);
    + ntsync_unlock_obj(dev, obj, all);
    +
    + put_obj(obj);
    + }
    +
    + signaled = atomic_read(&q->signaled);
    + if (signaled != -1) {
    + struct ntsync_wait_args __user *user_args = argp;
    +
    + /* even if we caught a signal, we need to communicate success */
    + ret = q->ownerdead ? -EOWNERDEAD : 0;
    +
    + if (put_user(signaled, &user_args->index))
    + ret = -EFAULT;
    + } else if (!ret) {
    + ret = -ETIMEDOUT;
    + }
    +
    + kfree(q);
    + return ret;
    }

    static int ntsync_char_open(struct inode *inode, struct file *file)
    @@ -198,6 +1158,8 @@ static int ntsync_char_open(struct inode
    if (!dev)
    return -ENOMEM;

    + mutex_init(&dev->wait_all_lock);
    +
    file->private_data = dev;
    dev->file = file;
    return nonseekable_open(inode, file);
    @@ -219,8 +1181,16 @@ static long ntsync_char_ioctl(struct fil
    void __user *argp = (void __user *)parm;

    switch (cmd) {
    + case NTSYNC_IOC_CREATE_EVENT:
    + return ntsync_create_event(dev, argp);
    + case NTSYNC_IOC_CREATE_MUTEX:
    + return ntsync_create_mutex(dev, argp);
    case NTSYNC_IOC_CREATE_SEM:
    return ntsync_create_sem(dev, argp);
    + case NTSYNC_IOC_WAIT_ALL:
    + return ntsync_wait_all(dev, argp);
    + case NTSYNC_IOC_WAIT_ANY:
    + return ntsync_wait_any(dev, argp);
    default:
    return -ENOIOCTLCMD;
    }
    @@ -238,6 +1208,7 @@ static struct miscdevice ntsync_misc = {
    .minor = MISC_DYNAMIC_MINOR,
    .name = NTSYNC_NAME,
    .fops = &ntsync_fops,
    + .mode = 0666,
    };

    module_misc_device(ntsync_misc);
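
    For reference, below is a minimal userspace sketch of how the interface added
    by this patch might be exercised: create a manual-reset event, signal it with
    NTSYNC_IOC_EVENT_SET, then wait on it with NTSYNC_IOC_WAIT_ANY. It uses only
    the structures and ioctls introduced above; the /dev/ntsync device path, the
    owner value and the omitted error handling are illustrative assumptions, not
    part of the patch.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/ntsync.h>

    int main(void)
    {
            struct ntsync_event_args ev_args = { .manual = 1, .signaled = 0 };
            struct ntsync_wait_args wait_args = { 0 };
            __u32 prev;
            int fds[1];
            int dev, ev;

            dev = open("/dev/ntsync", O_RDWR);      /* assumed device node path */
            if (dev < 0)
                    return 1;

            /* NTSYNC_IOC_CREATE_EVENT returns a new object fd on success */
            ev = ioctl(dev, NTSYNC_IOC_CREATE_EVENT, &ev_args);
            if (ev < 0)
                    return 1;

            /* signal the event; the previous state is written back to 'prev' */
            ioctl(ev, NTSYNC_IOC_EVENT_SET, &prev);

            /* wait for any of the listed objects; U64_MAX means no timeout */
            fds[0] = ev;
            wait_args.timeout = UINT64_MAX;
            wait_args.objs = (uintptr_t)fds;
            wait_args.count = 1;
            wait_args.owner = 123;                  /* arbitrary nonzero owner id */

            if (ioctl(dev, NTSYNC_IOC_WAIT_ANY, &wait_args) == 0)
                    printf("woken by object index %u\n", wait_args.index);

            close(ev);
            close(dev);
            return 0;
    }

    Since the event is already signaled when the wait is issued, the
    NTSYNC_IOC_WAIT_ANY call should return immediately and report index 0.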
    Index: linux-6.12.27/include/uapi/linux/ntsync.h
    ===================================================================
    --- linux-6.12.27.orig/include/uapi/linux/ntsync.h
    +++ linux-6.12.27/include/uapi/linux/ntsync.h
    @@ -11,13 +11,49 @@
    #include <linux/types.h>

    struct ntsync_sem_args {
    - __u32 sem;
    __u32 count;
    __u32 max;
    };

    -#define NTSYNC_IOC_CREATE_SEM _IOWR('N', 0x80, struct ntsync_sem_args)
    +struct ntsync_mutex_args {
    + __u32 owner;
    + __u32 count;
    +};
    +
    +struct ntsync_event_args {
    + __u32 manual;
    + __u32 signaled;
    +};
    +
    +#define NTSYNC_WAIT_REALTIME 0x1
    +
    +struct ntsync_wait_args {
    + __u64 timeout;
    + __u64 objs;
    + __u32 count;
    + __u32 index;
    + __u32 flags;
    + __u32 owner;
    + __u32 alert;
    + __u32 pad;
    +};
    +
    +#define NTSYNC_MAX_WAIT_COUNT 64
    +
    +#define NTSYNC_IOC_CREATE_SEM _IOW ('N', 0x80, struct ntsync_sem_args)
    +#define NTSYNC_IOC_WAIT_ANY _IOWR('N', 0x82, struct ntsync_wait_args)
    +#define NTSYNC_IOC_WAIT_ALL _IOWR('N', 0x83, struct ntsync_wait_args)
    +#define NTSYNC_IOC_CREATE_MUTEX _IOW ('N', 0x84, struct ntsync_mutex_args)
    +#define NTSYNC_IOC_CREATE_EVENT _IOW ('N',