Update binder to the latest version
Over the last few years, many fixes and changes have been applied to Binder in the Linux kernel, but these fixes were never backported to anbox-modules. This meant that the version of Binder in anbox-modules was very outdated. With this commit, Binder has been updated to the latest state from the Linux kernel. This should also fix the build on kernel 5.12 and later.
This commit is contained in:
parent 6ddae19459
commit 0338a34979
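Most of the build fixes for newer kernels live in the reworked deps.c further down, which resolves kernel symbols that are not exported to modules at runtime. As a rough sketch only (not taken from this commit; the helper and variable names are invented for illustration), the usual pattern combines a LINUX_VERSION_CODE guard with a temporary kprobe to recover kallsyms_lookup_name() on kernels 5.7 and later:

/*
 * Illustrative sketch only, not part of this commit: the function and
 * variable names are made up for the example.
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/version.h>

static unsigned long (*lookup_name)(const char *name);

static int resolve_kallsyms_lookup_name(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)
	/*
	 * kallsyms_lookup_name() is no longer exported since 5.7; learn its
	 * address by registering (and immediately removing) a kprobe on it.
	 */
	struct kprobe kp = { .symbol_name = "kallsyms_lookup_name" };
	int ret = register_kprobe(&kp);

	if (ret)
		return ret;
	lookup_name = (void *)kp.addr;
	unregister_kprobe(&kp);
#else
	/* Older kernels still export it, so it can be used directly. */
	lookup_name = kallsyms_lookup_name;
#endif
	return 0;
}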
@@ -0,0 +1,9 @@
+*.ko
+*.mod
+*.mod.c
+*.o
+*.order
+*.symvers
+*.swp
+.*.cmd
+.tmp_versions
@@ -1,6 +1,6 @@
-ccflags-y += -I$(src) -Wno-int-conversion -DCONFIG_ANDROID_BINDER_DEVICES="\"binder\""
+ccflags-y += -I$(src) -Wno-int-conversion -DCONFIG_ANDROID_BINDER_DEVICES="\"binder\"" -DCONFIG_ANDROID_BINDERFS="y"
 obj-m := binder_linux.o
-binder_linux-y := deps.o binder.o
+binder_linux-y := deps.o binder.o binder_alloc.o binderfs.o
 
 KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
 
@@ -11,4 +11,4 @@ install:
 	cp binder_linux.ko $(DESTDIR)/
 
 clean:
-	rm -rf deps.h *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions
+	rm -rf *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions
binder/binder.c (5325 changed lines): File diff suppressed because it is too large
@ -1,3 +1,4 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
||||
/*
|
||||
* Copyright (C) 2008 Google, Inc.
|
||||
*
|
||||
|
@ -20,6 +21,8 @@
|
|||
#ifndef _UAPI_LINUX_BINDER_H
|
||||
#define _UAPI_LINUX_BINDER_H
|
||||
|
||||
#define BINDER_IPC_32BIT 1
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/ioctl.h>
|
||||
|
||||
|
@ -40,6 +43,14 @@ enum {
|
|||
enum {
|
||||
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
|
||||
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
|
||||
|
||||
/**
|
||||
* @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
|
||||
*
|
||||
* Only when set, causes senders to include their security
|
||||
* context
|
||||
*/
|
||||
FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
|
||||
};
|
||||
|
||||
#ifdef BINDER_IPC_32BIT
|
||||
|
@ -132,6 +143,7 @@ enum {
|
|||
|
||||
/* struct binder_fd_array_object - object describing an array of fds in a buffer
|
||||
* @hdr: common header structure
|
||||
* @pad: padding to ensure correct alignment
|
||||
* @num_fds: number of file descriptors in the buffer
|
||||
* @parent: index in offset array to buffer holding the fd array
|
||||
* @parent_offset: start offset of fd array in the buffer
|
||||
|
@ -152,6 +164,7 @@ enum {
|
|||
*/
|
||||
struct binder_fd_array_object {
|
||||
struct binder_object_header hdr;
|
||||
__u32 pad;
|
||||
binder_size_t num_fds;
|
||||
binder_size_t parent;
|
||||
binder_size_t parent_offset;
|
||||
|
@ -184,6 +197,28 @@ struct binder_version {
|
|||
#define BINDER_CURRENT_PROTOCOL_VERSION 8
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
|
||||
* Set ptr to NULL for the first call to get the info for the first node, and
|
||||
* then repeat the call passing the previously returned value to get the next
|
||||
* nodes. ptr will be 0 when there are no more nodes.
|
||||
*/
|
||||
struct binder_node_debug_info {
|
||||
binder_uintptr_t ptr;
|
||||
binder_uintptr_t cookie;
|
||||
__u32 has_strong_ref;
|
||||
__u32 has_weak_ref;
|
||||
};
|
||||
|
||||
struct binder_node_info_for_ref {
|
||||
__u32 handle;
|
||||
__u32 strong_count;
|
||||
__u32 weak_count;
|
||||
__u32 reserved1;
|
||||
__u32 reserved2;
|
||||
__u32 reserved3;
|
||||
};
|
||||
|
||||
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
|
||||
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
|
||||
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
|
||||
|
@ -191,6 +226,9 @@ struct binder_version {
|
|||
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
|
||||
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
|
||||
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
|
||||
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
|
||||
#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
|
||||
#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
|
||||
|
||||
/*
|
||||
* NOTE: Two special error codes you should check for when calling
|
||||
|
@ -212,6 +250,7 @@ enum transaction_flags {
|
|||
TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
|
||||
TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
|
||||
TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
|
||||
TF_CLEAR_BUF = 0x20, /* clear buffer on txn complete */
|
||||
};
|
||||
|
||||
struct binder_transaction_data {
|
||||
|
@ -249,6 +288,11 @@ struct binder_transaction_data {
|
|||
} data;
|
||||
};
|
||||
|
||||
struct binder_transaction_data_secctx {
|
||||
struct binder_transaction_data transaction_data;
|
||||
binder_uintptr_t secctx;
|
||||
};
|
||||
|
||||
struct binder_transaction_data_sg {
|
||||
struct binder_transaction_data transaction_data;
|
||||
binder_size_t buffers_size;
|
||||
|
@ -284,6 +328,11 @@ enum binder_driver_return_protocol {
|
|||
BR_OK = _IO('r', 1),
|
||||
/* No parameters! */
|
||||
|
||||
BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
|
||||
struct binder_transaction_data_secctx),
|
||||
/*
|
||||
* binder_transaction_data_secctx: the received command.
|
||||
*/
|
||||
BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
|
||||
BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
|
||||
/*
|
||||
|
@ -358,7 +407,7 @@ enum binder_driver_return_protocol {
|
|||
|
||||
BR_FAILED_REPLY = _IO('r', 17),
|
||||
/*
|
||||
* The the last transaction (either a bcTRANSACTION or
|
||||
* The last transaction (either a bcTRANSACTION or
|
||||
* a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
|
||||
*/
|
||||
};
|
||||
|
File diff suppressed because it is too large
@ -0,0 +1,181 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2017 Google, Inc.
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_BINDER_ALLOC_H
|
||||
#define _LINUX_BINDER_ALLOC_H
|
||||
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/rtmutex.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/list_lru.h>
|
||||
#include <uapi/linux/android/binder.h>
|
||||
|
||||
extern struct list_lru binder_alloc_lru;
|
||||
struct binder_transaction;
|
||||
|
||||
/**
|
||||
* struct binder_buffer - buffer used for binder transactions
|
||||
* @entry: entry alloc->buffers
|
||||
* @rb_node: node for allocated_buffers/free_buffers rb trees
|
||||
* @free: %true if buffer is free
|
||||
* @clear_on_free: %true if buffer must be zeroed after use
|
||||
* @allow_user_free: %true if user is allowed to free buffer
|
||||
* @async_transaction: %true if buffer is in use for an async txn
|
||||
* @debug_id: unique ID for debugging
|
||||
* @transaction: pointer to associated struct binder_transaction
|
||||
* @target_node: struct binder_node associated with this buffer
|
||||
* @data_size: size of @transaction data
|
||||
* @offsets_size: size of array of offsets
|
||||
* @extra_buffers_size: size of space for other objects (like sg lists)
|
||||
* @user_data: user pointer to base of buffer space
|
||||
* @pid: pid to attribute the buffer to (caller)
|
||||
*
|
||||
* Bookkeeping structure for binder transaction buffers
|
||||
*/
|
||||
struct binder_buffer {
|
||||
struct list_head entry; /* free and allocated entries by address */
|
||||
struct rb_node rb_node; /* free entry by size or allocated entry */
|
||||
/* by address */
|
||||
unsigned free:1;
|
||||
unsigned clear_on_free:1;
|
||||
unsigned allow_user_free:1;
|
||||
unsigned async_transaction:1;
|
||||
unsigned debug_id:28;
|
||||
|
||||
struct binder_transaction *transaction;
|
||||
|
||||
struct binder_node *target_node;
|
||||
size_t data_size;
|
||||
size_t offsets_size;
|
||||
size_t extra_buffers_size;
|
||||
void __user *user_data;
|
||||
int pid;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_lru_page - page object used for binder shrinker
|
||||
* @page_ptr: pointer to physical page in mmap'd space
|
||||
* @lru: entry in binder_alloc_lru
|
||||
* @alloc: binder_alloc for a proc
|
||||
*/
|
||||
struct binder_lru_page {
|
||||
struct list_head lru;
|
||||
struct page *page_ptr;
|
||||
struct binder_alloc *alloc;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_alloc - per-binder proc state for binder allocator
|
||||
* @vma: vm_area_struct passed to mmap_handler
|
||||
* (invarient after mmap)
|
||||
* @tsk: tid for task that called init for this proc
|
||||
* (invariant after init)
|
||||
* @vma_vm_mm: copy of vma->vm_mm (invarient after mmap)
|
||||
* @buffer: base of per-proc address space mapped via mmap
|
||||
* @buffers: list of all buffers for this proc
|
||||
* @free_buffers: rb tree of buffers available for allocation
|
||||
* sorted by size
|
||||
* @allocated_buffers: rb tree of allocated buffers sorted by address
|
||||
* @free_async_space: VA space available for async buffers. This is
|
||||
* initialized at mmap time to 1/2 the full VA space
|
||||
* @pages: array of binder_lru_page
|
||||
* @buffer_size: size of address space specified via mmap
|
||||
* @pid: pid for associated binder_proc (invariant after init)
|
||||
* @pages_high: high watermark of offset in @pages
|
||||
*
|
||||
* Bookkeeping structure for per-proc address space management for binder
|
||||
* buffers. It is normally initialized during binder_init() and binder_mmap()
|
||||
* calls. The address space is used for both user-visible buffers and for
|
||||
* struct binder_buffer objects used to track the user buffers
|
||||
*/
|
||||
struct binder_alloc {
|
||||
struct mutex mutex;
|
||||
struct vm_area_struct *vma;
|
||||
struct mm_struct *vma_vm_mm;
|
||||
void __user *buffer;
|
||||
struct list_head buffers;
|
||||
struct rb_root free_buffers;
|
||||
struct rb_root allocated_buffers;
|
||||
size_t free_async_space;
|
||||
struct binder_lru_page *pages;
|
||||
size_t buffer_size;
|
||||
uint32_t buffer_free;
|
||||
int pid;
|
||||
size_t pages_high;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
|
||||
void binder_selftest_alloc(struct binder_alloc *alloc);
|
||||
#else
|
||||
static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
|
||||
#endif
|
||||
enum lru_status binder_alloc_free_page(struct list_head *item,
|
||||
struct list_lru_one *lru,
|
||||
spinlock_t *lock, void *cb_arg);
|
||||
extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
|
||||
size_t data_size,
|
||||
size_t offsets_size,
|
||||
size_t extra_buffers_size,
|
||||
int is_async,
|
||||
int pid);
|
||||
extern void binder_alloc_init(struct binder_alloc *alloc);
|
||||
extern int binder_alloc_shrinker_init(void);
|
||||
extern void binder_alloc_shrinker_exit(void);
|
||||
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
|
||||
extern struct binder_buffer *
|
||||
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
|
||||
uintptr_t user_ptr);
|
||||
extern void binder_alloc_free_buf(struct binder_alloc *alloc,
|
||||
struct binder_buffer *buffer);
|
||||
extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
|
||||
struct vm_area_struct *vma);
|
||||
extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
|
||||
extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
|
||||
extern void binder_alloc_print_allocated(struct seq_file *m,
|
||||
struct binder_alloc *alloc);
|
||||
void binder_alloc_print_pages(struct seq_file *m,
|
||||
struct binder_alloc *alloc);
|
||||
|
||||
/**
|
||||
* binder_alloc_get_free_async_space() - get free space available for async
|
||||
* @alloc: binder_alloc for this proc
|
||||
*
|
||||
* Return: the bytes remaining in the address-space for async transactions
|
||||
*/
|
||||
static inline size_t
|
||||
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
|
||||
{
|
||||
size_t free_async_space;
|
||||
|
||||
mutex_lock(&alloc->mutex);
|
||||
free_async_space = alloc->free_async_space;
|
||||
mutex_unlock(&alloc->mutex);
|
||||
return free_async_space;
|
||||
}
|
||||
|
||||
unsigned long
|
||||
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
|
||||
struct binder_buffer *buffer,
|
||||
binder_size_t buffer_offset,
|
||||
const void __user *from,
|
||||
size_t bytes);
|
||||
|
||||
int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
|
||||
struct binder_buffer *buffer,
|
||||
binder_size_t buffer_offset,
|
||||
void *src,
|
||||
size_t bytes);
|
||||
|
||||
int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
|
||||
void *dest,
|
||||
struct binder_buffer *buffer,
|
||||
binder_size_t buffer_offset,
|
||||
size_t bytes);
|
||||
|
||||
#endif /* _LINUX_BINDER_ALLOC_H */
|
||||
|
|
@ -0,0 +1,150 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
|
||||
#ifndef _LINUX_BINDER_INTERNAL_H
|
||||
#define _LINUX_BINDER_INTERNAL_H
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/refcount.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/uidgid.h>
|
||||
|
||||
struct binder_context {
|
||||
struct binder_node *binder_context_mgr_node;
|
||||
struct mutex context_mgr_node_lock;
|
||||
kuid_t binder_context_mgr_uid;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_device - information about a binder device node
|
||||
* @hlist: list of binder devices (only used for devices requested via
|
||||
* CONFIG_ANDROID_BINDER_DEVICES)
|
||||
* @miscdev: information about a binder character device node
|
||||
* @context: binder context information
|
||||
* @binderfs_inode: This is the inode of the root dentry of the super block
|
||||
* belonging to a binderfs mount.
|
||||
*/
|
||||
struct binder_device {
|
||||
struct hlist_node hlist;
|
||||
struct miscdevice miscdev;
|
||||
struct binder_context context;
|
||||
struct inode *binderfs_inode;
|
||||
refcount_t ref;
|
||||
};
|
||||
|
||||
/**
|
||||
* binderfs_mount_opts - mount options for binderfs
|
||||
* @max: maximum number of allocatable binderfs binder devices
|
||||
* @stats_mode: enable binder stats in binderfs.
|
||||
*/
|
||||
struct binderfs_mount_opts {
|
||||
int max;
|
||||
int stats_mode;
|
||||
};
|
||||
|
||||
/**
|
||||
* binderfs_info - information about a binderfs mount
|
||||
* @ipc_ns: The ipc namespace the binderfs mount belongs to.
|
||||
* @control_dentry: This records the dentry of this binderfs mount
|
||||
* binder-control device.
|
||||
* @root_uid: uid that needs to be used when a new binder device is
|
||||
* created.
|
||||
* @root_gid: gid that needs to be used when a new binder device is
|
||||
* created.
|
||||
* @mount_opts: The mount options in use.
|
||||
* @device_count: The current number of allocated binder devices.
|
||||
* @proc_log_dir: Pointer to the directory dentry containing process-specific
|
||||
* logs.
|
||||
*/
|
||||
struct binderfs_info {
|
||||
struct ipc_namespace *ipc_ns;
|
||||
struct dentry *control_dentry;
|
||||
kuid_t root_uid;
|
||||
kgid_t root_gid;
|
||||
struct binderfs_mount_opts mount_opts;
|
||||
int device_count;
|
||||
struct dentry *proc_log_dir;
|
||||
};
|
||||
|
||||
extern const struct file_operations binder_fops;
|
||||
|
||||
extern char *binder_devices_param;
|
||||
|
||||
#ifdef CONFIG_ANDROID_BINDERFS
|
||||
extern bool is_binderfs_device(const struct inode *inode);
|
||||
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
|
||||
const struct file_operations *fops,
|
||||
void *data);
|
||||
extern void binderfs_remove_file(struct dentry *dentry);
|
||||
#else
|
||||
static inline bool is_binderfs_device(const struct inode *inode)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline struct dentry *binderfs_create_file(struct dentry *dir,
|
||||
const char *name,
|
||||
const struct file_operations *fops,
|
||||
void *data)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
static inline void binderfs_remove_file(struct dentry *dentry) {}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ANDROID_BINDERFS
|
||||
extern int __init init_binderfs(void);
|
||||
extern void __exit exit_binderfs(void);
|
||||
#else
|
||||
static inline int __init init_binderfs(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void __exit exit_binderfs(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
int binder_stats_show(struct seq_file *m, void *unused);
|
||||
DEFINE_SHOW_ATTRIBUTE(binder_stats);
|
||||
|
||||
int binder_state_show(struct seq_file *m, void *unused);
|
||||
DEFINE_SHOW_ATTRIBUTE(binder_state);
|
||||
|
||||
int binder_transactions_show(struct seq_file *m, void *unused);
|
||||
DEFINE_SHOW_ATTRIBUTE(binder_transactions);
|
||||
|
||||
int binder_transaction_log_show(struct seq_file *m, void *unused);
|
||||
DEFINE_SHOW_ATTRIBUTE(binder_transaction_log);
|
||||
|
||||
struct binder_transaction_log_entry {
|
||||
int debug_id;
|
||||
int debug_id_done;
|
||||
int call_type;
|
||||
int from_proc;
|
||||
int from_thread;
|
||||
int target_handle;
|
||||
int to_proc;
|
||||
int to_thread;
|
||||
int to_node;
|
||||
int data_size;
|
||||
int offsets_size;
|
||||
int return_error_line;
|
||||
uint32_t return_error;
|
||||
uint32_t return_error_param;
|
||||
char context_name[BINDERFS_MAX_NAME + 1];
|
||||
};
|
||||
|
||||
struct binder_transaction_log {
|
||||
atomic_t cur;
|
||||
bool full;
|
||||
struct binder_transaction_log_entry entry[32];
|
||||
};
|
||||
|
||||
extern struct binder_transaction_log binder_transaction_log;
|
||||
extern struct binder_transaction_log binder_transaction_log_failed;
|
||||
#endif /* _LINUX_BINDER_INTERNAL_H */
|
|
@ -1,15 +1,6 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2012 Google, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#undef TRACE_SYSTEM
|
||||
|
@ -23,7 +14,8 @@
|
|||
struct binder_buffer;
|
||||
struct binder_node;
|
||||
struct binder_proc;
|
||||
struct binder_ref;
|
||||
struct binder_alloc;
|
||||
struct binder_ref_data;
|
||||
struct binder_thread;
|
||||
struct binder_transaction;
|
||||
|
||||
|
@ -146,8 +138,8 @@ TRACE_EVENT(binder_transaction_received,
|
|||
|
||||
TRACE_EVENT(binder_transaction_node_to_ref,
|
||||
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
|
||||
struct binder_ref *ref),
|
||||
TP_ARGS(t, node, ref),
|
||||
struct binder_ref_data *rdata),
|
||||
TP_ARGS(t, node, rdata),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, debug_id)
|
||||
|
@ -160,8 +152,8 @@ TRACE_EVENT(binder_transaction_node_to_ref,
|
|||
__entry->debug_id = t->debug_id;
|
||||
__entry->node_debug_id = node->debug_id;
|
||||
__entry->node_ptr = node->ptr;
|
||||
__entry->ref_debug_id = ref->debug_id;
|
||||
__entry->ref_desc = ref->desc;
|
||||
__entry->ref_debug_id = rdata->debug_id;
|
||||
__entry->ref_desc = rdata->desc;
|
||||
),
|
||||
TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
|
||||
__entry->debug_id, __entry->node_debug_id,
|
||||
|
@ -170,8 +162,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
|
|||
);
|
||||
|
||||
TRACE_EVENT(binder_transaction_ref_to_node,
|
||||
TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
|
||||
TP_ARGS(t, ref),
|
||||
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
|
||||
struct binder_ref_data *rdata),
|
||||
TP_ARGS(t, node, rdata),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, debug_id)
|
||||
|
@ -182,10 +175,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
|
|||
),
|
||||
TP_fast_assign(
|
||||
__entry->debug_id = t->debug_id;
|
||||
__entry->ref_debug_id = ref->debug_id;
|
||||
__entry->ref_desc = ref->desc;
|
||||
__entry->node_debug_id = ref->node->debug_id;
|
||||
__entry->node_ptr = ref->node->ptr;
|
||||
__entry->ref_debug_id = rdata->debug_id;
|
||||
__entry->ref_desc = rdata->desc;
|
||||
__entry->node_debug_id = node->debug_id;
|
||||
__entry->node_ptr = node->ptr;
|
||||
),
|
||||
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
|
||||
__entry->debug_id, __entry->node_debug_id,
|
||||
|
@ -194,9 +187,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
|
|||
);
|
||||
|
||||
TRACE_EVENT(binder_transaction_ref_to_ref,
|
||||
TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
|
||||
struct binder_ref *dest_ref),
|
||||
TP_ARGS(t, src_ref, dest_ref),
|
||||
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
|
||||
struct binder_ref_data *src_ref,
|
||||
struct binder_ref_data *dest_ref),
|
||||
TP_ARGS(t, node, src_ref, dest_ref),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, debug_id)
|
||||
|
@ -208,7 +202,7 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
|
|||
),
|
||||
TP_fast_assign(
|
||||
__entry->debug_id = t->debug_id;
|
||||
__entry->node_debug_id = src_ref->node->debug_id;
|
||||
__entry->node_debug_id = node->debug_id;
|
||||
__entry->src_ref_debug_id = src_ref->debug_id;
|
||||
__entry->src_ref_desc = src_ref->desc;
|
||||
__entry->dest_ref_debug_id = dest_ref->debug_id;
|
||||
|
@ -220,22 +214,40 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
|
|||
__entry->dest_ref_debug_id, __entry->dest_ref_desc)
|
||||
);
|
||||
|
||||
TRACE_EVENT(binder_transaction_fd,
|
||||
TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd),
|
||||
TP_ARGS(t, src_fd, dest_fd),
|
||||
TRACE_EVENT(binder_transaction_fd_send,
|
||||
TP_PROTO(struct binder_transaction *t, int fd, size_t offset),
|
||||
TP_ARGS(t, fd, offset),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, debug_id)
|
||||
__field(int, src_fd)
|
||||
__field(int, dest_fd)
|
||||
__field(int, fd)
|
||||
__field(size_t, offset)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->debug_id = t->debug_id;
|
||||
__entry->src_fd = src_fd;
|
||||
__entry->dest_fd = dest_fd;
|
||||
__entry->fd = fd;
|
||||
__entry->offset = offset;
|
||||
),
|
||||
TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d",
|
||||
__entry->debug_id, __entry->src_fd, __entry->dest_fd)
|
||||
TP_printk("transaction=%d src_fd=%d offset=%zu",
|
||||
__entry->debug_id, __entry->fd, __entry->offset)
|
||||
);
|
||||
|
||||
TRACE_EVENT(binder_transaction_fd_recv,
|
||||
TP_PROTO(struct binder_transaction *t, int fd, size_t offset),
|
||||
TP_ARGS(t, fd, offset),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, debug_id)
|
||||
__field(int, fd)
|
||||
__field(size_t, offset)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->debug_id = t->debug_id;
|
||||
__entry->fd = fd;
|
||||
__entry->offset = offset;
|
||||
),
|
||||
TP_printk("transaction=%d dest_fd=%d offset=%zu",
|
||||
__entry->debug_id, __entry->fd, __entry->offset)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(binder_buffer_class,
|
||||
|
@ -245,14 +257,17 @@ DECLARE_EVENT_CLASS(binder_buffer_class,
|
|||
__field(int, debug_id)
|
||||
__field(size_t, data_size)
|
||||
__field(size_t, offsets_size)
|
||||
__field(size_t, extra_buffers_size)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->debug_id = buf->debug_id;
|
||||
__entry->data_size = buf->data_size;
|
||||
__entry->offsets_size = buf->offsets_size;
|
||||
__entry->extra_buffers_size = buf->extra_buffers_size;
|
||||
),
|
||||
TP_printk("transaction=%d data_size=%zd offsets_size=%zd",
|
||||
__entry->debug_id, __entry->data_size, __entry->offsets_size)
|
||||
TP_printk("transaction=%d data_size=%zd offsets_size=%zd extra_buffers_size=%zd",
|
||||
__entry->debug_id, __entry->data_size, __entry->offsets_size,
|
||||
__entry->extra_buffers_size)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf,
|
||||
|
@ -268,9 +283,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
|
|||
TP_ARGS(buffer));
|
||||
|
||||
TRACE_EVENT(binder_update_page_range,
|
||||
TP_PROTO(struct binder_proc *proc, bool allocate,
|
||||
void *start, void *end),
|
||||
TP_ARGS(proc, allocate, start, end),
|
||||
TP_PROTO(struct binder_alloc *alloc, bool allocate,
|
||||
void __user *start, void __user *end),
|
||||
TP_ARGS(alloc, allocate, start, end),
|
||||
TP_STRUCT__entry(
|
||||
__field(int, proc)
|
||||
__field(bool, allocate)
|
||||
|
@ -278,9 +293,9 @@ TRACE_EVENT(binder_update_page_range,
|
|||
__field(size_t, size)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->proc = proc->pid;
|
||||
__entry->proc = alloc->pid;
|
||||
__entry->allocate = allocate;
|
||||
__entry->offset = start - proc->buffer;
|
||||
__entry->offset = start - alloc->buffer;
|
||||
__entry->size = end - start;
|
||||
),
|
||||
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
|
||||
|
@ -288,6 +303,61 @@ TRACE_EVENT(binder_update_page_range,
|
|||
__entry->offset, __entry->size)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(binder_lru_page_class,
|
||||
TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
|
||||
TP_ARGS(alloc, page_index),
|
||||
TP_STRUCT__entry(
|
||||
__field(int, proc)
|
||||
__field(size_t, page_index)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->proc = alloc->pid;
|
||||
__entry->page_index = page_index;
|
||||
),
|
||||
TP_printk("proc=%d page_index=%zu",
|
||||
__entry->proc, __entry->page_index)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
|
||||
TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
|
||||
TP_ARGS(alloc, page_index));
|
||||
|
||||
DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
|
||||
TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
|
||||
TP_ARGS(alloc, page_index));
|
||||
|
||||
DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
|
||||
TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
|
||||
TP_ARGS(alloc, page_index));
|
||||
|
||||
DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
|
||||
TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
|
||||
TP_ARGS(alloc, page_index));
|
||||
|
||||
DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
|
||||
TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
|
||||
TP_ARGS(alloc, page_index));
|
||||
|
||||
DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
|
||||
TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
|
||||
TP_ARGS(alloc, page_index));
|
||||
|
||||
DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
|
||||
TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
|
||||
TP_ARGS(alloc, page_index));
|
||||
|
||||
DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
|
||||
TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
|
||||
TP_ARGS(alloc, page_index));
|
||||
|
||||
DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
|
||||
TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
|
||||
TP_ARGS(alloc, page_index));
|
||||
|
||||
DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
|
||||
TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
|
||||
TP_ARGS(alloc, page_index));
|
||||
|
||||
TRACE_EVENT(binder_command,
|
||||
TP_PROTO(uint32_t cmd),
|
||||
TP_ARGS(cmd),
|
||||
|
|
|
@ -0,0 +1,812 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <linux/compiler_types.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/fsnotify.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/ipc_namespace.h>
|
||||
#include <linux/kdev_t.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/namei.h>
|
||||
#include <linux/magic.h>
|
||||
#include <linux/major.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/fs_parser.h>
|
||||
#include <linux/radix-tree.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock_types.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/user_namespace.h>
|
||||
#include <linux/xarray.h>
|
||||
#include <uapi/asm-generic/errno-base.h>
|
||||
#include <uapi/linux/android/binder.h>
|
||||
#include <uapi/linux/android/binderfs.h>
|
||||
|
||||
#include "binder_internal.h"
|
||||
#include "deps.h"
|
||||
|
||||
#define FIRST_INODE 1
|
||||
#define SECOND_INODE 2
|
||||
#define INODE_OFFSET 3
|
||||
#define INTSTRLEN 21
|
||||
#define BINDERFS_MAX_MINOR (1U << MINORBITS)
|
||||
/* Ensure that the initial ipc namespace always has devices available. */
|
||||
#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
|
||||
|
||||
static dev_t binderfs_dev;
|
||||
static DEFINE_MUTEX(binderfs_minors_mutex);
|
||||
static DEFINE_IDA(binderfs_minors);
|
||||
|
||||
enum binderfs_param {
|
||||
Opt_max,
|
||||
Opt_stats_mode,
|
||||
};
|
||||
|
||||
enum binderfs_stats_mode {
|
||||
binderfs_stats_mode_unset,
|
||||
binderfs_stats_mode_global,
|
||||
};
|
||||
|
||||
static const struct constant_table binderfs_param_stats[] = {
|
||||
{ "global", binderfs_stats_mode_global },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct fs_parameter_spec binderfs_fs_parameters[] = {
|
||||
fsparam_u32("max", Opt_max),
|
||||
fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats),
|
||||
{}
|
||||
};
|
||||
|
||||
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
|
||||
{
|
||||
return sb->s_fs_info;
|
||||
}
|
||||
|
||||
bool is_binderfs_device(const struct inode *inode)
|
||||
{
|
||||
if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* binderfs_binder_device_create - allocate inode from super block of a
|
||||
* binderfs mount
|
||||
* @ref_inode: inode from wich the super block will be taken
|
||||
* @userp: buffer to copy information about new device for userspace to
|
||||
* @req: struct binderfs_device as copied from userspace
|
||||
*
|
||||
* This function allocates a new binder_device and reserves a new minor
|
||||
* number for it.
|
||||
* Minor numbers are limited and tracked globally in binderfs_minors. The
|
||||
* function will stash a struct binder_device for the specific binder
|
||||
* device in i_private of the inode.
|
||||
* It will go on to allocate a new inode from the super block of the
|
||||
* filesystem mount, stash a struct binder_device in its i_private field
|
||||
* and attach a dentry to that inode.
|
||||
*
|
||||
* Return: 0 on success, negative errno on failure
|
||||
*/
|
||||
static int binderfs_binder_device_create(struct inode *ref_inode,
|
||||
struct binderfs_device __user *userp,
|
||||
struct binderfs_device *req)
|
||||
{
|
||||
int minor, ret;
|
||||
struct dentry *dentry, *root;
|
||||
struct binder_device *device;
|
||||
char *name = NULL;
|
||||
size_t name_len;
|
||||
struct inode *inode = NULL;
|
||||
struct super_block *sb = ref_inode->i_sb;
|
||||
struct binderfs_info *info = sb->s_fs_info;
|
||||
#if defined(CONFIG_IPC_NS)
|
||||
bool use_reserve = (info->ipc_ns == get_init_ipc_ns_ptr());
|
||||
#else
|
||||
bool use_reserve = true;
|
||||
#endif
|
||||
|
||||
/* Reserve new minor number for the new device. */
|
||||
mutex_lock(&binderfs_minors_mutex);
|
||||
if (++info->device_count <= info->mount_opts.max)
|
||||
minor = ida_alloc_max(&binderfs_minors,
|
||||
use_reserve ? BINDERFS_MAX_MINOR :
|
||||
BINDERFS_MAX_MINOR_CAPPED,
|
||||
GFP_KERNEL);
|
||||
else
|
||||
minor = -ENOSPC;
|
||||
if (minor < 0) {
|
||||
--info->device_count;
|
||||
mutex_unlock(&binderfs_minors_mutex);
|
||||
return minor;
|
||||
}
|
||||
mutex_unlock(&binderfs_minors_mutex);
|
||||
|
||||
ret = -ENOMEM;
|
||||
device = kzalloc(sizeof(*device), GFP_KERNEL);
|
||||
if (!device)
|
||||
goto err;
|
||||
|
||||
inode = new_inode(sb);
|
||||
if (!inode)
|
||||
goto err;
|
||||
|
||||
inode->i_ino = minor + INODE_OFFSET;
|
||||
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
|
||||
init_special_inode(inode, S_IFCHR | 0600,
|
||||
MKDEV(MAJOR(binderfs_dev), minor));
|
||||
inode->i_fop = &binder_fops;
|
||||
inode->i_uid = info->root_uid;
|
||||
inode->i_gid = info->root_gid;
|
||||
|
||||
req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
|
||||
name_len = strlen(req->name);
|
||||
/* Make sure to include terminating NUL byte */
|
||||
name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
|
||||
if (!name)
|
||||
goto err;
|
||||
|
||||
refcount_set(&device->ref, 1);
|
||||
device->binderfs_inode = inode;
|
||||
device->context.binder_context_mgr_uid = INVALID_UID;
|
||||
device->context.name = name;
|
||||
device->miscdev.name = name;
|
||||
device->miscdev.minor = minor;
|
||||
mutex_init(&device->context.context_mgr_node_lock);
|
||||
|
||||
req->major = MAJOR(binderfs_dev);
|
||||
req->minor = minor;
|
||||
|
||||
if (userp && copy_to_user(userp, req, sizeof(*req))) {
|
||||
ret = -EFAULT;
|
||||
goto err;
|
||||
}
|
||||
|
||||
root = sb->s_root;
|
||||
inode_lock(d_inode(root));
|
||||
|
||||
/* look it up */
|
||||
dentry = lookup_one_len(name, root, name_len);
|
||||
if (IS_ERR(dentry)) {
|
||||
inode_unlock(d_inode(root));
|
||||
ret = PTR_ERR(dentry);
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (d_really_is_positive(dentry)) {
|
||||
/* already exists */
|
||||
dput(dentry);
|
||||
inode_unlock(d_inode(root));
|
||||
ret = -EEXIST;
|
||||
goto err;
|
||||
}
|
||||
|
||||
inode->i_private = device;
|
||||
d_instantiate(dentry, inode);
|
||||
fsnotify_create(root->d_inode, dentry);
|
||||
inode_unlock(d_inode(root));
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
kfree(name);
|
||||
kfree(device);
|
||||
mutex_lock(&binderfs_minors_mutex);
|
||||
--info->device_count;
|
||||
ida_free(&binderfs_minors, minor);
|
||||
mutex_unlock(&binderfs_minors_mutex);
|
||||
iput(inode);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* binderfs_ctl_ioctl - handle binder device node allocation requests
|
||||
*
|
||||
* The request handler for the binder-control device. All requests operate on
|
||||
* the binderfs mount the binder-control device resides in:
|
||||
* - BINDER_CTL_ADD
|
||||
* Allocate a new binder device.
|
||||
*
|
||||
* Return: 0 on success, negative errno on failure
|
||||
*/
|
||||
static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
struct inode *inode = file_inode(file);
|
||||
struct binderfs_device __user *device = (struct binderfs_device __user *)arg;
|
||||
struct binderfs_device device_req;
|
||||
|
||||
switch (cmd) {
|
||||
case BINDER_CTL_ADD:
|
||||
ret = copy_from_user(&device_req, device, sizeof(device_req));
|
||||
if (ret) {
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = binderfs_binder_device_create(inode, device, &device_req);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void binderfs_evict_inode(struct inode *inode)
|
||||
{
|
||||
struct binder_device *device = inode->i_private;
|
||||
struct binderfs_info *info = BINDERFS_SB(inode->i_sb);
|
||||
|
||||
clear_inode(inode);
|
||||
|
||||
if (!S_ISCHR(inode->i_mode) || !device)
|
||||
return;
|
||||
|
||||
mutex_lock(&binderfs_minors_mutex);
|
||||
--info->device_count;
|
||||
ida_free(&binderfs_minors, device->miscdev.minor);
|
||||
mutex_unlock(&binderfs_minors_mutex);
|
||||
|
||||
if (refcount_dec_and_test(&device->ref)) {
|
||||
kfree(device->context.name);
|
||||
kfree(device);
|
||||
}
|
||||
}
|
||||
|
||||
static int binderfs_fs_context_parse_param(struct fs_context *fc,
|
||||
struct fs_parameter *param)
|
||||
{
|
||||
int opt;
|
||||
struct binderfs_mount_opts *ctx = fc->fs_private;
|
||||
struct fs_parse_result result;
|
||||
|
||||
opt = fs_parse(fc, binderfs_fs_parameters, param, &result);
|
||||
if (opt < 0)
|
||||
return opt;
|
||||
|
||||
switch (opt) {
|
||||
case Opt_max:
|
||||
if (result.uint_32 > BINDERFS_MAX_MINOR)
|
||||
return invalfc(fc, "Bad value for '%s'", param->key);
|
||||
|
||||
ctx->max = result.uint_32;
|
||||
break;
|
||||
case Opt_stats_mode:
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
ctx->stats_mode = result.uint_32;
|
||||
break;
|
||||
default:
|
||||
return invalfc(fc, "Unsupported parameter '%s'", param->key);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int binderfs_fs_context_reconfigure(struct fs_context *fc)
|
||||
{
|
||||
struct binderfs_mount_opts *ctx = fc->fs_private;
|
||||
struct binderfs_info *info = BINDERFS_SB(fc->root->d_sb);
|
||||
|
||||
if (info->mount_opts.stats_mode != ctx->stats_mode)
|
||||
return invalfc(fc, "Binderfs stats mode cannot be changed during a remount");
|
||||
|
||||
info->mount_opts.stats_mode = ctx->stats_mode;
|
||||
info->mount_opts.max = ctx->max;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int binderfs_show_options(struct seq_file *seq, struct dentry *root)
|
||||
{
|
||||
struct binderfs_info *info = BINDERFS_SB(root->d_sb);
|
||||
|
||||
if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
|
||||
seq_printf(seq, ",max=%d", info->mount_opts.max);
|
||||
|
||||
switch (info->mount_opts.stats_mode) {
|
||||
case binderfs_stats_mode_unset:
|
||||
break;
|
||||
case binderfs_stats_mode_global:
|
||||
seq_printf(seq, ",stats=global");
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void binderfs_put_super(struct super_block *sb)
|
||||
{
|
||||
struct binderfs_info *info = sb->s_fs_info;
|
||||
|
||||
if (info && info->ipc_ns)
|
||||
put_ipc_ns(info->ipc_ns);
|
||||
|
||||
kfree(info);
|
||||
sb->s_fs_info = NULL;
|
||||
}
|
||||
|
||||
static const struct super_operations binderfs_super_ops = {
|
||||
.evict_inode = binderfs_evict_inode,
|
||||
.show_options = binderfs_show_options,
|
||||
.statfs = simple_statfs,
|
||||
.put_super = binderfs_put_super,
|
||||
};
|
||||
|
||||
static inline bool is_binderfs_control_device(const struct dentry *dentry)
|
||||
{
|
||||
struct binderfs_info *info = dentry->d_sb->s_fs_info;
|
||||
|
||||
return info->control_dentry == dentry;
|
||||
}
|
||||
|
||||
static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
|
||||
struct inode *new_dir, struct dentry *new_dentry,
|
||||
unsigned int flags)
|
||||
{
|
||||
if (is_binderfs_control_device(old_dentry) ||
|
||||
is_binderfs_control_device(new_dentry))
|
||||
return -EPERM;
|
||||
|
||||
return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
|
||||
}
|
||||
|
||||
static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
|
||||
{
|
||||
if (is_binderfs_control_device(dentry))
|
||||
return -EPERM;
|
||||
|
||||
return simple_unlink(dir, dentry);
|
||||
}
|
||||
|
||||
static const struct file_operations binder_ctl_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = nonseekable_open,
|
||||
.unlocked_ioctl = binder_ctl_ioctl,
|
||||
.compat_ioctl = binder_ctl_ioctl,
|
||||
.llseek = noop_llseek,
|
||||
};
|
||||
|
||||
/**
|
||||
* binderfs_binder_ctl_create - create a new binder-control device
|
||||
* @sb: super block of the binderfs mount
|
||||
*
|
||||
* This function creates a new binder-control device node in the binderfs mount
|
||||
* referred to by @sb.
|
||||
*
|
||||
* Return: 0 on success, negative errno on failure
|
||||
*/
|
||||
static int binderfs_binder_ctl_create(struct super_block *sb)
|
||||
{
|
||||
int minor, ret;
|
||||
struct dentry *dentry;
|
||||
struct binder_device *device;
|
||||
struct inode *inode = NULL;
|
||||
struct dentry *root = sb->s_root;
|
||||
struct binderfs_info *info = sb->s_fs_info;
|
||||
#if defined(CONFIG_IPC_NS)
|
||||
bool use_reserve = (info->ipc_ns == get_init_ipc_ns_ptr());
|
||||
#else
|
||||
bool use_reserve = true;
|
||||
#endif
|
||||
|
||||
device = kzalloc(sizeof(*device), GFP_KERNEL);
|
||||
if (!device)
|
||||
return -ENOMEM;
|
||||
|
||||
/* If we have already created a binder-control node, return. */
|
||||
if (info->control_dentry) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = -ENOMEM;
|
||||
inode = new_inode(sb);
|
||||
if (!inode)
|
||||
goto out;
|
||||
|
||||
/* Reserve a new minor number for the new device. */
|
||||
mutex_lock(&binderfs_minors_mutex);
|
||||
minor = ida_alloc_max(&binderfs_minors,
|
||||
use_reserve ? BINDERFS_MAX_MINOR :
|
||||
BINDERFS_MAX_MINOR_CAPPED,
|
||||
GFP_KERNEL);
|
||||
mutex_unlock(&binderfs_minors_mutex);
|
||||
if (minor < 0) {
|
||||
ret = minor;
|
||||
goto out;
|
||||
}
|
||||
|
||||
inode->i_ino = SECOND_INODE;
|
||||
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
|
||||
init_special_inode(inode, S_IFCHR | 0600,
|
||||
MKDEV(MAJOR(binderfs_dev), minor));
|
||||
inode->i_fop = &binder_ctl_fops;
|
||||
inode->i_uid = info->root_uid;
|
||||
inode->i_gid = info->root_gid;
|
||||
|
||||
refcount_set(&device->ref, 1);
|
||||
device->binderfs_inode = inode;
|
||||
device->miscdev.minor = minor;
|
||||
|
||||
dentry = d_alloc_name(root, "binder-control");
|
||||
if (!dentry)
|
||||
goto out;
|
||||
|
||||
inode->i_private = device;
|
||||
info->control_dentry = dentry;
|
||||
d_add(dentry, inode);
|
||||
|
||||
return 0;
|
||||
|
||||
out:
|
||||
kfree(device);
|
||||
iput(inode);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct inode_operations binderfs_dir_inode_operations = {
|
||||
.lookup = simple_lookup,
|
||||
.rename = binderfs_rename,
|
||||
.unlink = binderfs_unlink,
|
||||
};
|
||||
|
||||
static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
|
||||
{
|
||||
struct inode *ret;
|
||||
|
||||
ret = new_inode(sb);
|
||||
if (ret) {
|
||||
ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
|
||||
ret->i_mode = mode;
|
||||
ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct dentry *binderfs_create_dentry(struct dentry *parent,
|
||||
const char *name)
|
||||
{
|
||||
struct dentry *dentry;
|
||||
|
||||
dentry = lookup_one_len(name, parent, strlen(name));
|
||||
if (IS_ERR(dentry))
|
||||
return dentry;
|
||||
|
||||
/* Return error if the file/dir already exists. */
|
||||
if (d_really_is_positive(dentry)) {
|
||||
dput(dentry);
|
||||
return ERR_PTR(-EEXIST);
|
||||
}
|
||||
|
||||
return dentry;
|
||||
}
|
||||
|
||||
void binderfs_remove_file(struct dentry *dentry)
|
||||
{
|
||||
struct inode *parent_inode;
|
||||
|
||||
parent_inode = d_inode(dentry->d_parent);
|
||||
inode_lock(parent_inode);
|
||||
if (simple_positive(dentry)) {
|
||||
dget(dentry);
|
||||
simple_unlink(parent_inode, dentry);
|
||||
d_delete(dentry);
|
||||
dput(dentry);
|
||||
}
|
||||
inode_unlock(parent_inode);
|
||||
}
|
||||
|
||||
struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
|
||||
const struct file_operations *fops,
|
||||
void *data)
|
||||
{
|
||||
struct dentry *dentry;
|
||||
struct inode *new_inode, *parent_inode;
|
||||
struct super_block *sb;
|
||||
|
||||
parent_inode = d_inode(parent);
|
||||
inode_lock(parent_inode);
|
||||
|
||||
dentry = binderfs_create_dentry(parent, name);
|
||||
if (IS_ERR(dentry))
|
||||
goto out;
|
||||
|
||||
sb = parent_inode->i_sb;
|
||||
new_inode = binderfs_make_inode(sb, S_IFREG | 0444);
|
||||
if (!new_inode) {
|
||||
dput(dentry);
|
||||
dentry = ERR_PTR(-ENOMEM);
|
||||
goto out;
|
||||
}
|
||||
|
||||
new_inode->i_fop = fops;
|
||||
new_inode->i_private = data;
|
||||
d_instantiate(dentry, new_inode);
|
||||
fsnotify_create(parent_inode, dentry);
|
||||
|
||||
out:
|
||||
inode_unlock(parent_inode);
|
||||
return dentry;
|
||||
}
|
||||
|
||||
static struct dentry *binderfs_create_dir(struct dentry *parent,
|
||||
const char *name)
|
||||
{
|
||||
struct dentry *dentry;
|
||||
struct inode *new_inode, *parent_inode;
|
||||
struct super_block *sb;
|
||||
|
||||
parent_inode = d_inode(parent);
|
||||
inode_lock(parent_inode);
|
||||
|
||||
dentry = binderfs_create_dentry(parent, name);
|
||||
if (IS_ERR(dentry))
|
||||
goto out;
|
||||
|
||||
sb = parent_inode->i_sb;
|
||||
new_inode = binderfs_make_inode(sb, S_IFDIR | 0755);
|
||||
if (!new_inode) {
|
||||
dput(dentry);
|
||||
dentry = ERR_PTR(-ENOMEM);
|
||||
goto out;
|
||||
}
|
||||
|
||||
new_inode->i_fop = &simple_dir_operations;
|
||||
new_inode->i_op = &simple_dir_inode_operations;
|
||||
|
||||
set_nlink(new_inode, 2);
|
||||
d_instantiate(dentry, new_inode);
|
||||
inc_nlink(parent_inode);
|
||||
fsnotify_mkdir(parent_inode, dentry);
|
||||
|
||||
out:
|
||||
inode_unlock(parent_inode);
|
||||
return dentry;
|
||||
}
|
||||
|
||||
static int init_binder_logs(struct super_block *sb)
|
||||
{
|
||||
struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
|
||||
struct binderfs_info *info;
|
||||
int ret = 0;
|
||||
|
||||
binder_logs_root_dir = binderfs_create_dir(sb->s_root,
|
||||
"binder_logs");
|
||||
if (IS_ERR(binder_logs_root_dir)) {
|
||||
ret = PTR_ERR(binder_logs_root_dir);
|
||||
goto out;
|
||||
}
|
||||
|
||||
dentry = binderfs_create_file(binder_logs_root_dir, "stats",
|
||||
&binder_stats_fops, NULL);
|
||||
if (IS_ERR(dentry)) {
|
||||
ret = PTR_ERR(dentry);
|
||||
goto out;
|
||||
}
|
||||
|
||||
dentry = binderfs_create_file(binder_logs_root_dir, "state",
|
||||
&binder_state_fops, NULL);
|
||||
if (IS_ERR(dentry)) {
|
||||
ret = PTR_ERR(dentry);
|
||||
goto out;
|
||||
}
|
||||
|
||||
dentry = binderfs_create_file(binder_logs_root_dir, "transactions",
|
||||
&binder_transactions_fops, NULL);
|
||||
if (IS_ERR(dentry)) {
|
||||
ret = PTR_ERR(dentry);
|
||||
goto out;
|
||||
}
|
||||
|
||||
dentry = binderfs_create_file(binder_logs_root_dir,
|
||||
"transaction_log",
|
||||
&binder_transaction_log_fops,
|
||||
&binder_transaction_log);
|
||||
if (IS_ERR(dentry)) {
|
||||
ret = PTR_ERR(dentry);
|
||||
goto out;
|
||||
}
|
||||
|
||||
dentry = binderfs_create_file(binder_logs_root_dir,
|
||||
"failed_transaction_log",
|
||||
&binder_transaction_log_fops,
|
||||
&binder_transaction_log_failed);
|
||||
if (IS_ERR(dentry)) {
|
||||
ret = PTR_ERR(dentry);
|
||||
goto out;
|
||||
}
|
||||
|
||||
proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc");
|
||||
if (IS_ERR(proc_log_dir)) {
|
||||
ret = PTR_ERR(proc_log_dir);
|
||||
goto out;
|
||||
}
|
||||
info = sb->s_fs_info;
|
||||
info->proc_log_dir = proc_log_dir;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
|
||||
{
|
||||
int ret;
|
||||
struct binderfs_info *info;
|
||||
struct binderfs_mount_opts *ctx = fc->fs_private;
|
||||
struct inode *inode = NULL;
|
||||
struct binderfs_device device_info = {};
|
||||
const char *name;
|
||||
size_t len;
|
||||
|
||||
sb->s_blocksize = PAGE_SIZE;
|
||||
sb->s_blocksize_bits = PAGE_SHIFT;
|
||||
|
||||
/*
|
||||
* The binderfs filesystem can be mounted by userns root in a
|
||||
* non-initial userns. By default such mounts have the SB_I_NODEV flag
|
||||
* set in s_iflags to prevent security issues where userns root can
|
||||
* just create random device nodes via mknod() since it owns the
|
||||
* filesystem mount. But binderfs does not allow to create any files
|
||||
* including devices nodes. The only way to create binder devices nodes
|
||||
* is through the binder-control device which userns root is explicitly
|
||||
* allowed to do. So removing the SB_I_NODEV flag from s_iflags is both
|
||||
* necessary and safe.
|
||||
*/
|
||||
sb->s_iflags &= ~SB_I_NODEV;
|
||||
sb->s_iflags |= SB_I_NOEXEC;
|
||||
sb->s_magic = BINDERFS_SUPER_MAGIC;
|
||||
sb->s_op = &binderfs_super_ops;
|
||||
sb->s_time_gran = 1;
|
||||
|
||||
sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
|
||||
if (!sb->s_fs_info)
|
||||
return -ENOMEM;
|
||||
info = sb->s_fs_info;
|
||||
|
||||
info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
|
||||
|
||||
info->root_gid = make_kgid(sb->s_user_ns, 0);
|
||||
if (!gid_valid(info->root_gid))
|
||||
info->root_gid = GLOBAL_ROOT_GID;
|
||||
info->root_uid = make_kuid(sb->s_user_ns, 0);
|
||||
if (!uid_valid(info->root_uid))
|
||||
info->root_uid = GLOBAL_ROOT_UID;
|
||||
info->mount_opts.max = ctx->max;
|
||||
info->mount_opts.stats_mode = ctx->stats_mode;
|
||||
|
||||
inode = new_inode(sb);
|
||||
if (!inode)
|
||||
return -ENOMEM;
|
||||
|
||||
inode->i_ino = FIRST_INODE;
|
||||
inode->i_fop = &simple_dir_operations;
|
||||
inode->i_mode = S_IFDIR | 0755;
|
||||
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
|
||||
inode->i_op = &binderfs_dir_inode_operations;
|
||||
set_nlink(inode, 2);
|
||||
|
||||
sb->s_root = d_make_root(inode);
|
||||
if (!sb->s_root)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = binderfs_binder_ctl_create(sb);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
name = binder_devices_param;
|
||||
for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
|
||||
strscpy(device_info.name, name, len + 1);
|
||||
ret = binderfs_binder_device_create(inode, NULL, &device_info);
|
||||
if (ret)
|
||||
return ret;
|
||||
name += len;
|
||||
if (*name == ',')
|
||||
name++;
|
||||
}
|
||||
|
||||
if (info->mount_opts.stats_mode == binderfs_stats_mode_global)
|
||||
return init_binder_logs(sb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int binderfs_fs_context_get_tree(struct fs_context *fc)
|
||||
{
|
||||
return get_tree_nodev(fc, binderfs_fill_super);
|
||||
}
|
||||
|
||||
static void binderfs_fs_context_free(struct fs_context *fc)
|
||||
{
|
||||
struct binderfs_mount_opts *ctx = fc->fs_private;
|
||||
|
||||
kfree(ctx);
|
||||
}
|
||||
|
||||
static const struct fs_context_operations binderfs_fs_context_ops = {
|
||||
.free = binderfs_fs_context_free,
|
||||
.get_tree = binderfs_fs_context_get_tree,
|
||||
.parse_param = binderfs_fs_context_parse_param,
|
||||
.reconfigure = binderfs_fs_context_reconfigure,
|
||||
};
|
||||
|
||||
static int binderfs_init_fs_context(struct fs_context *fc)
|
||||
{
|
||||
struct binderfs_mount_opts *ctx;
|
||||
|
||||
ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
|
||||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
ctx->max = BINDERFS_MAX_MINOR;
|
||||
ctx->stats_mode = binderfs_stats_mode_unset;
|
||||
|
||||
fc->fs_private = ctx;
|
||||
fc->ops = &binderfs_fs_context_ops;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct file_system_type binder_fs_type = {
|
||||
.name = "binder",
|
||||
.init_fs_context = binderfs_init_fs_context,
|
||||
.parameters = binderfs_fs_parameters,
|
||||
.kill_sb = kill_litter_super,
|
||||
.fs_flags = FS_USERNS_MOUNT,
|
||||
};
|
||||
|
||||
int __init init_binderfs(void)
|
||||
{
|
||||
int ret;
|
||||
const char *name;
|
||||
size_t len;
|
||||
|
||||
/* Verify that the default binderfs device names are valid. */
|
||||
name = binder_devices_param;
|
||||
for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
|
||||
if (len > BINDERFS_MAX_NAME)
|
||||
return -E2BIG;
|
||||
name += len;
|
||||
if (*name == ',')
|
||||
name++;
|
||||
}
|
||||
|
||||
/* Allocate new major number for binderfs. */
|
||||
ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
|
||||
"binder");
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = register_filesystem(&binder_fs_type);
|
||||
if (ret) {
|
||||
unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void __exit exit_binderfs(void)
|
||||
{
|
||||
unregister_filesystem(&binder_fs_type);
|
||||
unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
|
||||
}
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
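The new binderfs.c above implements the binder-control device, whose BINDER_CTL_ADD ioctl lets userspace allocate additional binder device nodes inside a binderfs mount. As a hedged userspace illustration (not part of this commit; the /dev/binderfs mount point and the device name are assumptions), creating such a device could look like this:

/* Illustrative only: assumes binderfs is mounted at /dev/binderfs. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binderfs.h>

int main(void)
{
	struct binderfs_device device;
	int fd, ret;

	memset(&device, 0, sizeof(device));
	strcpy(device.name, "my-binder");	/* example name, not mandated */

	fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		perror("open binder-control");
		return 1;
	}

	/* Ask binderfs to create the device node and report its numbers. */
	ret = ioctl(fd, BINDER_CTL_ADD, &device);
	close(fd);
	if (ret < 0) {
		perror("BINDER_CTL_ADD");
		return 1;
	}

	printf("created %s (major %u, minor %u)\n",
	       device.name, device.major, device.minor);
	return 0;
}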
binder/deps.c (163 changed lines):
|
@ -2,6 +2,7 @@
|
|||
#include <linux/file.h>
|
||||
#include <linux/fdtable.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/ipc_namespace.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
@ -9,6 +10,8 @@
|
|||
#include <linux/kprobes.h>
|
||||
#include <linux/version.h>
|
||||
|
||||
#include "deps.h"
|
||||
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
|
||||
|
||||
#ifndef CONFIG_KPROBES
|
||||
|
@ -66,103 +69,16 @@ static unsigned long kallsyms_lookup_name_wrapper(const char *name)
|
|||
}
|
||||
|
||||
|
||||
static struct vm_struct *(*get_vm_area_ptr)(unsigned long, unsigned long) = NULL;
|
||||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
|
||||
static void (*zap_page_range_ptr)(struct vm_area_struct *, unsigned long, unsigned long) = NULL;
|
||||
#else
|
||||
static void (*zap_page_range_ptr)(struct vm_area_struct *, unsigned long, unsigned long, struct zap_details *) = NULL;
|
||||
#endif
|
||||
static int (*map_kernel_range_noflush_ptr)(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages) = NULL;
|
||||
static void (*unmap_kernel_range_ptr)(unsigned long, unsigned long) = NULL;
|
||||
static struct files_struct *(*get_files_struct_ptr)(struct task_struct *) = NULL;
|
||||
static void (*put_files_struct_ptr)(struct files_struct *) = NULL;
|
||||
static struct sighand_struct *(*__lock_task_sighand_ptr)(struct task_struct *, unsigned long *) = NULL;
|
||||
static int (*__alloc_fd_ptr)(struct files_struct *files, unsigned start, unsigned end, unsigned flags) = NULL;
|
||||
static void (*__fd_install_ptr)(struct files_struct *files, unsigned int fd, struct file *file) = NULL;
|
||||
static int (*__close_fd_ptr)(struct files_struct *files, unsigned int fd) = NULL;
|
||||
static int (*__close_fd_get_file_ptr)(unsigned int fd, struct file **res) = NULL;
|
||||
|
||||
int __close_fd_get_file(unsigned int fd, struct file **res)
|
||||
{
|
||||
if (!__close_fd_get_file_ptr)
|
||||
__close_fd_get_file_ptr = kallsyms_lookup_name_wrapper("__close_fd_get_file");
|
||||
return __close_fd_get_file_ptr(fd, res);
|
||||
}
|
||||
|
||||
static int (*can_nice_ptr)(const struct task_struct *, const int) = NULL;
|
||||
static int (*security_binder_set_context_mgr_ptr)(struct task_struct *mgr) = NULL;
|
||||
static int (*security_binder_transaction_ptr)(struct task_struct *from, struct task_struct *to) = NULL;
|
||||
static int (*security_binder_transfer_binder_ptr)(struct task_struct *from, struct task_struct *to) = NULL;
|
||||
static int (*security_binder_transfer_file_ptr)(struct task_struct *from, struct task_struct *to, struct file *file) = NULL;
|
||||
|
||||
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
|
||||
{
|
||||
if (!get_vm_area_ptr)
|
||||
get_vm_area_ptr = kallsyms_lookup_name_wrapper("get_vm_area");
|
||||
return get_vm_area_ptr(size, flags);
|
||||
}
|
||||
|
||||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
|
||||
void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size)
|
||||
#else
|
||||
void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details)
|
||||
#endif
|
||||
{
|
||||
if (!zap_page_range_ptr)
|
||||
zap_page_range_ptr = kallsyms_lookup_name_wrapper("zap_page_range");
|
||||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
|
||||
zap_page_range_ptr(vma, address, size);
|
||||
#else
|
||||
zap_page_range_ptr(vma, address, size, details);
|
||||
#endif
|
||||
}
|
||||
|
||||
int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages)
|
||||
{
|
||||
if (!map_kernel_range_noflush_ptr)
|
||||
map_kernel_range_noflush_ptr = kallsyms_lookup_name_wrapper("map_kernel_range_noflush");
|
||||
return map_kernel_range_noflush_ptr(start, size, prot, pages);
|
||||
}
|
||||
|
||||
void unmap_kernel_range(unsigned long addr, unsigned long size)
|
||||
{
|
||||
if (!unmap_kernel_range_ptr)
|
||||
unmap_kernel_range_ptr = kallsyms_lookup_name_wrapper("unmap_kernel_range");
|
||||
unmap_kernel_range_ptr(addr, size);
|
||||
}
|
||||
|
||||
struct files_struct *get_files_struct(struct task_struct *task)
|
||||
{
|
||||
if (!get_files_struct_ptr)
|
||||
get_files_struct_ptr = kallsyms_lookup_name_wrapper("get_files_struct");
|
||||
return get_files_struct_ptr(task);
|
||||
}
|
||||
|
||||
void put_files_struct(struct files_struct *files)
|
||||
{
|
||||
if (!put_files_struct_ptr)
|
||||
put_files_struct_ptr = kallsyms_lookup_name_wrapper("put_files_struct");
|
||||
put_files_struct_ptr(files);
|
||||
}
|
||||
|
||||
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
|
||||
{
|
||||
if (!__lock_task_sighand_ptr)
|
||||
__lock_task_sighand_ptr = kallsyms_lookup_name_wrapper("__lock_task_sighand");
|
||||
return __lock_task_sighand_ptr(tsk, flags);
|
||||
}
|
||||
|
||||
int __alloc_fd(struct files_struct *files, unsigned start, unsigned end, unsigned flags)
|
||||
{
|
||||
if (!__alloc_fd_ptr)
|
||||
__alloc_fd_ptr = kallsyms_lookup_name_wrapper("__alloc_fd");
|
||||
return __alloc_fd_ptr(files, start, end, flags);
|
||||
}
|
||||
|
||||
void __fd_install(struct files_struct *files, unsigned int fd, struct file *file)
|
||||
{
|
||||
if (!__fd_install_ptr)
|
||||
__fd_install_ptr = kallsyms_lookup_name_wrapper("__fd_install");
|
||||
__fd_install_ptr(files, fd, file);
|
||||
}
|
||||
|
||||
int __close_fd(struct files_struct *files, unsigned int fd)
|
||||
{
|
||||
if (!__close_fd_ptr)
|
||||
__close_fd_ptr = kallsyms_lookup_name_wrapper("__close_fd_ptr");
|
||||
return __close_fd_ptr(files, fd);
|
||||
}
|
||||
|
||||
int can_nice(const struct task_struct *p, const int nice)
|
||||
{
|
||||
|
@ -171,6 +87,17 @@ int can_nice(const struct task_struct *p, const int nice)
|
|||
return can_nice_ptr(p, nice);
|
||||
}
|
||||
|
||||
static void (*mmput_async_ptr)(struct mm_struct *mm) = NULL;
|
||||
|
||||
void mmput_async(struct mm_struct *mm)
|
||||
{
|
||||
if (!mmput_async_ptr)
|
||||
mmput_async_ptr = kallsyms_lookup_name_wrapper("mmput_async");
|
||||
return mmput_async_ptr(mm);
|
||||
}
|
||||
|
||||
static int (*security_binder_set_context_mgr_ptr)(struct task_struct *mgr) = NULL;
|
||||
|
||||
int security_binder_set_context_mgr(struct task_struct *mgr)
|
||||
{
|
||||
if (!security_binder_set_context_mgr_ptr)
|
||||
|
@ -178,6 +105,8 @@ int security_binder_set_context_mgr(struct task_struct *mgr)
|
|||
return security_binder_set_context_mgr_ptr(mgr);
|
||||
}
|
||||
|
||||
static int (*security_binder_transaction_ptr)(struct task_struct *from, struct task_struct *to) = NULL;
|
||||
|
||||
int security_binder_transaction(struct task_struct *from, struct task_struct *to)
|
||||
{
|
||||
if (!security_binder_transaction_ptr)
|
||||
|
@ -185,6 +114,8 @@ int security_binder_transaction(struct task_struct *from, struct task_struct *to
|
|||
return security_binder_transaction_ptr(from, to);
|
||||
}
|
||||
|
||||
static int (*security_binder_transfer_binder_ptr)(struct task_struct *from, struct task_struct *to) = NULL;
|
||||
|
||||
int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to)
|
||||
{
|
||||
if (!security_binder_transfer_binder_ptr)
|
||||
|
@ -192,9 +123,49 @@ int security_binder_transfer_binder(struct task_struct *from, struct task_struct
|
|||
return security_binder_transfer_binder_ptr(from, to);
|
||||
}
|
||||
|
||||
static int (*security_binder_transfer_file_ptr)(struct task_struct *from, struct task_struct *to, struct file *file) = NULL;
|
||||
|
||||
int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file)
|
||||
{
|
||||
if (!security_binder_transfer_file_ptr)
|
||||
security_binder_transfer_file_ptr = kallsyms_lookup_name_wrapper("security_binder_transfer_file");
|
||||
return security_binder_transfer_file_ptr(from, to, file);
|
||||
}
|
||||
|
||||
static int (*task_work_add_ptr)(struct task_struct *task, struct callback_head *work,
|
||||
enum task_work_notify_mode notify) = NULL;
|
||||
|
||||
int task_work_add(struct task_struct *task, struct callback_head *work,
|
||||
enum task_work_notify_mode notify)
|
||||
{
|
||||
if (!task_work_add_ptr)
|
||||
task_work_add_ptr = kallsyms_lookup_name_wrapper("task_work_add");
|
||||
return task_work_add_ptr(task, work, notify);
|
||||
}
|
||||
|
||||
static void (*zap_page_range_ptr)(struct vm_area_struct *, unsigned long, unsigned long) = NULL;
|
||||
|
||||
void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size)
|
||||
{
|
||||
if (!zap_page_range_ptr)
|
||||
zap_page_range_ptr = kallsyms_lookup_name_wrapper("zap_page_range");
|
||||
zap_page_range_ptr(vma, address, size);
|
||||
}
|
||||
|
||||
static void (*put_ipc_ns_ptr)(struct ipc_namespace *ns) = NULL;
|
||||
|
||||
void put_ipc_ns(struct ipc_namespace *ns)
|
||||
{
|
||||
if (!put_ipc_ns_ptr)
|
||||
put_ipc_ns_ptr = kallsyms_lookup_name_wrapper("put_ipc_ns");
|
||||
put_ipc_ns_ptr(ns);
|
||||
}
|
||||
|
||||
static struct ipc_namespace *init_ipc_ns_ptr = NULL;
|
||||
|
||||
struct ipc_namespace *get_init_ipc_ns_ptr(void)
|
||||
{
|
||||
if (!init_ipc_ns_ptr)
|
||||
init_ipc_ns_ptr = kallsyms_lookup_name_wrapper("init_ipc_ns");
|
||||
return init_ipc_ns_ptr;
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ipc_namespace.h>
+
+struct ipc_namespace* get_init_ipc_ns_ptr(void);