Import kernel module source from original Anbox repository

Simon Fels 2018-06-08 17:17:43 +02:00
commit f3d77d660c
27 changed files with 6919 additions and 0 deletions

99-anbox.rules (new file)
@@ -0,0 +1,2 @@
KERNEL=="ashmem", NAME="%k", MODE="0666"
KERNEL=="binder*", NAME="%k", MODE="0666"

README.md (new file)
@@ -0,0 +1,5 @@
# Anbox Kernel Modules
This repository contains the kernel modules necessary to run the Anbox
Android container runtime. They're split out of the original Anbox
repository to make packaging in various Linux distributions easier.
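Once built and loaded (each module directory carries its own Makefile and dkms.conf), the modules expose `/dev/ashmem` and `/dev/binder`, made world-accessible by the udev rules shipped here. A minimal sanity check, sketched under the assumption that the modules and the udev rules are already installed:

    /* devcheck.c (illustrative only): verify both device nodes open read/write */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *devs[] = { "/dev/ashmem", "/dev/binder" };
        int ok = 1;
        for (int i = 0; i < 2; i++) {
            int fd = open(devs[i], O_RDWR);
            if (fd < 0) {
                perror(devs[i]);   /* module not loaded or udev rule missing */
                ok = 0;
                continue;
            }
            printf("%s: ok\n", devs[i]);
            close(fd);
        }
        return ok ? 0 : 1;
    }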

anbox.conf (new file)
@@ -0,0 +1,2 @@
ashmem_linux
binder_linux

ashmem/Makefile (new file)
@@ -0,0 +1,14 @@
ccflags-y += -I$(src) -Wno-error=implicit-int -Wno-int-conversion
obj-m := ashmem_linux.o
ashmem_linux-y := deps.o ashmem.o
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
all:
$(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD
install:
cp ashmem_linux.ko $(DESTDIR)/
clean:
rm -rf deps.h *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions

ashmem/ashmem.c (new file)
@@ -0,0 +1,889 @@
/* mm/ashmem.c
*
* Anonymous Shared Memory Subsystem, ashmem
*
* Copyright (C) 2008 Google, Inc.
*
* Robert Love <rlove@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "ashmem: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/version.h>
#include "ashmem.h"
#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
/**
* struct ashmem_area - The anonymous shared memory area
* @name: The optional name in /proc/pid/maps
* @unpinned_list: The list of unpinned ranges in this area
* @file: The shmem-based backing file
* @size: The size of the mapping, in bytes
* @prot_mask: The allowed protection bits, as vm_flags
*
* The lifecycle of this structure is from our parent file's open() until
* its release(). It is also protected by 'ashmem_mutex'
*
* Warning: Mappings do NOT pin this structure; It dies on close()
*/
struct ashmem_area {
char name[ASHMEM_FULL_NAME_LEN];
struct list_head unpinned_list;
struct file *file;
size_t size;
unsigned long prot_mask;
};
/**
* struct ashmem_range - A range of unpinned/evictable pages
* @lru: The entry in the LRU list
* @unpinned: The entry in its area's unpinned list
* @asma: The associated anonymous shared memory area.
* @pgstart: The starting page (inclusive)
* @pgend: The ending page (inclusive)
* @purged: The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED)
*
* The lifecycle of this structure is from unpin to pin.
* It is protected by 'ashmem_mutex'
*/
struct ashmem_range {
struct list_head lru;
struct list_head unpinned;
struct ashmem_area *asma;
size_t pgstart;
size_t pgend;
unsigned int purged;
};
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);
/*
* long lru_count - The count of pages on our LRU list.
*
* This is protected by ashmem_mutex.
*/
static unsigned long lru_count;
/*
* ashmem_mutex - protects the list of and each individual ashmem_area
*
* Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
*/
static DEFINE_MUTEX(ashmem_mutex);
static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
#define range_size(range) \
((range)->pgend - (range)->pgstart + 1)
#define range_on_lru(range) \
((range)->purged == ASHMEM_NOT_PURGED)
#define page_range_subsumes_range(range, start, end) \
(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
#define page_range_subsumed_by_range(range, start, end) \
(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))
#define page_in_range(range, page) \
(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
#define page_range_in_range(range, start, end) \
(page_in_range(range, start) || page_in_range(range, end) || \
page_range_subsumes_range(range, start, end))
#define range_before_page(range, page) \
((range)->pgend < (page))
#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
/**
* lru_add() - Adds a range of memory to the LRU list
* @range: The memory range being added.
*
* The range is first added to the end (tail) of the LRU list.
* After this, the size of the range is added to @lru_count
*/
static inline void lru_add(struct ashmem_range *range)
{
list_add_tail(&range->lru, &ashmem_lru_list);
lru_count += range_size(range);
}
/**
* lru_del() - Removes a range of memory from the LRU list
* @range: The memory range being removed
*
* The range is first deleted from the LRU list.
* After this, the size of the range is removed from @lru_count
*/
static inline void lru_del(struct ashmem_range *range)
{
list_del(&range->lru);
lru_count -= range_size(range);
}
/**
* range_alloc() - Allocates and initializes a new ashmem_range structure
* @asma: The associated ashmem_area
* @prev_range: The previous ashmem_range in the sorted asma->unpinned list
* @purged: Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
* @start: The starting page (inclusive)
* @end: The ending page (inclusive)
*
* This function is protected by ashmem_mutex.
*
* Return: 0 if successful, or -ENOMEM if there is an error
*/
static int range_alloc(struct ashmem_area *asma,
struct ashmem_range *prev_range, unsigned int purged,
size_t start, size_t end)
{
struct ashmem_range *range;
range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
if (unlikely(!range))
return -ENOMEM;
range->asma = asma;
range->pgstart = start;
range->pgend = end;
range->purged = purged;
list_add_tail(&range->unpinned, &prev_range->unpinned);
if (range_on_lru(range))
lru_add(range);
return 0;
}
/**
* range_del() - Deletes and deallocates an ashmem_range structure
* @range: The associated ashmem_range that has previously been allocated
*/
static void range_del(struct ashmem_range *range)
{
list_del(&range->unpinned);
if (range_on_lru(range))
lru_del(range);
kmem_cache_free(ashmem_range_cachep, range);
}
/**
* range_shrink() - Shrinks an ashmem_range
* @range: The associated ashmem_range being shrunk
* @start: The starting byte of the new range
* @end: The ending byte of the new range
*
* This does not modify the data inside the existing range in any way - It
* simply shrinks the boundaries of the range.
*
* Theoretically, with a little tweaking, this could eventually be changed
* to range_resize, and expand the lru_count if the new range is larger.
*/
static inline void range_shrink(struct ashmem_range *range,
size_t start, size_t end)
{
size_t pre = range_size(range);
range->pgstart = start;
range->pgend = end;
if (range_on_lru(range))
lru_count -= pre - range_size(range);
}
/**
* ashmem_open() - Opens an Anonymous Shared Memory structure
* @inode: The backing file's index node(?)
* @file: The backing file
*
* Please note that the ashmem_area is not returned by this function - It is
* instead written to "file->private_data".
*
* Return: 0 if successful, or another code if unsuccessful.
*/
static int ashmem_open(struct inode *inode, struct file *file)
{
struct ashmem_area *asma;
int ret;
ret = generic_file_open(inode, file);
if (unlikely(ret))
return ret;
asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
if (unlikely(!asma))
return -ENOMEM;
INIT_LIST_HEAD(&asma->unpinned_list);
memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
asma->prot_mask = PROT_MASK;
file->private_data = asma;
return 0;
}
/**
* ashmem_release() - Releases an Anonymous Shared Memory structure
* @ignored: The backing file's Index Node(?) - It is ignored here.
* @file: The backing file
*
* Return: 0 if successful. If it is anything else, go have a coffee and
* try again.
*/
static int ashmem_release(struct inode *ignored, struct file *file)
{
struct ashmem_area *asma = file->private_data;
struct ashmem_range *range, *next;
mutex_lock(&ashmem_mutex);
list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
range_del(range);
mutex_unlock(&ashmem_mutex);
if (asma->file)
fput(asma->file);
kmem_cache_free(ashmem_area_cachep, asma);
return 0;
}
/**
* ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
* @file: The associated backing file.
* @buf: The buffer of data being written to
* @len: The number of bytes being read
* @pos: The position of the first byte to read.
*
* Return: 0 if successful, or another return code if not.
*/
static ssize_t ashmem_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct ashmem_area *asma = file->private_data;
int ret = 0;
mutex_lock(&ashmem_mutex);
/* If size is not set, or set to 0, always return EOF. */
if (asma->size == 0)
goto out_unlock;
if (!asma->file) {
ret = -EBADF;
goto out_unlock;
}
mutex_unlock(&ashmem_mutex);
/*
* asma and asma->file are used outside the lock here. We assume
* once asma->file is set it will never be changed, and will not
* be destroyed until all references to the file are dropped and
* ashmem_release is called.
*
* kernel_read supersedes vfs_read from kernel version 3.9
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
ret = __vfs_read(asma->file, buf, len, pos);
#else
ret = kernel_read(asma->file, buf, len, pos);
#endif
if (ret >= 0)
/** Update backing file pos, since f_ops->read() doesn't */
asma->file->f_pos = *pos;
return ret;
out_unlock:
mutex_unlock(&ashmem_mutex);
return ret;
}
static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
struct ashmem_area *asma = file->private_data;
int ret;
mutex_lock(&ashmem_mutex);
if (asma->size == 0) {
ret = -EINVAL;
goto out;
}
if (!asma->file) {
ret = -EBADF;
goto out;
}
ret = vfs_llseek(asma->file, offset, origin);
if (ret < 0)
goto out;
/** Copy f_pos from backing file, since f_ops->llseek() sets it */
file->f_pos = asma->file->f_pos;
out:
mutex_unlock(&ashmem_mutex);
return ret;
}
static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
_calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
_calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
}
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
struct ashmem_area *asma = file->private_data;
int ret = 0;
mutex_lock(&ashmem_mutex);
/* user needs to SET_SIZE before mapping */
if (unlikely(!asma->size)) {
ret = -EINVAL;
goto out;
}
/* requested protection bits must match our allowed protection mask */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
calc_vm_prot_bits(PROT_MASK, 0))) {
#else
if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
calc_vm_prot_bits(PROT_MASK))) {
#endif
ret = -EPERM;
goto out;
}
vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);
if (!asma->file) {
char *name = ASHMEM_NAME_DEF;
struct file *vmfile;
if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
name = asma->name;
/* ... and allocate the backing shmem file */
vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
if (IS_ERR(vmfile)) {
ret = PTR_ERR(vmfile);
goto out;
}
asma->file = vmfile;
}
get_file(asma->file);
/*
* XXX - Reworked to use shmem_zero_setup() instead of
* shmem_set_file while we're in staging. -jstultz
*/
if (vma->vm_flags & VM_SHARED) {
ret = shmem_zero_setup(vma);
if (ret) {
fput(asma->file);
goto out;
}
}
if (vma->vm_file)
fput(vma->vm_file);
vma->vm_file = asma->file;
out:
mutex_unlock(&ashmem_mutex);
return ret;
}
/*
* ashmem_shrink - our cache shrinker, called from mm/vmscan.c
*
* 'nr_to_scan' is the number of objects to scan for freeing.
*
* 'gfp_mask' is the mask of the allocation that got us into this mess.
*
* Return value is the number of objects freed or -1 if we cannot
* proceed without risk of deadlock (due to gfp_mask).
*
* We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
* chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
* pages freed.
*/
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct ashmem_range *range, *next;
unsigned long freed = 0;
/* We might recurse into filesystem code, so bail out if necessary */
if (!(sc->gfp_mask & __GFP_FS))
return SHRINK_STOP;
mutex_lock(&ashmem_mutex);
list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
loff_t start = range->pgstart * PAGE_SIZE;
loff_t end = (range->pgend + 1) * PAGE_SIZE;
vfs_fallocate(range->asma->file,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
start, end - start);
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
freed += range_size(range);
if (--sc->nr_to_scan <= 0)
break;
}
mutex_unlock(&ashmem_mutex);
return freed;
}
static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
/*
* note that lru_count is count of pages on the lru, not a count of
* objects on the list. This means the scan function needs to return the
* number of pages freed, not the number of objects scanned.
*/
return lru_count;
}
static struct shrinker ashmem_shrinker = {
.count_objects = ashmem_shrink_count,
.scan_objects = ashmem_shrink_scan,
/*
* XXX (dchinner): I wish people would comment on why they need
* significant changes to the default value here
*/
.seeks = DEFAULT_SEEKS * 4,
};
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
int ret = 0;
mutex_lock(&ashmem_mutex);
/* the user can only remove, not add, protection bits */
if (unlikely((asma->prot_mask & prot) != prot)) {
ret = -EINVAL;
goto out;
}
/* does the application expect PROT_READ to imply PROT_EXEC? */
if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
prot |= PROT_EXEC;
asma->prot_mask = prot;
out:
mutex_unlock(&ashmem_mutex);
return ret;
}
static int set_name(struct ashmem_area *asma, void __user *name)
{
int len;
int ret = 0;
char local_name[ASHMEM_NAME_LEN];
/*
* Holding the ashmem_mutex while doing a copy_from_user might cause
* a data abort which would try to access mmap_sem. If another
* thread has invoked ashmem_mmap then it will be holding the
* semaphore and will be waiting for ashmem_mutex, thereby leading to
* deadlock. We'll release the mutex and take the name to a local
* variable that does not need protection and later copy the local
* variable to the structure member with lock held.
*/
len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
if (len < 0)
return len;
if (len == ASHMEM_NAME_LEN)
local_name[ASHMEM_NAME_LEN - 1] = '\0';
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
if (unlikely(asma->file))
ret = -EINVAL;
else
strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
mutex_unlock(&ashmem_mutex);
return ret;
}
static int get_name(struct ashmem_area *asma, void __user *name)
{
int ret = 0;
size_t len;
/*
* Have a local variable to which we'll copy the content
* from asma with the lock held. Later we can copy this to the user
* space safely without holding any locks. So even if we proceed to
* wait for mmap_sem, it won't lead to deadlock.
*/
char local_name[ASHMEM_NAME_LEN];
mutex_lock(&ashmem_mutex);
if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
/*
* Copying only `len', instead of ASHMEM_NAME_LEN, bytes
* prevents us from revealing one user's stack to another.
*/
len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
} else {
len = sizeof(ASHMEM_NAME_DEF);
memcpy(local_name, ASHMEM_NAME_DEF, len);
}
mutex_unlock(&ashmem_mutex);
/*
* Now we are just copying from the stack variable to userland
* No lock held
*/
if (unlikely(copy_to_user(name, local_name, len)))
ret = -EFAULT;
return ret;
}
/*
* ashmem_pin - pin the given ashmem region, returning whether it was
* previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
*
* Caller must hold ashmem_mutex.
*/
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
struct ashmem_range *range, *next;
int ret = ASHMEM_NOT_PURGED;
list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
/* moved past last applicable page; we can short circuit */
if (range_before_page(range, pgstart))
break;
/*
* The user can ask us to pin pages that span multiple ranges,
* or to pin pages that aren't even unpinned, so this is messy.
*
* Four cases:
* 1. The requested range subsumes an existing range, so we
* just remove the entire matching range.
* 2. The requested range overlaps the start of an existing
* range, so we just update that range.
* 3. The requested range overlaps the end of an existing
* range, so we just update that range.
* 4. The requested range punches a hole in an existing range,
* so we have to update one side of the range and then
* create a new range for the other side.
*/
if (page_range_in_range(range, pgstart, pgend)) {
ret |= range->purged;
/* Case #1: Easy. Just nuke the whole thing. */
if (page_range_subsumes_range(range, pgstart, pgend)) {
range_del(range);
continue;
}
/* Case #2: We overlap from the start, so adjust it */
if (range->pgstart >= pgstart) {
range_shrink(range, pgend + 1, range->pgend);
continue;
}
/* Case #3: We overlap from the rear, so adjust it */
if (range->pgend <= pgend) {
range_shrink(range, range->pgstart,
pgstart - 1);
continue;
}
/*
* Case #4: We eat a chunk out of the middle. A bit
* more complicated, we allocate a new range for the
* second half and adjust the first chunk's endpoint.
*/
range_alloc(asma, range, range->purged,
pgend + 1, range->pgend);
range_shrink(range, range->pgstart, pgstart - 1);
break;
}
}
return ret;
}
/*
* ashmem_unpin - unpin the given range of pages. Returns zero on success.
*
* Caller must hold ashmem_mutex.
*/
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
struct ashmem_range *range, *next;
unsigned int purged = ASHMEM_NOT_PURGED;
restart:
list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
/* short circuit: this is our insertion point */
if (range_before_page(range, pgstart))
break;
/*
* The user can ask us to unpin pages that are already entirely
* or partially pinned. We handle those two cases here.
*/
if (page_range_subsumed_by_range(range, pgstart, pgend))
return 0;
if (page_range_in_range(range, pgstart, pgend)) {
pgstart = min_t(size_t, range->pgstart, pgstart);
pgend = max_t(size_t, range->pgend, pgend);
purged |= range->purged;
range_del(range);
goto restart;
}
}
return range_alloc(asma, range, purged, pgstart, pgend);
}
/*
* ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
* given interval are unpinned and ASHMEM_IS_PINNED otherwise.
*
* Caller must hold ashmem_mutex.
*/
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
size_t pgend)
{
struct ashmem_range *range;
int ret = ASHMEM_IS_PINNED;
list_for_each_entry(range, &asma->unpinned_list, unpinned) {
if (range_before_page(range, pgstart))
break;
if (page_range_in_range(range, pgstart, pgend)) {
ret = ASHMEM_IS_UNPINNED;
break;
}
}
return ret;
}
static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
void __user *p)
{
struct ashmem_pin pin;
size_t pgstart, pgend;
int ret = -EINVAL;
if (unlikely(!asma->file))
return -EINVAL;
if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
return -EFAULT;
/* per custom, you can pass zero for len to mean "everything onward" */
if (!pin.len)
pin.len = PAGE_ALIGN(asma->size) - pin.offset;
if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
return -EINVAL;
if (unlikely(((__u32)-1) - pin.offset < pin.len))
return -EINVAL;
if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
return -EINVAL;
pgstart = pin.offset / PAGE_SIZE;
pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
mutex_lock(&ashmem_mutex);
switch (cmd) {
case ASHMEM_PIN:
ret = ashmem_pin(asma, pgstart, pgend);
break;
case ASHMEM_UNPIN:
ret = ashmem_unpin(asma, pgstart, pgend);
break;
case ASHMEM_GET_PIN_STATUS:
ret = ashmem_get_pin_status(asma, pgstart, pgend);
break;
}
mutex_unlock(&ashmem_mutex);
return ret;
}
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ashmem_area *asma = file->private_data;
long ret = -ENOTTY;
switch (cmd) {
case ASHMEM_SET_NAME:
ret = set_name(asma, (void __user *)arg);
break;
case ASHMEM_GET_NAME:
ret = get_name(asma, (void __user *)arg);
break;
case ASHMEM_SET_SIZE:
ret = -EINVAL;
if (!asma->file) {
ret = 0;
asma->size = (size_t)arg;
}
break;
case ASHMEM_GET_SIZE:
ret = asma->size;
break;
case ASHMEM_SET_PROT_MASK:
ret = set_prot_mask(asma, arg);
break;
case ASHMEM_GET_PROT_MASK:
ret = asma->prot_mask;
break;
case ASHMEM_PIN:
case ASHMEM_UNPIN:
case ASHMEM_GET_PIN_STATUS:
ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
break;
case ASHMEM_PURGE_ALL_CACHES:
ret = -EPERM;
if (capable(CAP_SYS_ADMIN)) {
struct shrink_control sc = {
.gfp_mask = GFP_KERNEL,
.nr_to_scan = LONG_MAX,
};
ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
ashmem_shrink_scan(&ashmem_shrinker, &sc);
}
break;
}
return ret;
}
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
switch (cmd) {
case COMPAT_ASHMEM_SET_SIZE:
cmd = ASHMEM_SET_SIZE;
break;
case COMPAT_ASHMEM_SET_PROT_MASK:
cmd = ASHMEM_SET_PROT_MASK;
break;
}
return ashmem_ioctl(file, cmd, arg);
}
#endif
static const struct file_operations ashmem_fops = {
.owner = THIS_MODULE,
.open = ashmem_open,
.release = ashmem_release,
.read = ashmem_read,
.llseek = ashmem_llseek,
.mmap = ashmem_mmap,
.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_ashmem_ioctl,
#endif
};
static struct miscdevice ashmem_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "ashmem",
.fops = &ashmem_fops,
};
static int __init ashmem_init(void)
{
int ret;
ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
sizeof(struct ashmem_area),
0, 0, NULL);
if (unlikely(!ashmem_area_cachep)) {
pr_err("failed to create slab cache\n");
return -ENOMEM;
}
ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
sizeof(struct ashmem_range),
0, 0, NULL);
if (unlikely(!ashmem_range_cachep)) {
pr_err("failed to create slab cache\n");
return -ENOMEM;
}
ret = misc_register(&ashmem_misc);
if (unlikely(ret)) {
pr_err("failed to register misc device!\n");
return ret;
}
register_shrinker(&ashmem_shrinker);
return 0;
}
static void __exit ashmem_exit(void)
{
unregister_shrinker(&ashmem_shrinker);
misc_deregister(&ashmem_misc);
kmem_cache_destroy(ashmem_range_cachep);
kmem_cache_destroy(ashmem_area_cachep);
}
module_init(ashmem_init);
module_exit(ashmem_exit);
MODULE_LICENSE("GPL");

ashmem/ashmem.h (new file)
@@ -0,0 +1,27 @@
/*
* include/linux/ashmem.h
*
* Copyright 2008 Google Inc.
* Author: Robert Love
*
* This file is dual licensed. It may be redistributed and/or modified
* under the terms of the Apache 2.0 License OR version 2 of the GNU
* General Public License.
*/
#ifndef _LINUX_ASHMEM_H
#define _LINUX_ASHMEM_H
#include <linux/limits.h>
#include <linux/ioctl.h>
#include <linux/compat.h>
#include "uapi/ashmem.h"
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
#define COMPAT_ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, compat_size_t)
#define COMPAT_ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned int)
#endif
#endif /* _LINUX_ASHMEM_H */

ashmem/deps.c (new file)
@@ -0,0 +1,11 @@
#include <linux/mm.h>
#include <linux/kallsyms.h>
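/*
* shmem_zero_setup() is not exported for module use, so it is resolved at
* runtime via kallsyms_lookup_name() the first time it is needed.
*/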
static int (*shmem_zero_setup_ptr)(struct vm_area_struct *) = NULL;
int shmem_zero_setup(struct vm_area_struct *vma)
{
if (!shmem_zero_setup_ptr)
shmem_zero_setup_ptr = kallsyms_lookup_name("shmem_zero_setup");
return shmem_zero_setup_ptr(vma);
}

ashmem/dkms.conf (new file)
@@ -0,0 +1,7 @@
PACKAGE_NAME="anbox-ashmem"
PACKAGE_VERSION="1"
CLEAN="make clean"
MAKE[0]="make all KERNEL_SRC=/lib/modules/$kernelver/build"
BUILT_MODULE_NAME[0]="ashmem_linux"
DEST_MODULE_LOCATION[0]="/updates"
AUTOINSTALL="yes"

ashmem/uapi/ashmem.h (new file)
@@ -0,0 +1,47 @@
/*
* drivers/staging/android/uapi/ashmem.h
*
* Copyright 2008 Google Inc.
* Author: Robert Love
*
* This file is dual licensed. It may be redistributed and/or modified
* under the terms of the Apache 2.0 License OR version 2 of the GNU
* General Public License.
*/
#ifndef _UAPI_LINUX_ASHMEM_H
#define _UAPI_LINUX_ASHMEM_H
#include <linux/ioctl.h>
#define ASHMEM_NAME_LEN 256
#define ASHMEM_NAME_DEF "dev/ashmem"
/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
#define ASHMEM_NOT_PURGED 0
#define ASHMEM_WAS_PURGED 1
/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
#define ASHMEM_IS_UNPINNED 0
#define ASHMEM_IS_PINNED 1
struct ashmem_pin {
__u32 offset; /* offset into region, in bytes, page-aligned */
__u32 len; /* length forward from offset, in bytes, page-aligned */
};
#define __ASHMEMIOC 0x77
#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
#endif /* _UAPI_LINUX_ASHMEM_H */
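Together with the driver above, this header is the whole userspace contract: open /dev/ashmem, set a name and size before the first mmap(), and optionally unpin the region so the kernel may reclaim it under memory pressure. The following is an illustrative sketch only (it assumes the module is loaded, /dev/ashmem is accessible via the udev rule in this repository, and this header is reachable as "ashmem.h"):

    /* ashmem_demo.c (illustrative): create, map, and pin/unpin an ashmem region */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include "ashmem.h"              /* the uapi header above; include path assumed */

    int main(void)
    {
        int fd = open("/dev/ashmem", O_RDWR);
        if (fd < 0) {
            perror("open /dev/ashmem");
            return 1;
        }

        /* The name (optional, shows up as dev/ashmem/<name> in /proc/<pid>/maps)
         * and the size must be set before the first mmap(). */
        ioctl(fd, ASHMEM_SET_NAME, "demo-region");
        if (ioctl(fd, ASHMEM_SET_SIZE, 4096) < 0) {
            perror("ASHMEM_SET_SIZE");
            return 1;
        }

        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        strcpy(p, "hello from ashmem");

        /* Unpin the whole region (len == 0 means "to the end"); the kernel may
         * now purge it under memory pressure. Re-pinning reports whether that
         * happened in the meantime. */
        struct ashmem_pin pin = { .offset = 0, .len = 0 };
        ioctl(fd, ASHMEM_UNPIN, &pin);
        int purged = ioctl(fd, ASHMEM_PIN, &pin);
        printf("%s (purged while unpinned: %s)\n", p,
               purged == ASHMEM_WAS_PURGED ? "yes" : "no");

        munmap(p, 4096);
        close(fd);
        return 0;
    }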

binder/Makefile (new file)
@@ -0,0 +1,14 @@
ccflags-y += -I$(src) -Wno-int-conversion -DCONFIG_ANDROID_BINDER_DEVICES="\"binder\""
obj-m := binder_linux.o
binder_linux-y := deps.o binder.o
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
all:
$(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD
install:
cp binder_linux.ko $(DESTDIR)/
clean:
rm -rf deps.h *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions

binder/binder.c (new file, 4408 lines)
(File diff suppressed because it is too large.)

binder/binder.h (new file)
@@ -0,0 +1,450 @@
/*
* Copyright (C) 2008 Google, Inc.
*
* Based on, but no longer compatible with, the original
* OpenBinder.org binder driver interface, which is:
*
* Copyright (c) 2005 Palmsource, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UAPI_LINUX_BINDER_H
#define _UAPI_LINUX_BINDER_H
#include <linux/types.h>
#include <linux/ioctl.h>
#define B_PACK_CHARS(c1, c2, c3, c4) \
((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85
enum {
BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
BINDER_TYPE_FDA = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};
enum {
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
};
#ifdef BINDER_IPC_32BIT
typedef __u32 binder_size_t;
typedef __u32 binder_uintptr_t;
#else
typedef __u64 binder_size_t;
typedef __u64 binder_uintptr_t;
#endif
/**
* struct binder_object_header - header shared by all binder metadata objects.
* @type: type of the object
*/
struct binder_object_header {
__u32 type;
};
/*
* This is the flattened representation of a Binder object for transfer
* between processes. The 'offsets' supplied as part of a binder transaction
* contains offsets into the data where these structures occur. The Binder
* driver takes care of re-writing the structure type and data as it moves
* between processes.
*/
struct flat_binder_object {
struct binder_object_header hdr;
__u32 flags;
/* 8 bytes of data. */
union {
binder_uintptr_t binder; /* local object */
__u32 handle; /* remote object */
};
/* extra data associated with local object */
binder_uintptr_t cookie;
};
/**
* struct binder_fd_object - describes a file descriptor to be fixed up.
* @hdr: common header structure
* @pad_flags: padding to remain compatible with old userspace code
* @pad_binder: padding to remain compatible with old userspace code
* @fd: file descriptor
* @cookie: opaque data, used by user-space
*/
struct binder_fd_object {
struct binder_object_header hdr;
__u32 pad_flags;
union {
binder_uintptr_t pad_binder;
__u32 fd;
};
binder_uintptr_t cookie;
};
/* struct binder_buffer_object - object describing a userspace buffer
* @hdr: common header structure
* @flags: one or more BINDER_BUFFER_* flags
* @buffer: address of the buffer
* @length: length of the buffer
* @parent: index in offset array pointing to parent buffer
* @parent_offset: offset in @parent pointing to this buffer
*
* A binder_buffer object represents an object that the
* binder kernel driver can copy verbatim to the target
* address space. A buffer itself may be pointed to from
* within another buffer, meaning that the pointer inside
* that other buffer needs to be fixed up as well. This
* can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
* flag in @flags, by setting @parent buffer to the index
* in the offset array pointing to the parent binder_buffer_object,
* and by setting @parent_offset to the offset in the parent buffer
* at which the pointer to this buffer is located.
*/
struct binder_buffer_object {
struct binder_object_header hdr;
__u32 flags;
binder_uintptr_t buffer;
binder_size_t length;
binder_size_t parent;
binder_size_t parent_offset;
};
enum {
BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
};
/* struct binder_fd_array_object - object describing an array of fds in a buffer
* @hdr: common header structure
* @num_fds: number of file descriptors in the buffer
* @parent: index in offset array to buffer holding the fd array
* @parent_offset: start offset of fd array in the buffer
*
* A binder_fd_array object represents an array of file
* descriptors embedded in a binder_buffer_object. It is
* different from a regular binder_buffer_object because it
* describes a list of file descriptors to fix up, not an opaque
* blob of memory, and hence the kernel needs to treat it differently.
*
* An example of how this would be used is with Android's
* native_handle_t object, which is a struct with a list of integers
* and a list of file descriptors. The native_handle_t struct itself
* will be represented by a struct binder_buffer_object, whereas the
* embedded list of file descriptors is represented by a
* struct binder_fd_array_object with that binder_buffer_object as
* a parent.
*/
struct binder_fd_array_object {
struct binder_object_header hdr;
binder_size_t num_fds;
binder_size_t parent;
binder_size_t parent_offset;
};
/*
* On 64-bit platforms where user code may run in 32-bits the driver must
* translate the buffer (and local binder) addresses appropriately.
*/
struct binder_write_read {
binder_size_t write_size; /* bytes to write */
binder_size_t write_consumed; /* bytes consumed by driver */
binder_uintptr_t write_buffer;
binder_size_t read_size; /* bytes to read */
binder_size_t read_consumed; /* bytes consumed by driver */
binder_uintptr_t read_buffer;
};
/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
/* driver protocol version -- increment with incompatible change */
__s32 protocol_version;
};
/* This is the current protocol version. */
#ifdef BINDER_IPC_32BIT
#define BINDER_CURRENT_PROTOCOL_VERSION 7
#else
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
/*
* NOTE: Two special error codes you should check for when calling
* in to the driver are:
*
* EINTR -- The operation has been interrupted. This should be
* handled by retrying the ioctl() until a different error code
* is returned.
*
* ECONNREFUSED -- The driver is no longer accepting operations
* from your process. That is, the process is being destroyed.
* You should handle this by exiting from your process. Note
* that once this error code is returned, all further calls to
* the driver from any thread will return this same code.
*/
enum transaction_flags {
TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
};
struct binder_transaction_data {
/* The first two are only used for bcTRANSACTION and brTRANSACTION,
* identifying the target and contents of the transaction.
*/
union {
/* target descriptor of command transaction */
__u32 handle;
/* target descriptor of return transaction */
binder_uintptr_t ptr;
} target;
binder_uintptr_t cookie; /* target object cookie */
__u32 code; /* transaction command */
/* General information about the transaction. */
__u32 flags;
pid_t sender_pid;
uid_t sender_euid;
binder_size_t data_size; /* number of bytes of data */
binder_size_t offsets_size; /* number of bytes of offsets */
/* If this transaction is inline, the data immediately
* follows here; otherwise, it ends with a pointer to
* the data buffer.
*/
union {
struct {
/* transaction data */
binder_uintptr_t buffer;
/* offsets from buffer to flat_binder_object structs */
binder_uintptr_t offsets;
} ptr;
__u8 buf[8];
} data;
};
struct binder_transaction_data_sg {
struct binder_transaction_data transaction_data;
binder_size_t buffers_size;
};
struct binder_ptr_cookie {
binder_uintptr_t ptr;
binder_uintptr_t cookie;
};
struct binder_handle_cookie {
__u32 handle;
binder_uintptr_t cookie;
} __packed;
struct binder_pri_desc {
__s32 priority;
__u32 desc;
};
struct binder_pri_ptr_cookie {
__s32 priority;
binder_uintptr_t ptr;
binder_uintptr_t cookie;
};
enum binder_driver_return_protocol {
BR_ERROR = _IOR('r', 0, __s32),
/*
* int: error code
*/
BR_OK = _IO('r', 1),
/* No parameters! */
BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
/*
* binder_transaction_data: the received command.
*/
BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
/*
* not currently supported
* int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
* Else the remote object has acquired a primary reference.
*/
BR_DEAD_REPLY = _IO('r', 5),
/*
* The target of the last transaction (either a bcTRANSACTION or
* a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
*/
BR_TRANSACTION_COMPLETE = _IO('r', 6),
/*
* No parameters... always refers to the last transaction requested
* (including replies). Note that this will be sent even for
* asynchronous transactions.
*/
BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
/*
* void *: ptr to binder
* void *: cookie for binder
*/
BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
/*
* not currently supported
* int: priority
* void *: ptr to binder
* void *: cookie for binder
*/
BR_NOOP = _IO('r', 12),
/*
* No parameters. Do nothing and examine the next command. It exists
* primarily so that we can replace it with a BR_SPAWN_LOOPER command.
*/
BR_SPAWN_LOOPER = _IO('r', 13),
/*
* No parameters. The driver has determined that a process has no
* threads waiting to service incoming transactions. When a process
* receives this command, it must spawn a new service thread and
* register it via bcENTER_LOOPER.
*/
BR_FINISHED = _IO('r', 14),
/*
* not currently supported
* stop threadpool thread
*/
BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
/*
* void *: cookie
*/
BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
/*
* void *: cookie
*/
BR_FAILED_REPLY = _IO('r', 17),
/*
* The last transaction (either a bcTRANSACTION or
* a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
*/
};
enum binder_driver_command_protocol {
BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
/*
* binder_transaction_data: the sent command.
*/
BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
/*
* not currently supported
* int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
* Else you have acquired a primary reference on the object.
*/
BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
/*
* void *: ptr to transaction data received on a read
*/
BC_INCREFS = _IOW('c', 4, __u32),
BC_ACQUIRE = _IOW('c', 5, __u32),
BC_RELEASE = _IOW('c', 6, __u32),
BC_DECREFS = _IOW('c', 7, __u32),
/*
* int: descriptor
*/
BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
/*
* void *: ptr to binder
* void *: cookie for binder
*/
BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
/*
* not currently supported
* int: priority
* int: descriptor
*/
BC_REGISTER_LOOPER = _IO('c', 11),
/*
* No parameters.
* Register a spawned looper thread with the device.
*/
BC_ENTER_LOOPER = _IO('c', 12),
BC_EXIT_LOOPER = _IO('c', 13),
/*
* No parameters.
* These two commands are sent as an application-level thread
* enters and exits the binder loop, respectively. They are
* used so the binder can have an accurate count of the number
* of looping threads it has available.
*/
BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
struct binder_handle_cookie),
/*
* int: handle
* void *: cookie
*/
BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
struct binder_handle_cookie),
/*
* int: handle
* void *: cookie
*/
BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
/*
* void *: cookie
*/
BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
/*
* binder_transaction_data_sg: the sent command.
*/
};
#endif /* _UAPI_LINUX_BINDER_H */
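As a small illustration of this interface, and of the EINTR note above, the following sketch opens the binder device and queries the protocol version (it assumes the module is loaded, /dev/binder is accessible via the udev rule in this repository, and this header is reachable as "binder.h"):

    /* binder_version.c (illustrative): query the binder protocol version */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "binder.h"              /* the uapi header above; include path assumed */

    int main(void)
    {
        struct binder_version vers;
        int ret, fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
        if (fd < 0) {
            perror("open /dev/binder");
            return 1;
        }

        /* Per the note above, EINTR simply means "retry the ioctl". */
        do {
            ret = ioctl(fd, BINDER_VERSION, &vers);
        } while (ret < 0 && errno == EINTR);
        if (ret < 0) {
            perror("BINDER_VERSION");
            return 1;
        }

        printf("binder protocol version %d (this header expects %d)\n",
               vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        close(fd);
        return 0;
    }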

binder/binder_trace.h (new file)
@@ -0,0 +1,329 @@
/*
* Copyright (C) 2012 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM binder
#if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _BINDER_TRACE_H
#include <linux/tracepoint.h>
struct binder_buffer;
struct binder_node;
struct binder_proc;
struct binder_ref;
struct binder_thread;
struct binder_transaction;
TRACE_EVENT(binder_ioctl,
TP_PROTO(unsigned int cmd, unsigned long arg),
TP_ARGS(cmd, arg),
TP_STRUCT__entry(
__field(unsigned int, cmd)
__field(unsigned long, arg)
),
TP_fast_assign(
__entry->cmd = cmd;
__entry->arg = arg;
),
TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
);
DECLARE_EVENT_CLASS(binder_lock_class,
TP_PROTO(const char *tag),
TP_ARGS(tag),
TP_STRUCT__entry(
__field(const char *, tag)
),
TP_fast_assign(
__entry->tag = tag;
),
TP_printk("tag=%s", __entry->tag)
);
#define DEFINE_BINDER_LOCK_EVENT(name) \
DEFINE_EVENT(binder_lock_class, name, \
TP_PROTO(const char *func), \
TP_ARGS(func))
DEFINE_BINDER_LOCK_EVENT(binder_lock);
DEFINE_BINDER_LOCK_EVENT(binder_locked);
DEFINE_BINDER_LOCK_EVENT(binder_unlock);
DECLARE_EVENT_CLASS(binder_function_return_class,
TP_PROTO(int ret),
TP_ARGS(ret),
TP_STRUCT__entry(
__field(int, ret)
),
TP_fast_assign(
__entry->ret = ret;
),
TP_printk("ret=%d", __entry->ret)
);
#define DEFINE_BINDER_FUNCTION_RETURN_EVENT(name) \
DEFINE_EVENT(binder_function_return_class, name, \
TP_PROTO(int ret), \
TP_ARGS(ret))
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
TRACE_EVENT(binder_wait_for_work,
TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
TP_ARGS(proc_work, transaction_stack, thread_todo),
TP_STRUCT__entry(
__field(bool, proc_work)
__field(bool, transaction_stack)
__field(bool, thread_todo)
),
TP_fast_assign(
__entry->proc_work = proc_work;
__entry->transaction_stack = transaction_stack;
__entry->thread_todo = thread_todo;
),
TP_printk("proc_work=%d transaction_stack=%d thread_todo=%d",
__entry->proc_work, __entry->transaction_stack,
__entry->thread_todo)
);
TRACE_EVENT(binder_transaction,
TP_PROTO(bool reply, struct binder_transaction *t,
struct binder_node *target_node),
TP_ARGS(reply, t, target_node),
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, target_node)
__field(int, to_proc)
__field(int, to_thread)
__field(int, reply)
__field(unsigned int, code)
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
__entry->target_node = target_node ? target_node->debug_id : 0;
__entry->to_proc = t->to_proc->pid;
__entry->to_thread = t->to_thread ? t->to_thread->pid : 0;
__entry->reply = reply;
__entry->code = t->code;
__entry->flags = t->flags;
),
TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x",
__entry->debug_id, __entry->target_node,
__entry->to_proc, __entry->to_thread,
__entry->reply, __entry->flags, __entry->code)
);
TRACE_EVENT(binder_transaction_received,
TP_PROTO(struct binder_transaction *t),
TP_ARGS(t),
TP_STRUCT__entry(
__field(int, debug_id)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
),
TP_printk("transaction=%d", __entry->debug_id)
);
TRACE_EVENT(binder_transaction_node_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
struct binder_ref *ref),
TP_ARGS(t, node, ref),
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, node_debug_id)
__field(binder_uintptr_t, node_ptr)
__field(int, ref_debug_id)
__field(uint32_t, ref_desc)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
__entry->node_debug_id = node->debug_id;
__entry->node_ptr = node->ptr;
__entry->ref_debug_id = ref->debug_id;
__entry->ref_desc = ref->desc;
),
TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id,
(u64)__entry->node_ptr,
__entry->ref_debug_id, __entry->ref_desc)
);
TRACE_EVENT(binder_transaction_ref_to_node,
TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
TP_ARGS(t, ref),
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, ref_debug_id)
__field(uint32_t, ref_desc)
__field(int, node_debug_id)
__field(binder_uintptr_t, node_ptr)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
__entry->ref_debug_id = ref->debug_id;
__entry->ref_desc = ref->desc;
__entry->node_debug_id = ref->node->debug_id;
__entry->node_ptr = ref->node->ptr;
),
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
__entry->ref_debug_id, __entry->ref_desc,
(u64)__entry->node_ptr)
);
TRACE_EVENT(binder_transaction_ref_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
struct binder_ref *dest_ref),
TP_ARGS(t, src_ref, dest_ref),
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, node_debug_id)
__field(int, src_ref_debug_id)
__field(uint32_t, src_ref_desc)
__field(int, dest_ref_debug_id)
__field(uint32_t, dest_ref_desc)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
__entry->node_debug_id = src_ref->node->debug_id;
__entry->src_ref_debug_id = src_ref->debug_id;
__entry->src_ref_desc = src_ref->desc;
__entry->dest_ref_debug_id = dest_ref->debug_id;
__entry->dest_ref_desc = dest_ref->desc;
),
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id,
__entry->src_ref_debug_id, __entry->src_ref_desc,
__entry->dest_ref_debug_id, __entry->dest_ref_desc)
);
TRACE_EVENT(binder_transaction_fd,
TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd),
TP_ARGS(t, src_fd, dest_fd),
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, src_fd)
__field(int, dest_fd)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
__entry->src_fd = src_fd;
__entry->dest_fd = dest_fd;
),
TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d",
__entry->debug_id, __entry->src_fd, __entry->dest_fd)
);
DECLARE_EVENT_CLASS(binder_buffer_class,
TP_PROTO(struct binder_buffer *buf),
TP_ARGS(buf),
TP_STRUCT__entry(
__field(int, debug_id)
__field(size_t, data_size)
__field(size_t, offsets_size)
),
TP_fast_assign(
__entry->debug_id = buf->debug_id;
__entry->data_size = buf->data_size;
__entry->offsets_size = buf->offsets_size;
),
TP_printk("transaction=%d data_size=%zd offsets_size=%zd",
__entry->debug_id, __entry->data_size, __entry->offsets_size)
);
DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf,
TP_PROTO(struct binder_buffer *buffer),
TP_ARGS(buffer));
DEFINE_EVENT(binder_buffer_class, binder_transaction_buffer_release,
TP_PROTO(struct binder_buffer *buffer),
TP_ARGS(buffer));
DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
TP_PROTO(struct binder_buffer *buffer),
TP_ARGS(buffer));
TRACE_EVENT(binder_update_page_range,
TP_PROTO(struct binder_proc *proc, bool allocate,
void *start, void *end),
TP_ARGS(proc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
__field(bool, allocate)
__field(size_t, offset)
__field(size_t, size)
),
TP_fast_assign(
__entry->proc = proc->pid;
__entry->allocate = allocate;
__entry->offset = start - proc->buffer;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
__entry->proc, __entry->allocate,
__entry->offset, __entry->size)
);
TRACE_EVENT(binder_command,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
__field(uint32_t, cmd)
),
TP_fast_assign(
__entry->cmd = cmd;
),
TP_printk("cmd=0x%x %s",
__entry->cmd,
_IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_command_strings) ?
binder_command_strings[_IOC_NR(__entry->cmd)] :
"unknown")
);
TRACE_EVENT(binder_return,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
__field(uint32_t, cmd)
),
TP_fast_assign(
__entry->cmd = cmd;
),
TP_printk("cmd=0x%x %s",
__entry->cmd,
_IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_return_strings) ?
binder_return_strings[_IOC_NR(__entry->cmd)] :
"unknown")
);
#endif /* _BINDER_TRACE_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE binder_trace
#include <trace/define_trace.h>

binder/deps.c (new file)
@@ -0,0 +1,142 @@
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/atomic.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/version.h>
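/*
* The wrappers below resolve kernel symbols that are not reliably exported for
* module use (get_vm_area, zap_page_range, get_files_struct, the
* security_binder_* hooks, and so on). Each one is looked up at runtime with
* kallsyms_lookup_name() on first use and cached in a static function pointer.
*/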
static struct vm_struct *(*get_vm_area_ptr)(unsigned long, unsigned long) = NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
static void (*zap_page_range_ptr)(struct vm_area_struct *, unsigned long, unsigned long) = NULL;
#else
static void (*zap_page_range_ptr)(struct vm_area_struct *, unsigned long, unsigned long, struct zap_details *) = NULL;
#endif
static int (*map_kernel_range_noflush_ptr)(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages) = NULL;
static void (*unmap_kernel_range_ptr)(unsigned long, unsigned long) = NULL;
static struct files_struct *(*get_files_struct_ptr)(struct task_struct *) = NULL;
static void (*put_files_struct_ptr)(struct files_struct *) = NULL;
static struct sighand_struct *(*__lock_task_sighand_ptr)(struct task_struct *, unsigned long *) = NULL;
static int (*__alloc_fd_ptr)(struct files_struct *files, unsigned start, unsigned end, unsigned flags) = NULL;
static void (*__fd_install_ptr)(struct files_struct *files, unsigned int fd, struct file *file) = NULL;
static int (*__close_fd_ptr)(struct files_struct *files, unsigned int fd) = NULL;
static int (*can_nice_ptr)(const struct task_struct *, const int) = NULL;
static int (*security_binder_set_context_mgr_ptr)(struct task_struct *mgr) = NULL;
static int (*security_binder_transaction_ptr)(struct task_struct *from, struct task_struct *to) = NULL;
static int (*security_binder_transfer_binder_ptr)(struct task_struct *from, struct task_struct *to) = NULL;
static int (*security_binder_transfer_file_ptr)(struct task_struct *from, struct task_struct *to, struct file *file) = NULL;
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
if (!get_vm_area_ptr)
get_vm_area_ptr = kallsyms_lookup_name("get_vm_area");
return get_vm_area_ptr(size, flags);
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size)
#else
void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details)
#endif
{
if (!zap_page_range_ptr)
zap_page_range_ptr = kallsyms_lookup_name("zap_page_range");
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
zap_page_range_ptr(vma, address, size);
#else
zap_page_range_ptr(vma, address, size, details);
#endif
}
int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages)
{
if (!map_kernel_range_noflush_ptr)
map_kernel_range_noflush_ptr = kallsyms_lookup_name("map_kernel_range_noflush");
return map_kernel_range_noflush_ptr(start, size, prot, pages);
}
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
if (!unmap_kernel_range_ptr)
unmap_kernel_range_ptr = kallsyms_lookup_name("unmap_kernel_range");
unmap_kernel_range_ptr(addr, size);
}
struct files_struct *get_files_struct(struct task_struct *task)
{
if (!get_files_struct_ptr)
get_files_struct_ptr = kallsyms_lookup_name("get_files_struct");
return get_files_struct_ptr(task);
}
void put_files_struct(struct files_struct *files)
{
if (!put_files_struct_ptr)
put_files_struct_ptr = kallsyms_lookup_name("put_files_struct");
put_files_struct_ptr(files);
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
if (!__lock_task_sighand_ptr)
__lock_task_sighand_ptr = kallsyms_lookup_name("__lock_task_sighand");
return __lock_task_sighand_ptr(tsk, flags);
}
int __alloc_fd(struct files_struct *files, unsigned start, unsigned end, unsigned flags)
{
if (!__alloc_fd_ptr)
__alloc_fd_ptr = kallsyms_lookup_name("__alloc_fd");
return __alloc_fd_ptr(files, start, end, flags);
}
void __fd_install(struct files_struct *files, unsigned int fd, struct file *file)
{
if (!__fd_install_ptr)
__fd_install_ptr = kallsyms_lookup_name("__fd_install");
__fd_install_ptr(files, fd, file);
}
int __close_fd(struct files_struct *files, unsigned int fd)
{
if (!__close_fd_ptr)
__close_fd_ptr = kallsyms_lookup_name("__close_fd");
return __close_fd_ptr(files, fd);
}
int can_nice(const struct task_struct *p, const int nice)
{
if (!can_nice_ptr)
can_nice_ptr = kallsyms_lookup_name("can_nice");
return can_nice_ptr(p, nice);
}
int security_binder_set_context_mgr(struct task_struct *mgr)
{
if (!security_binder_set_context_mgr_ptr)
security_binder_set_context_mgr_ptr = kallsyms_lookup_name("security_binder_set_context_mgr");
return security_binder_set_context_mgr_ptr(mgr);
}
int security_binder_transaction(struct task_struct *from, struct task_struct *to)
{
if (!security_binder_transaction_ptr)
security_binder_transaction_ptr = kallsyms_lookup_name("security_binder_transaction");
return security_binder_transaction_ptr(from, to);
}
int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to)
{
if (!security_binder_transfer_binder_ptr)
security_binder_transfer_binder_ptr = kallsyms_lookup_name("security_binder_transfer_binder");
return security_binder_transfer_binder_ptr(from, to);
}
int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file)
{
if (!security_binder_transfer_file_ptr)
security_binder_transfer_file_ptr = kallsyms_lookup_name("security_binder_transfer_file");
return security_binder_transfer_file_ptr(from, to, file);
}

binder/dkms.conf (new file)
@@ -0,0 +1,7 @@
PACKAGE_NAME="anbox-binder"
PACKAGE_VERSION="1"
CLEAN="make clean"
MAKE[0]="make all KERNEL_SRC=/lib/modules/$kernelver/build"
BUILT_MODULE_NAME[0]="binder_linux"
DEST_MODULE_LOCATION[0]="/updates"
AUTOINSTALL="yes"

debian/README.Debian (new file)
@@ -0,0 +1,5 @@
MODULE_NAME DKMS module for Debian
This package was automatically generated by the DKMS system,
for distribution on Debian based operating systems.

debian/anbox-modules-dkms.install (new file)
@@ -0,0 +1,3 @@
usr/src/anbox-modules-*
etc/modules-load.d
lib/udev/rules.d

debian/anbox-modules-dkms.postinst (new executable file)
@@ -0,0 +1,51 @@
#!/bin/sh
# Copyright (C) 2002-2005 Flavio Stanchina
# Copyright (C) 2005-2006 Aric Cyr
# Copyright (C) 2007 Mario Limonciello
# Copyright (C) 2009 Alberto Milone
set -e
NAME=anbox-modules
PACKAGE_NAME=$NAME-dkms
DEB_NAME=$(echo $PACKAGE_NAME | sed 's,_,-,')
CVERSION=`dpkg-query -W -f='${Version}' $DEB_NAME | awk -F "-" '{print $1}' | cut -d\: -f2`
ARCH=`dpkg --print-architecture`
dkms_configure () {
for POSTINST in /usr/lib/dkms/common.postinst "/usr/share/$PACKAGE_NAME/postinst"; do
if [ -f "$POSTINST" ]; then
for d in ashmem binder ; do
"$POSTINST" "$NAME-$d" "$CVERSION" "/usr/share/$PACKAGE_NAME" "$ARCH" "$2"
done
return 0
fi
echo "WARNING: $POSTINST does not exist." >&2
done
echo "ERROR: DKMS version is too old and $PACKAGE_NAME was not" >&2
echo "built with legacy DKMS support." >&2
echo "You must either rebuild $PACKAGE_NAME with legacy postinst" >&2
echo "support or upgrade DKMS to a more current version." >&2
return 1
}
case "$1" in
configure)
dkms_configure
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
exit 0

debian/anbox-modules-dkms.prerm (new executable file)
@@ -0,0 +1,28 @@
#!/bin/sh
NAME=anbox-modules-dkms
VERSION=`dpkg-query -W -f='${Version}' $NAME | awk -F "-" '{print $1}' | cut -d\: -f2`
set -e
case "$1" in
remove|upgrade|deconfigure)
if [ "`dkms status -m $NAME`" ]; then
dkms remove -m $NAME -v $VERSION --all
fi
;;
failed-upgrade)
;;
*)
echo "prerm called with unknown argument \`$1'" >&2
exit 1
;;
esac
#DEBHELPER#
exit 0

debian/changelog (new file)
@@ -0,0 +1,51 @@
anbox (10) bionic; urgency=medium

  * Bump version

 -- Simon Fels <morphis@gravedo.de>  Wed, 23 May 2018 11:05:05 +0200

anbox (8) artful; urgency=medium

  * Drop upstart/systemctl session jobs

 -- Simon Fels <morphis@gravedo.de>  Fri, 14 Jul 2017 20:28:49 +0200

anbox (7) artful; urgency=medium

  * Rebuild dkms modules with the kernel asked for and not the current
    running kernel.

 -- Simon Fels <morphis@gravedo.de>  Thu, 15 Jun 2017 18:19:07 +0200

anbox (6) artful; urgency=medium

  * Add anbox-common package which ships any additional files needed for
    anbox but can't be installed with a snap as of today.

 -- Simon Fels <morphis@gravedo.de>  Sat, 29 Apr 2017 12:06:56 +0200

anbox (5) zesty; urgency=medium

  * Rework packaging to also ship things we installed through the snap
    based installer before.

 -- Simon Fels <morphis@gravedo.de>  Thu, 20 Apr 2017 19:58:22 +0200

anbox (4) zesty; urgency=medium

  * Fetch dkms version from debian package in post-install step.

 -- Simon Fels <morphis@gravedo.de>  Thu, 23 Feb 2017 19:12:15 +0100

anbox (3) zesty; urgency=medium

  * Use correct package version in our Makefile to avoid failing module
    builds on the target device.

 -- Simon Fels <morphis@gravedo.de>  Tue, 21 Feb 2017 07:36:45 +0100

anbox (1) xenial; urgency=low

  * Initial release.

 -- Simon Fels <morphis@gravedo.de>  Mon, 06 Feb 2017 21:43:58 +0100

1
debian/compat vendored Normal file
View File

@ -0,0 +1 @@
9

17
debian/control vendored Normal file
View File

@ -0,0 +1,17 @@
Source: anbox
Section: misc
Priority: optional
Maintainer: Simon Fels <morphis@gravedo.de>
Build-Depends: debhelper (>= 9), dkms, dh-systemd
Standards-Version: 4.1.3
Homepage: http://anbox.io
Vcs-Browser: https://github.com/anbox/anbox
Vcs-Git: https://github.com/anbox/anbox.git
Package: anbox-modules-dkms
Architecture: all
Depends: dkms (>= 1.95), ${misc:Depends}
Description: Android kernel drivers (binder, ashmem) in DKMS format
 This package contains an out-of-tree version of the core Android
 kernel functionality, binder and ashmem.
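A binary package from this control file is built and installed in the usual Debian way; a minimal sketch, assuming the build dependencies listed above (debhelper, dkms, dh-systemd) are already installed:

# Build an unsigned binary package from the repository root, then install it.
dpkg-buildpackage -us -uc -b
sudo dpkg -i ../anbox-modules-dkms_*_all.deb   # triggers the DKMS postinst shown above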

374
debian/copyright vendored Normal file
View File

@ -0,0 +1,374 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: anbox
Source: http://github.com/anbox/anbox
Files: *
License: GPL-3
Files: android/*
Copyright: 2008-2016, The Android Open Source Project
License: Apache-2.0
Files: android/appmgr/src/org/anbox/appmgr/AppViewActivity.java
android/appmgr/src/org/anbox/appmgr/LauncherActivity.java
android/appmgr/src/org/anbox/appmgr/LauncherService.java
android/appmgr/src/org/anbox/appmgr/MainApplication.java
android/appmgr/src/org/anbox/appmgr/PackageEventReceiver.java
android/appmgr/src/org/anbox/appmgr/PlatformService.java
Copyright: 2016, 2017, Simon Fels <morphis@gravedo.de>
License: GPL-3
Files: android/appmgr/src/org/anbox/appmgr/GridFragment.java
Copyright: 2012, Loupe Inc
License: Apache-2.0
Files: android/hwcomposer/*
Copyright: 2016, 2017, Simon Fels <morphis@gravedo.de>
License: GPL-3
Files: android/service/*
Copyright: 2016, 2017, Simon Fels <morphis@gravedo.de>
License: GPL-3
Files: cross-compile-chroot.sh
Copyright: 2012-2016, Canonical Ltd
License: LGPL-3
Files: external/android-emugl/*
Copyright: 2008-2016, The Android Open Source Project
License: Apache-2.0
Files: external/android-emugl/host/include/ETC1/*
Copyright: 2009, Google Inc
License: Apache-2.0
Files: external/android-emugl/host/libs/Translator/GLcommon/etc1.cpp
Copyright: 2009, Google Inc
License: Apache-2.0
Files: external/android-emugl/host/libs/Translator/include/GLcommon/etc1.h
Copyright: 2009, Google Inc
License: Apache-2.0
Files: external/nsexec/*
Copyright: 2012-2016, Canonical, Inc
License: GPL-2
Files: external/process-cpp-minimal/include/*
Copyright: 2012-2016, Canonical Ltd
License: LGPL-3
Files: external/process-cpp-minimal/include/core/testing/*
Copyright: 2012-2015, Canonical Ltd
License: GPL-3
Files: external/process-cpp-minimal/include/core/testing/fork_and_run.h
Copyright: 2012-2016, Canonical Ltd
License: LGPL-3
Files: external/process-cpp-minimal/src/*
Copyright: 2012-2015, Canonical Ltd
License: GPL-3
Files: external/process-cpp-minimal/src/core/posix/*
Copyright: 2012-2016, Canonical Ltd
License: LGPL-3
Files: external/process-cpp-minimal/src/core/testing/fork_and_run.cpp
Copyright: 2012-2016, Canonical Ltd
License: LGPL-3
Files: external/xdg/*
Copyright: 2015, Thomas Voß <thomas.voss.bochum@gmail.com>
License: LGPL-3+
Files: kernel/*
Copyright: 2008-2012 Google Inc.
License: GPL-2
Copyright (C) 2008-2012 Google, Inc.
.
This software is licensed under the terms of the GNU General Public
License version 2, as published by the Free Software Foundation, and
may be copied, distributed, and modified under those terms.
.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Files: kernel/ashmem/ashmem.c
Copyright: 2007, 2008, 2012, Google, Inc
License: GPL-2
Copyright (C) 2008-2012 Google, Inc.
.
This software is licensed under the terms of the GNU General Public
License version 2, as published by the Free Software Foundation, and
may be copied, distributed, and modified under those terms.
.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Files: kernel/ashmem/ashmem.h
Copyright: 2008, Google Inc
License: GPL-2
Copyright (C) 2008-2012 Google, Inc.
.
This software is licensed under the terms of the GNU General Public
License version 2, as published by the Free Software Foundation, and
may be copied, distributed, and modified under those terms.
.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Files: kernel/ashmem/uapi/*
Copyright: 2008, Google Inc
License: GPL-2
Copyright (C) 2008-2012 Google, Inc.
.
This software is licensed under the terms of the GNU General Public
License version 2, as published by the Free Software Foundation, and
may be copied, distributed, and modified under those terms.
.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Files: kernel/binder/binder.c
kernel/binder/binder_trace.h
Copyright: 2007, 2008, 2012, Google, Inc
License: GPL-2
Copyright (C) 2008-2012 Google, Inc.
.
This software is licensed under the terms of the GNU General Public
License version 2, as published by the Free Software Foundation, and
may be copied, distributed, and modified under those terms.
.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Files: kernel/binder/binder.h
Copyright: 2008, Google, Inc
2005, Palmsource, Inc
License: GPL-2
Copyright (C) 2008-2012 Google, Inc.
.
This software is licensed under the terms of the GNU General Public
License version 2, as published by the Free Software Foundation, and
may be copied, distributed, and modified under those terms.
.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Files: scripts/*
Copyright: 2016, 2017, Simon Fels <morphis@gravedo.de>
License: GPL-3
Files: scripts/setup-partial-chroot.sh
Copyright: 2012-2016, Canonical Ltd
License: LGPL-3
Files: src/*
Copyright: 2016, 2017, Simon Fels <morphis@gravedo.de>
License: GPL-3
Files: src/anbox/cli.cpp
src/anbox/cli.h
src/anbox/not_reachable.cpp
src/anbox/not_reachable.h
src/anbox/optional.h
src/anbox/version.cpp
src/anbox/version.h
Copyright: 2016, Canonical, Ltd
License: LGPL-3
Files: src/anbox/cmds/system_info.cpp
src/anbox/cmds/system_info.h
Copyright: 2017, Simon Fels <morphis@gravedo.de>
License: LGPL-3
Files: src/anbox/cmds/version.cpp
src/anbox/cmds/version.h
Copyright: 2016, Canonical, Ltd
License: LGPL-3
Files: src/anbox/common/dispatcher.cpp
src/anbox/common/dispatcher.h
Copyright: 2016, Canonical, Ltd
License: LGPL-3
Files: src/anbox/common/fd.cpp
src/anbox/common/fd.h
src/anbox/common/variable_length_array.h
src/anbox/common/wait_handle.cpp
src/anbox/common/wait_handle.h
Copyright: 2012-2016, Canonical Ltd
License: LGPL-3
Files: src/anbox/common/fd_sets.h
Copyright: 2012-2015, Canonical Ltd
License: GPL-3
Files: src/anbox/common/message_channel.cpp
src/anbox/common/message_channel.h
src/anbox/common/type_traits.h
Copyright: 2008-2016, The Android Open Source Project
License: Apache-2.0
Files: src/anbox/common/scope_ptr.h
src/anbox/common/small_vector.h
Copyright: 2014, 2016, The Android Open Source Project
License: Apache-2.0
Files: src/anbox/dbus/codecs.h
Copyright: 2017, Simon Fels <morphis@gravedo.de>
License: LGPL-3
Files: src/anbox/graphics/buffer_queue.cpp
src/anbox/graphics/buffer_queue.h
Copyright: 2008-2016, The Android Open Source Project
License: Apache-2.0
Files: src/anbox/graphics/emugl/*
Copyright: 2008-2016, The Android Open Source Project
License: Apache-2.0
Files: src/anbox/graphics/emugl/DisplayManager.cpp
src/anbox/graphics/emugl/DisplayManager.h
Copyright: 2016, 2017, Simon Fels <morphis@gravedo.de>
License: GPL-3
Files: src/anbox/graphics/emugl/Renderable.cpp
src/anbox/graphics/emugl/Renderable.h
Copyright: 2016, 2017, Simon Fels <morphis@gravedo.de>
License: Apache-2.0
Files: src/anbox/graphics/primitives.h
Copyright: 2012-2016, Canonical Ltd
License: LGPL-3
Files: src/anbox/graphics/program_family.cpp
src/anbox/graphics/program_family.h
Copyright: 2012-2015, Canonical Ltd
License: GPL-3
Files: src/anbox/logger.cpp
src/anbox/logger.h
Copyright: 2015, Canonical, Ltd
License: GPL-3
Files: src/anbox/network/base_socket_messenger.cpp
src/anbox/network/base_socket_messenger.h
src/anbox/network/connection_context.h
src/anbox/network/connections.h
src/anbox/network/message_receiver.h
src/anbox/network/message_sender.h
src/anbox/network/socket_connection.cpp
src/anbox/network/socket_connection.h
src/anbox/network/socket_messenger.h
Copyright: 2012-2015, Canonical Ltd
License: GPL-3
Files: src/anbox/network/fd_socket_transmission.cpp
src/anbox/network/fd_socket_transmission.h
Copyright: 2012-2016, Canonical Ltd
License: LGPL-3
Files: src/anbox/protobuf/*
Copyright: 2012-2016, Canonical Ltd
License: LGPL-3
Files: src/anbox/rpc/channel.cpp
Copyright: 2016, Simon Fels <morphis@gravedo.de>
2012, Canonical Ltd
License: LGPL-3
Files: src/anbox/rpc/make_protobuf_object.h
src/anbox/rpc/pending_call_cache.cpp
src/anbox/rpc/pending_call_cache.h
Copyright: 2012-2016, Canonical Ltd
License: LGPL-3
Files: src/anbox/rpc/template_message_processor.h
Copyright: 2012-2015, Canonical Ltd
License: GPL-3
Files: src/anbox/testing/*
Copyright: 2008-2016, The Android Open Source Project
License: Apache-2.0
Files: src/anbox/utils/*
Copyright: 2017, Simon Fels <morphis@gravedo.de>
License: LGPL-3
Files: tests/*
Copyright: 2016, 2017, Simon Fels <morphis@gravedo.de>
License: GPL-3
Files: tests/anbox/common/*
Copyright: 2014, 2016, The Android Open Source Project
License: Apache-2.0
Files: tests/anbox/common/message_channel_tests.cpp
tests/anbox/common/scope_ptr_tests.cpp
Copyright: 2008-2016, The Android Open Source Project
License: Apache-2.0
Files: tests/anbox/graphics/buffer_queue_tests.cpp
Copyright: 2008-2016, The Android Open Source Project
License: Apache-2.0
License: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
.
On Debian systems, the complete text of the Apache License,
Version 2.0 can be found in '/usr/share/common-licenses/Apache-2.0'.
License: GPL-2
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 dated June, 1991.
.
On Debian systems, the complete text of version 2 of the GNU General
Public License can be found in '/usr/share/common-licenses/GPL-2'.
License: GPL-3
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 3 dated June, 2007.
.
On Debian systems, the complete text of version 3 of the GNU General
Public License can be found in '/usr/share/common-licenses/GPL-3'.
License: LGPL-3
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; version 3 of the License.
.
On Debian systems, the complete text of version 3 of the GNU Lesser
General Public License can be found in `/usr/share/common-licenses/LGPL-3'.
License: LGPL-3+
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; version 3 of the License, or (at
your option) any later version.
.
On Debian systems, the complete text of version 3 of the GNU Lesser
General Public License can be found in `/usr/share/common-licenses/LGPL-3'.

1
debian/dirs vendored Normal file
View File

@ -0,0 +1 @@
usr/src

31
debian/rules vendored Executable file
View File

@ -0,0 +1,31 @@
#!/usr/bin/make -f
# -*- makefile -*-

# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1

VERSION=$(shell dpkg-parsechangelog -SVersion)

# include /usr/share/dpkg/default.mk

%:
	dh $@ --parallel --fail-missing --with systemd

override_dh_auto_configure:

override_dh_auto_build:

override_dh_install:
	VERSION=$(shell dpkg-parsechangelog -SVersion)
	install -d $(CURDIR)/debian/tmp/usr/src
	for d in ashmem binder ; do \
		cp -a $(CURDIR)/$$d $(CURDIR)/debian/tmp/usr/src/anbox-modules-$$d-$(VERSION) ; \
	done
	install -d $(CURDIR)/debian/tmp/lib/udev/rules.d
	install -m 0644 99-anbox.rules $(CURDIR)/debian/tmp/lib/udev/rules.d
	install -d $(CURDIR)/debian/tmp/etc/modules-load.d
	install -m 0644 anbox.conf $(CURDIR)/debian/tmp/etc/modules-load.d
	dh_install
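Once the package is installed, the files staged by this rules file can be sanity-checked; a hedged example, where exact version suffixes depend on the changelog entry used at build time:

ls -d /usr/src/anbox-modules-*         # DKMS source trees copied by override_dh_install
cat /etc/modules-load.d/anbox.conf     # module auto-load configuration
cat /lib/udev/rules.d/99-anbox.rules   # udev permission rules
dkms status                            # build state of the registered modules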

1
debian/source/format vendored Normal file
View File

@ -0,0 +1 @@
3.0 (native)

2
debian/source/options vendored Normal file
View File

@ -0,0 +1,2 @@
tar-ignore = ".git"
tar-ignore = "*.swp"