调用链
process_vm_rw
->process_vm_rw_core
->process_vm_rw_single_vec
->process_vm_rw_pages
->copy_page_to_iter
->copy_page_to_iter_iovec
->copyout
核心代码
common/mm/process_vm_access.c
common/lib/iov_iter.c
版本号:android-4.14-stable
Linux manual page
process_vm_readv(2) - Linux manual page
1 |
|
定义
1 | SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec, |
process_vm_access.c
process_vm_rw
1 | static ssize_t process_vm_rw(pid_t pid, |
检查iovecs
后,调用process_vm_rw_core
process_vm_rw_core
1 | /* Maximum number of entries for process pages array |
- 计算 nr_pages 后,与 PVM_MAX_PP_ARRAY_COUNT 比较:大于则调用 kmalloc 申请内存,否则直接使用 process_pages 数组。
- 调用 find_task_by_vpid 和 get_task_struct,获取目标进程的 task_struct。
- 调用 mm_access(task, PTRACE_MODE_ATTACH_REALCREDS) 获取 mm_struct(同时进行权限检查)。
- 遍历 rvec,调用 process_vm_rw_single_vec
process_vm_rw_single_vec
通过
74/* Maximum number of pages kmalloc'd to hold struct page's during copy */
/**
* process_vm_rw_single_vec - read/write pages from task specified
* @addr: start memory address of target process
* @len: size of area to copy to/from
* @iter: where to copy to/from locally
* @process_pages: struct pages area that can store at least
* nr_pages_to_copy struct page pointers
* @mm: mm for task
* @task: task to read/write from
* @vm_write: 0 means copy from, 1 means copy to
* Returns 0 on success or on failure error code
*/
static int process_vm_rw_single_vec(unsigned long addr,
unsigned long len,
struct iov_iter *iter,
struct page **process_pages,
struct mm_struct *mm,
struct task_struct *task,
int vm_write)
{
unsigned long pa = addr & PAGE_MASK;
unsigned long start_offset = addr - pa;
unsigned long nr_pages;
ssize_t rc = 0;
unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
/ sizeof(struct pages *);
unsigned int flags = 0;
/* Work out address and page range required */
if (len == 0)
return 0;
nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
if (vm_write)
flags |= FOLL_WRITE;
while (!rc && nr_pages && iov_iter_count(iter)) {
int pages = min(nr_pages, max_pages_per_loop);
int locked = 1;
size_t bytes;
/*
* Get the pages we're interested in. We must
* access remotely because task/mm might not
* current/current->mm
*/
down_read(&mm->mmap_sem);
pages = get_user_pages_remote(task, mm, pa, pages, flags,
process_pages, NULL, &locked);
if (locked)
up_read(&mm->mmap_sem);
if (pages <= 0)
return -EFAULT;
bytes = pages * PAGE_SIZE - start_offset;
if (bytes > len)
bytes = len;
rc = process_vm_rw_pages(process_pages,
start_offset, bytes, iter,
vm_write);
len -= bytes;
start_offset = 0;
nr_pages -= pages;
pa += pages * PAGE_SIZE;
while (pages)
put_page(process_pages[--pages]);
}
return rc;
}get_user_pages_remote
获取pages
(调用链为__get_user_pages_locked
->__get_user_pages
),然后通过process_vm_rw_pages
读写物理内存。
__get_user_pages
1 | static long __get_user_pages(struct mm_struct *mm, |
process_vm_rw_pages
1 | /** |
copy_page_to_iter
1 | static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes, |
iov_iter.c
copy_page_to_iter_iovec
1 | static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes, |
copyout
1 | static int copyout(void __user *to, const void *from, size_t n) |