============
-
125주차 진도를 복습하였습니다.
-
vfs_caches_init()
- start_kernel 1 ~/kernel/iamroot/linux-stable/init/main.c
- vfs_caches_init 925 ~/kernel/iamroot/linux-stable/init/main.c
- mnt_init 3503 ~/kernel/iamroot/linux-stable/fs/dcache.c
- sysfs_init 3139 ~/kernel/iamroot/linux-stable/fs/namespace.c
- kern_mount 319 ~/kernel/iamroot/linux-stable/fs/sysfs/mount.c
- kern_mount_data 1924 ~/kernel/iamroot/linux-stable/include/linux/fs.h
- vfs_kern_mount 3165 ~/kernel/iamroot/linux-stable/fs/namespace.c
- mount_fs 1111 ~/kernel/iamroot/linux-stable/fs/namespace.c
- sysfs_mount 1631 // sysfs_mount(&sysfs_fs_type, 0x400000, "sysfs", NULL):
- sysfs_fill_super 369 ~/kernel/iamroot/linux-stable/fs/sysfs/mount.c
- sysfs_get_inode 82 ~/kernel/iamroot/linux-stable/fs/sysfs/mount.c
- sysfs_mount 1631 // sysfs_mount(&sysfs_fs_type, 0x400000, "sysfs", NULL):
- mount_fs 1111 ~/kernel/iamroot/linux-stable/fs/namespace.c
- vfs_kern_mount 3165 ~/kernel/iamroot/linux-stable/fs/namespace.c
- kern_mount_data 1924 ~/kernel/iamroot/linux-stable/include/linux/fs.h
- kern_mount 319 ~/kernel/iamroot/linux-stable/fs/sysfs/mount.c
- sysfs_init 3139 ~/kernel/iamroot/linux-stable/fs/namespace.c
- mnt_init 3503 ~/kernel/iamroot/linux-stable/fs/dcache.c
- vfs_caches_init 925 ~/kernel/iamroot/linux-stable/init/main.c
asmlinkage void __init start_kernel(void)
{
char * command_line;
extern const struct kernel_param __start___param[], __stop___param[];
// ATAG,DTB 정보로 사용
...
buffer_init();
// buffer_head 를 사용하기 위한 kmem_cache 할당자 및 max_buffer_heads 값 초기화 수행
key_init(); // null function
security_init(); // null function
dbg_late_init(); // null function
// totalram_pages: 총 free된 page 수
vfs_caches_init(totalram_pages);
- call: start_kernel()->vfs_caches_init()
// ARM10C 20151003
// totalram_pages: 총 free된 page 수
void __init vfs_caches_init(unsigned long mempages)
{
unsigned long reserve;
/* Base hash sizes on available memory, with a reserve equal to
150% of current kernel size */
// NOTE:
// mempages 값과 nr_free_pages() 의 값을 정확히 알 수 없음
// 계산된 reserve의 값을 XXX 로 함
// mempages: 총 free된 page 수, nr_free_pages(): 현재의 free pages 수
reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
// reserve: XXX
// mempages: 총 free된 page 수, reserve: XXX
mempages -= reserve;
// mempages: 총 free된 page 수 - XXX
// PATH_MAX: 4096
// SLAB_HWCACHE_ALIGN: 0x00002000UL, SLAB_PANIC: 0x00040000UL
// kmem_cache_create("names_cache", 4096, 0, 0x42000, NULL): kmem_cache#6
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
// names_cachep: kmem_cache#6
dcache_init();
// dcache_init에서 한일:
//
// struct dentry를 위한 kmem_cache 생성
// dentry_cache: kmem_cache#5
inode_init();
// inode_init에서 한일:
//
// struct inode를 위한 kmem_cache 생성
// inode_cachep: kmem_cache#4
// mempages: 총 free된 page 수 - XXX
files_init(mempages);
mnt_init();
- files_init에서 한일:
//
// filp_cachep: kmem_cache#3
// files_stat.max_files: (총 free된 page 수 - XXX) * 4 / 10
// sysctl_nr_open_max: 0x3FFFFFE0
//
// (&(&(&(&nr_files)->lock)->wait_lock)->rlock)->raw_lock: { { 0 } }
// (&(&(&(&nr_files)->lock)->wait_lock)->rlock)->magic: 0xdead4ead
// (&(&(&(&nr_files)->lock)->wait_lock)->rlock)->owner: 0xffffffff
// (&(&(&(&nr_files)->lock)->wait_lock)->rlock)->owner_cpu: 0xffffffff
// (&(&nr_files)->list)->next: &(&nr_files)->list
// (&(&nr_files)->list)->prev: &(&nr_files)->list
// (&nr_files)->count: 0
// (&nr_files)->counters: kmem_cache#26-o0 에서 할당된 4 bytes 메모리 주소
// list head 인 &percpu_counters에 &(&nr_files)->list를 연결함
- start_kernel()->vfs_caches_init()
- mnt_init()
- mnt_cache를 할당받는다.
// ARM10C 20151024
void __init mnt_init(void)
{
unsigned u;
int err;
// sizeof(struct mount): 152 bytes, SLAB_HWCACHE_ALIGN: 0x00002000UL, SLAB_PANIC: 0x00040000UL
// kmem_cache_create("mnt_cache", 152, 0, 0x42000, NULL): kmem_cache#2
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
// mnt_cache: kmem_cache#2
// sizeof(struct hlist_head): 4 bytes, mhash_entries: 0
// alloc_large_system_hash("Mount-cache", 4, 0, 19, 0, &m_hash_shift, &m_hash_mask, 0, 0): 16kB만큼 할당받은 메모리 주소
mount_hashtable = alloc_large_system_hash("Mount-cache",
sizeof(struct hlist_head),
mhash_entries, 19,
0,
&m_hash_shift, &m_hash_mask, 0, 0);
// mount_hashtable: 16kB만큼 할당받은 메모리 주소
// sizeof(struct hlist_head): 4 bytes, mphash_entries: 0
// alloc_large_system_hash("Mountpoint-cache", 4, 0, 19, 0, &mp_hash_shift, &mp_hash_mask, 0, 0): 16kB만큼 할당받은 메모리 주소
mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
sizeof(struct hlist_head),
mphash_entries, 19,
0,
&mp_hash_shift, &mp_hash_mask, 0, 0);
// mountpoint_hashtable: 16kB만큼 할당받은 메모리 주소
// mount_hashtable: 16kB만큼 할당받은 메모리 주소, mountpoint_hashtable: 16kB만큼 할당받은 메모리 주소
if (!mount_hashtable || !mountpoint_hashtable)
panic("Failed to allocate mount hash table\n");
// m_hash_mask: 0xFFF
for (u = 0; u <= m_hash_mask; u++)
// u: 0
INIT_HLIST_HEAD(&mount_hashtable[u]);
// INIT_HLIST_HEAD 에서 한일:
// ((&mount_hashtable[0])->first = NULL)
// u: 1...4095 까지 loop 수행
// mp_hash_mask: 0xFFF
for (u = 0; u <= mp_hash_mask; u++)
// u: 0
INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
// INIT_HLIST_HEAD 에서 한일:
// ((&mountpoint_hashtable[0])->first = NULL)
// u: 1...4095 까지 loop 수행
err = sysfs_init();
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
// ARM10C 20151031
int __init sysfs_init(void)
{
// ENOMEM: 12
int err = -ENOMEM;
// err: -12
// sizeof(struct sysfs_dirent): 64 bytes
// kmem_cache_create("sysfs_dir_cache", 64, 0, 0, NULL): kmem_cache#1
sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache",
sizeof(struct sysfs_dirent),
0, 0, NULL);
// sysfs_dir_cachep: kmem_cache#1
// sysfs_dir_cachep: kmem_cache#1
if (!sysfs_dir_cachep)
goto out;
// sysfs_inode_init(): 0
err = sysfs_inode_init();
// err: 0
// err: 0
if (err)
goto out_err;
// register_filesystem(&sysfs_fs_type): 0
err = register_filesystem(&sysfs_fs_type);
// err: 0
// register_filesystem에서 한일:
// file_systems: &sysfs_fs_type
// err: 0
if (!err) {
sysfs_mnt = kern_mount(&sysfs_fs_type);
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
// ARM10C 20151031
// &sysfs_fs_type
// kern_mount(type): convenience wrapper for kernel-internal mounts —
// mounts @type with no mount-data (NULL) by delegating to kern_mount_data().
#define kern_mount(type) kern_mount_data(type, NULL)
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
// ARM10C 20151031
// &sysfs_fs_type, NULL
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
struct vfsmount *mnt;
// type: &sysfs_fs_type, MS_KERNMOUNT: 0x400000, type->name: (&sysfs_fs_type)->name: "sysfs", data: NULL
mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- kern_mount_data()
// ARM10C 20151031
// type: &sysfs_fs_type, MS_KERNMOUNT: 0x400000, type->name: (&sysfs_fs_type)->name: "sysfs", data: NULL
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
struct mount *mnt;
struct dentry *root;
// type: &sysfs_fs_type
if (!type)
return ERR_PTR(-ENODEV);
// name: "sysfs", alloc_vfsmnt("sysfs"): kmem_cache#2-oX (struct mount)
mnt = alloc_vfsmnt(name);
// mnt: kmem_cache#2-oX (struct mount)
// mnt: kmem_cache#2-oX (struct mount)
if (!mnt)
return ERR_PTR(-ENOMEM);
// flags: 0x400000, MS_KERNMOUNT: 0x400000
if (flags & MS_KERNMOUNT)
// mnt->mnt.mnt_flags: (kmem_cache#2-oX (struct mount))->mnt.mnt_flags, MNT_INTERNAL: 0x4000
mnt->mnt.mnt_flags = MNT_INTERNAL;
// mnt->mnt.mnt_flags: (kmem_cache#2-oX (struct mount))->mnt.mnt_flags: 0x4000
// type: &sysfs_fs_type, flags: 0x400000, name: "sysfs", data: NULL
root = mount_fs(type, flags, name, data);
- alloc_vfsmnt()에서 한일:
- struct mount의 메모리를 할당 받음 kmem_cache#2-oX (struct mount)
-
- idr_layer_cache를 사용하여 struct idr_layer 의 메모리 kmem_cache#21-o0...7를 8 개를 할당 받음
- (kmem_cache#21-o0...7)->ary[0]: NULL
- (&(&mnt_id_ida)->idr)->id_free: kmem_cache#21-o7
- (&(&mnt_id_ida)->idr)->id_free_cnt: 7
-
- struct ida_bitmap 의 메모리 kmem_cache#27-oX 할당 받음
- (&mnt_id_ida)->free_bitmap: kmem_cache#27-oX
-
- (&(&mnt_id_ida)->idr)->id_free: NULL
- (&(&mnt_id_ida)->idr)->id_free_cnt: 6
- (&(&mnt_id_ida)->idr)->top: kmem_cache#21-o7 (struct idr_layer)
- (&(&mnt_id_ida)->idr)->layers: 1
- (&mnt_id_ida)->free_bitmap: NULL
-
- (kmem_cache#21-o7 (struct idr_layer))->ary[0]: NULL
- (kmem_cache#21-o7 (struct idr_layer))->layer: 0
- (kmem_cache#21-o7 (struct idr_layer))->ary[0]: kmem_cache#27-oX (struct ida_bitmap)
- (kmem_cache#21-o7 (struct idr_layer))->count: 1
-
- (kmem_cache#27-oX (struct ida_bitmap))->bitmap 의 0 bit를 1로 set 수행
-
- (kmem_cache#2-oX (struct mount))->mnt_id: 0
-
- mnt_id_start: 1
-
- (kmem_cache#2-oX (struct mount))->mnt_devname: kmem_cache#30-oX: "sysfs"
- (kmem_cache#2-oX (struct mount))->mnt_pcp: kmem_cache#26-o0 에서 할당된 8 bytes 메모리 주소
- [pcp0] (kmem_cache#2-oX (struct mount))->mnt_pcp->mnt_count: 1
-
- ((kmem_cache#2-oX (struct mount))->mnt_hash)->next: NULL
- ((kmem_cache#2-oX (struct mount))->mnt_hash)->pprev: NULL
- ((kmem_cache#2-oX (struct mount))->mnt_child)->next: (kmem_cache#2-oX (struct mount))->mnt_child
- ((kmem_cache#2-oX (struct mount))->mnt_child)->prev: (kmem_cache#2-oX (struct mount))->mnt_child
- ((kmem_cache#2-oX (struct mount))->mnt_mounts)->next: (kmem_cache#2-oX (struct mount))->mnt_mounts
- ((kmem_cache#2-oX (struct mount))->mnt_mounts)->prev: (kmem_cache#2-oX (struct mount))->mnt_mounts
- ((kmem_cache#2-oX (struct mount))->mnt_list)->next: (kmem_cache#2-oX (struct mount))->mnt_list
- ((kmem_cache#2-oX (struct mount))->mnt_list)->prev: (kmem_cache#2-oX (struct mount))->mnt_list
- ((kmem_cache#2-oX (struct mount))->mnt_expire)->next: (kmem_cache#2-oX (struct mount))->mnt_expire
- ((kmem_cache#2-oX (struct mount))->mnt_expire)->prev: (kmem_cache#2-oX (struct mount))->mnt_expire
- ((kmem_cache#2-oX (struct mount))->mnt_share)->next: (kmem_cache#2-oX (struct mount))->mnt_share
- ((kmem_cache#2-oX (struct mount))->mnt_share)->prev: (kmem_cache#2-oX (struct mount))->mnt_share
- ((kmem_cache#2-oX (struct mount))->mnt_slave_list)->next: (kmem_cache#2-oX (struct mount))->mnt_slave_list
- ((kmem_cache#2-oX (struct mount))->mnt_slave_list)->prev: (kmem_cache#2-oX (struct mount))->mnt_slave_list
- ((kmem_cache#2-oX (struct mount))->mnt_slave)->next: (kmem_cache#2-oX (struct mount))->mnt_slave
- ((kmem_cache#2-oX (struct mount))->mnt_slave)->prev: (kmem_cache#2-oX (struct mount))->mnt_slave
- ((kmem_cache#2-oX (struct mount))->mnt_fsnotify_marks)->first: NULL
### super.c::mount_fs()
* start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
```super.c
// ARM10C 20151114
// type: &sysfs_fs_type, flags: 0x400000, name: "sysfs", data: NULL
struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
struct dentry *root;
struct super_block *sb;
char *secdata = NULL;
// secdata: NULL
// ENOMEM: 12
int error = -ENOMEM;
// error: -12
// data: NULL, type->fs_flags: (&sysfs_fs_type)->fs_flags: 8, FS_BINARY_MOUNTDATA: 2
if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
secdata = alloc_secdata();
if (!secdata)
goto out;
error = security_sb_copy_data(data, secdata);
if (error)
goto out_free_secdata;
}
// type->mount: (&sysfs_fs_type)->mount: sysfs_mount
// type: &sysfs_fs_type, flags: 0x400000, name: "sysfs", data: NULL
// sysfs_mount(&sysfs_fs_type, 0x400000, "sysfs", NULL):
root = type->mount(type, flags, name, data);
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
// ARM10C 20151114
// type: &sysfs_fs_type, flags: 0x400000, name: "sysfs", data: NULL
static struct dentry *sysfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
struct sysfs_super_info *info;
enum kobj_ns_type type;
struct super_block *sb;
int error;
// flags: 0x400000, MS_KERNMOUNT: 0x400000
if (!(flags & MS_KERNMOUNT)) {
if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type))
return ERR_PTR(-EPERM);
for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) {
if (!kobj_ns_current_may_mount(type))
return ERR_PTR(-EPERM);
}
}
// sizeof(struct sysfs_super_info): 8 bytes, GFP_KERNEL: 0xD0
// kzalloc(8, GFP_KERNEL: 0xD0): kmem_cache#30-oX
info = kzalloc(sizeof(*info), GFP_KERNEL);
// info: kmem_cache#30-oX (struct sysfs_super_info)
// info: kmem_cache#30-oX (struct sysfs_super_info)
if (!info)
return ERR_PTR(-ENOMEM);
// KOBJ_NS_TYPE_NONE: 0, KOBJ_NS_TYPES: 2
for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
// type: 0, info->ns[0]: (kmem_cache#30-oX (struct sysfs_super_info))->ns[0],
// kobj_ns_grab_current(0): NULL
info->ns[type] = kobj_ns_grab_current(type);
// info->ns[0]: (kmem_cache#30-oX (struct sysfs_super_info))->ns[0]: NULL
// fs_type: &sysfs_fs_type, flags: 0x400000, info: kmem_cache#30-oX (struct sysfs_super_info)
sb = sget(fs_type, sysfs_test_super, sysfs_set_super, flags, info);
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
// ARM10C 20151114
// fs_type: &sysfs_fs_type, sysfs_test_super, sysfs_set_super, flags: 0x400000, info: kmem_cache#30-oX (struct sysfs_super_info)
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags,
void *data)
{
struct super_block *s = NULL;
// s: NULL
struct super_block *old;
int err;
retry:
spin_lock(&sb_lock);
// spin_lock에서 한일:
// &sb_lock 을 사용한 spin lock 수행
// [re] spin_lock에서 한일:
// [re] &sb_lock 을 사용한 spin lock 수행
// test: sysfs_test_super
// [re] test: sysfs_test_super
if (test) {
// &type->fs_supers: &(&sysfs_fs_type)->fs_supers
// hlist_entry_safe((&(&sysfs_fs_type)->fs_supers)->first, struct super_block, s_instances): NULL
// [re] &type->fs_supers: &(&sysfs_fs_type)->fs_supers
// [re] hlist_entry_safe((&(&sysfs_fs_type)->fs_supers)->first, struct super_block, s_instances): NULL
hlist_for_each_entry(old, &type->fs_supers, s_instances) {
// for (old = hlist_entry_safe((&type->fs_supers)->first, typeof(*(old)), s_instances);
// old; old = hlist_entry_safe((old)->s_instances.next, typeof(*(old)), s_instances))
if (!test(old, data))
continue;
if (!grab_super(old))
goto retry;
if (s) {
up_write(&s->s_umount);
destroy_super(s);
s = NULL;
}
return old;
}
}
// s: NULL
// [re] s: kmem_cache#25-oX (struct super_block)
if (!s) {
spin_unlock(&sb_lock);
// spin_unlock에서 한일:
// &sb_lock 을 사용한 spin unlock 수행
// type: &sysfs_fs_type, flags: 0x400000
// alloc_super(&sysfs_fs_type, 0x400000): kmem_cache#25-oX (struct super_block)
s = alloc_super(type, flags);
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- spin_lock()
- hlist_for_each_entry()
- spin_unlock()
- alloc_super()
- set()
- strlcpy()
- list_add_tail()
- hlist_add_head()
- spin_unlock(&sb_lock)
- get_filesystem(type)
- register_shrinker()
- sget()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
// ARM10C 20151114
// fs_type: &sysfs_fs_type, sysfs_test_super, sysfs_set_super, flags: 0x400000, info: kmem_cache#30-oX (struct sysfs_super_info)
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags,
void *data)
{
struct super_block *s = NULL;
// s: NULL
struct super_block *old;
int err;
retry:
spin_lock(&sb_lock);
// spin_lock에서 한일:
// &sb_lock 을 사용한 spin lock 수행
// [re] spin_lock에서 한일:
// [re] &sb_lock 을 사용한 spin lock 수행
// test: sysfs_test_super
// [re] test: sysfs_test_super
if (test) {
// &type->fs_supers: &(&sysfs_fs_type)->fs_supers
// hlist_entry_safe((&(&sysfs_fs_type)->fs_supers)->first, struct super_block, s_instances): NULL
// [re] &type->fs_supers: &(&sysfs_fs_type)->fs_supers
// [re] hlist_entry_safe((&(&sysfs_fs_type)->fs_supers)->first, struct super_block, s_instances): NULL
hlist_for_each_entry(old, &type->fs_supers, s_instances) {
// for (old = hlist_entry_safe((&type->fs_supers)->first, typeof(*(old)), s_instances);
// old; old = hlist_entry_safe((old)->s_instances.next, typeof(*(old)), s_instances))
if (!test(old, data))
continue;
if (!grab_super(old))
goto retry;
if (s) {
up_write(&s->s_umount);
destroy_super(s);
s = NULL;
}
return old;
}
}
// s: NULL
// [re] s: kmem_cache#25-oX (struct super_block)
if (!s) {
spin_unlock(&sb_lock);
// spin_unlock에서 한일:
// &sb_lock 을 사용한 spin unlock 수행
// type: &sysfs_fs_type, flags: 0x400000
// alloc_super(&sysfs_fs_type, 0x400000): kmem_cache#25-oX (struct super_block)
s = alloc_super(type, flags);
// s: kmem_cache#25-oX (struct super_block)
// s: kmem_cache#25-oX (struct super_block)
if (!s)
return ERR_PTR(-ENOMEM);
goto retry;
// goto retry
}
// [re] set: sysfs_set_super, [re] s: kmem_cache#25-oX (struct super_block), data: kmem_cache#30-oX (struct sysfs_super_info)
// [re] sysfs_set_super(kmem_cache#25-oX (struct super_block), kmem_cache#30-oX (struct sysfs_super_info)): 0
err = set(s, data);
// [re] err: 0
// [re] err: 0
if (err) {
spin_unlock(&sb_lock);
up_write(&s->s_umount);
destroy_super(s);
return ERR_PTR(err);
}
// [re] s->s_type: (kmem_cache#25-oX (struct super_block))->s_type, type: &sysfs_fs_type
s->s_type = type;
// [re] s->s_type: (kmem_cache#25-oX (struct super_block))->s_type: &sysfs_fs_type
// [re] s->s_id: (kmem_cache#25-oX (struct super_block))->s_id, type->name: (&sysfs_fs_type)->name: "sysfs",
// [re] sizeof((kmem_cache#25-oX (struct super_block))->s_id): 32 bytes
strlcpy(s->s_id, type->name, sizeof(s->s_id));
// [re] strlcpy에서 한일:
// s->s_id: (kmem_cache#25-oX (struct super_block))->s_id: "sysfs"
// [re] s->s_list: (kmem_cache#25-oX (struct super_block))->s_list,
list_add_tail(&s->s_list, &super_blocks);
// [re] list_add_tail에서 한일:
// list head인 &super_blocks 에 (kmem_cache#25-oX (struct super_block))->s_list을 tail에 추가
// [re] &s->s_instances: &(kmem_cache#25-oX (struct super_block))->s_instances, &type->fs_supers: &(&sysfs_fs_type)->fs_supers
hlist_add_head(&s->s_instances, &type->fs_supers);
// [re] hlist_add_head에서 한일:
// (&(kmem_cache#25-oX (struct super_block))->s_instances)->next: NULL
// (&(&sysfs_fs_type)->fs_supers)->first: &(kmem_cache#25-oX (struct super_block))->s_instances
// (&(kmem_cache#25-oX (struct super_block))->s_instances)->pprev: &(&(&sysfs_fs_type)->fs_supers)->first
spin_unlock(&sb_lock);
// [re] spin_unlock에서 한일:
// &sb_lock 을 사용한 spin unlock 수행
// [re] type: &sysfs_fs_type
get_filesystem(type);
// 2015/11/14 종료
// [re] &s->s_shrink: &(kmem_cache#25-oX (struct super_block))->s_shrink
register_shrinker(&s->s_shrink);
- alloc_super에서 한일:
// struct super_block 만큼의 메모리를 할당 받음 kmem_cache#25-oX (struct super_block)
//
// (&(&(&(&(kmem_cache#25-oX (struct super_block))->s_writers.counter[0...2])->lock)->wait_lock)->rlock)->raw_lock: { { 0 } }
// (&(&(&(&(kmem_cache#25-oX (struct super_block))->s_writers.counter[0...2])->lock)->wait_lock)->rlock)->magic: 0xdead4ead
// (&(&(&(&(kmem_cache#25-oX (struct super_block))->s_writers.counter[0...2])->lock)->wait_lock)->rlock)->owner: 0xffffffff
// (&(&(&(&(kmem_cache#25-oX (struct super_block))->s_writers.counter[0...2])->lock)->wait_lock)->rlock)->owner_cpu: 0xffffffff
// (&(&(kmem_cache#25-oX (struct super_block))->s_writers.counter[0...2])->list)->next: &(&(kmem_cache#25-oX (struct super_block))->s_writers.counter[0...2])->list
// (&(&(kmem_cache#25-oX (struct super_block))->s_writers.counter[0...2])->list)->prev: &(&(kmem_cache#25-oX (struct super_block))->s_writers.counter[0...2])->list
// (&(kmem_cache#25-oX (struct super_block))->s_writers.counter[0...2])->count: 0
// (&(kmem_cache#25-oX (struct super_block))->s_writers.counter[0...2])->counters: kmem_cache#26-o0 에서 할당된 4 bytes 메모리 주소
// list head 인 &percpu_counters에 &(&(kmem_cache#25-oX (struct super_block))->s_writers.counter[0...2])->list를 연결함
//
// &(&(kmem_cache#25-oX (struct super_block))->s_writers.wait)->lock을 사용한 spinlock 초기화
// &(&(kmem_cache#25-oX (struct super_block))->s_writers.wait)->task_list를 사용한 list 초기화
// &(&(kmem_cache#25-oX (struct super_block))->s_writers.wait_unfrozen)->lock을 사용한 spinlock 초기화
// &(&(kmem_cache#25-oX (struct super_block))->s_writers.wait_unfrozen)->task_list를 사용한 list 초기화
//
// (&(kmem_cache#25-oX (struct super_block))->s_instances)->next: NULL
// (&(kmem_cache#25-oX (struct super_block))->s_instances)->pprev: NULL
// (&(kmem_cache#25-oX (struct super_block))->s_anon)->first: NULL
//
// (&(kmem_cache#25-oX (struct super_block))->s_inodes)->next: &(kmem_cache#25-oX (struct super_block))->s_inodes
// (&(kmem_cache#25-oX (struct super_block))->s_inodes)->prev: &(kmem_cache#25-oX (struct super_block))->s_inodes
//
// (&(kmem_cache#25-oX (struct super_block))->s_dentry_lru)->node: kmem_cache#30-oX
// (&(&(kmem_cache#25-oX (struct super_block))->s_dentry_lru)->active_nodes)->bits[0]: 0
// ((&(kmem_cache#25-oX (struct super_block))->s_dentry_lru)->node[0].lock)->raw_lock: { { 0 } }
// ((&(kmem_cache#25-oX (struct super_block))->s_dentry_lru)->node[0].lock)->magic: 0xdead4ead
// ((&(kmem_cache#25-oX (struct super_block))->s_dentry_lru)->node[0].lock)->owner: 0xffffffff
// ((&(kmem_cache#25-oX (struct super_block))->s_dentry_lru)->node[0].lock)->owner_cpu: 0xffffffff
// ((&(kmem_cache#25-oX (struct super_block))->s_dentry_lru)->node[0].list)->next: (&(kmem_cache#25-oX (struct super_block))->s_dentry_lru)->node[0].list
// ((&(kmem_cache#25-oX (struct super_block))->s_dentry_lru)->node[0].list)->prev: (&(kmem_cache#25-oX (struct super_block))->s_dentry_lru)->node[0].list
// (&(kmem_cache#25-oX (struct super_block))->s_dentry_lru)->node[0].nr_items: 0
// (&(kmem_cache#25-oX (struct super_block))->s_inode_lru)->node: kmem_cache#30-oX
// (&(&(kmem_cache#25-oX (struct super_block))->s_inode_lru)->active_nodes)->bits[0]: 0
// ((&(kmem_cache#25-oX (struct super_block))->s_inode_lru)->node[0].lock)->raw_lock: { { 0 } }
// ((&(kmem_cache#25-oX (struct super_block))->s_inode_lru)->node[0].lock)->magic: 0xdead4ead
// ((&(kmem_cache#25-oX (struct super_block))->s_inode_lru)->node[0].lock)->owner: 0xffffffff
// ((&(kmem_cache#25-oX (struct super_block))->s_inode_lru)->node[0].lock)->owner_cpu: 0xffffffff
// ((&(kmem_cache#25-oX (struct super_block))->s_inode_lru)->node[0].list)->next: (&(kmem_cache#25-oX (struct super_block))->s_inode_lru)->node[0].list
// ((&(kmem_cache#25-oX (struct super_block))->s_inode_lru)->node[0].list)->prev: (&(kmem_cache#25-oX (struct super_block))->s_inode_lru)->node[0].list
// (&(kmem_cache#25-oX (struct super_block))->s_inode_lru)->node[0].nr_items: 0
//
// (&(kmem_cache#25-oX (struct super_block))->s_mounts)->next: &(kmem_cache#25-oX (struct super_block))->s_mounts
// (&(kmem_cache#25-oX (struct super_block))->s_mounts)->prev: &(kmem_cache#25-oX (struct super_block))->s_mounts
//
// (&(kmem_cache#25-oX (struct super_block))->s_umount)->activity: 0
// &(&(kmem_cache#25-oX (struct super_block))->s_umount)->wait_lock을 사용한 spinlock 초기화
// (&(&(kmem_cache#25-oX (struct super_block))->s_umount)->wait_list)->next: &(&(kmem_cache#25-oX (struct super_block))->s_umount)->wait_list
// (&(&(kmem_cache#25-oX (struct super_block))->s_umount)->wait_list)->prev: &(&(kmem_cache#25-oX (struct super_block))->s_umount)->wait_list
//
// (&(kmem_cache#25-oX (struct super_block))->s_umount)->count: 0xffff0001
//
// (&(kmem_cache#25-oX (struct super_block))->s_vfs_rename_mutex)->count: 1
// (&(kmem_cache#25-oX (struct super_block))->s_vfs_rename_mutex)->wait_lock)->rlock)->raw_lock: { { 0 } }
// (&(kmem_cache#25-oX (struct super_block))->s_vfs_rename_mutex)->wait_lock)->rlock)->magic: 0xdead4ead
// (&(kmem_cache#25-oX (struct super_block))->s_vfs_rename_mutex)->wait_lock)->rlock)->owner: 0xffffffff
// (&(kmem_cache#25-oX (struct super_block))->s_vfs_rename_mutex)->wait_lock)->rlock)->owner_cpu: 0xffffffff
// (&(&(kmem_cache#25-oX (struct super_block))->s_vfs_rename_mutex)->wait_list)->next: &(&(kmem_cache#25-oX (struct super_block))->s_vfs_rename_mutex)->wait_list
// (&(&(kmem_cache#25-oX (struct super_block))->s_vfs_rename_mutex)->wait_list)->prev: &(&(kmem_cache#25-oX (struct super_block))->s_vfs_rename_mutex)->wait_list
// (&(kmem_cache#25-oX (struct super_block))->s_vfs_rename_mutex)->owner: NULL
// (&(kmem_cache#25-oX (struct super_block))->s_vfs_rename_mutex)->magic: &(kmem_cache#25-oX (struct super_block))->s_vfs_rename_mutex
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqio_mutex)->count: 1
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqio_mutex)->wait_lock)->rlock)->raw_lock: { { 0 } }
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqio_mutex)->wait_lock)->rlock)->magic: 0xdead4ead
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqio_mutex)->wait_lock)->rlock)->owner: 0xffffffff
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqio_mutex)->wait_lock)->rlock)->owner_cpu: 0xffffffff
// (&(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqio_mutex)->wait_list)->next: &(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqio_mutex)->wait_list
// (&(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqio_mutex)->wait_list)->prev: &(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqio_mutex)->wait_list
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqio_mutex)->owner: NULL
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqio_mutex)->magic: &(kmem_cache#25-oX (struct super_block))->s_dquot.dqio_mutex
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqonoff_mutex)->count: 1
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqonoff_mutex)->wait_lock)->rlock)->raw_lock: { { 0 } }
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqonoff_mutex)->wait_lock)->rlock)->magic: 0xdead4ead
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqonoff_mutex)->wait_lock)->rlock)->owner: 0xffffffff
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqonoff_mutex)->wait_lock)->rlock)->owner_cpu: 0xffffffff
// (&(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqonoff_mutex)->wait_list)->next: &(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqonoff_mutex)->wait_list
// (&(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqonoff_mutex)->wait_list)->prev: &(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqonoff_mutex)->wait_list
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqonoff_mutex)->owner: NULL
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqonoff_mutex)->magic: &(kmem_cache#25-oX (struct super_block))->s_dquot.dqonoff_mutex
// (&(kmem_cache#25-oX (struct super_block))->s_dquot.dqptr_sem)->activity: 0
// &(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqptr_sem)->wait_lock을 사용한 spinlock 초기화
// (&(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqptr_sem)->wait_list)->next: &(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqptr_sem)->wait_list
// (&(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqptr_sem)->wait_list)->prev: &(&(kmem_cache#25-oX (struct super_block))->s_dquot.dqptr_sem)->wait_list
//
// (kmem_cache#25-oX (struct super_block))->s_flags: 0x400000
// (kmem_cache#25-oX (struct super_block))->s_bdi: &default_backing_dev_info
// (kmem_cache#25-oX (struct super_block))->s_count: 1
// ((kmem_cache#25-oX (struct super_block))->s_active)->counter: 1
// (kmem_cache#25-oX (struct super_block))->s_maxbytes: 0x7fffffff
// (kmem_cache#25-oX (struct super_block))->s_op: &default_op
// (kmem_cache#25-oX (struct super_block))->s_time_gran: 1000000000
// (kmem_cache#25-oX (struct super_block))->cleancache_poolid: -1
// (kmem_cache#25-oX (struct super_block))->s_shrink.seeks: 2
// (kmem_cache#25-oX (struct super_block))->s_shrink.scan_objects: super_cache_scan
// (kmem_cache#25-oX (struct super_block))->s_shrink.count_objects: super_cache_count
// (kmem_cache#25-oX (struct super_block))->s_shrink.batch: 1024
// (kmem_cache#25-oX (struct super_block))->s_shrink.flags: 1
- [re] sysfs_set_super에서 한일:
// idr_layer_cache를 사용하여 struct idr_layer 의 메모리 kmem_cache#21-o0...7를 8 개를 할당 받음
// (kmem_cache#21-o0...7)->ary[0]: NULL
// (&(&unnamed_dev_ida)->idr)->id_free: kmem_cache#21-o7
// (&(&unnamed_dev_ida)->idr)->id_free_cnt: 7
//
// struct ida_bitmap 의 메모리 kmem_cache#27-oX 할당 받음
// (&unnamed_dev_ida)->free_bitmap: kmem_cache#27-oX
//
// (&(&unnamed_dev_ida)->idr)->id_free: NULL
// (&(&unnamed_dev_ida)->idr)->id_free_cnt: 6
// (&(&unnamed_dev_ida)->idr)->top: kmem_cache#21-o7 (struct idr_layer)
// (&(&unnamed_dev_ida)->idr)->layers: 1
// (&unnamed_dev_ida)->free_bitmap: NULL
//
// (kmem_cache#21-o7 (struct idr_layer))->ary[0]: NULL
// (kmem_cache#21-o7 (struct idr_layer))->layer: 0
// (kmem_cache#21-o7 (struct idr_layer))->ary[0]: kmem_cache#27-oX (struct ida_bitmap)
// (kmem_cache#21-o7 (struct idr_layer))->count: 1
//
// (kmem_cache#27-oX (struct ida_bitmap))->bitmap 의 0 bit를 1로 set 수행
//
// unnamed_dev_start: 1
//
// (kmem_cache#25-oX (struct super_block))->s_dev: 0
// (kmem_cache#25-oX (struct super_block))->s_bdi: &noop_backing_dev_info
// (kmem_cache#25-oX (struct super_block))->s_fs_info: kmem_cache#30-oX (struct sysfs_super_info)
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- spin_lock()
- hlist_for_each_entry()
- spin_unlock()
- alloc_super()
- set()
- strlcpy()
- list_add_tail()
- hlist_add_head()
- spin_unlock(&sb_lock)
- get_filesystem(type)
- register_shrinker()
- sget()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
// ARM10C 20151114
// [re] &s->s_shrink: &(kmem_cache#25-oX (struct super_block))->s_shrink
/*
 * register_shrinker - add @shrinker to the global shrinker_list so the
 * memory-reclaim path can invoke it under memory pressure.
 * Returns 0 on success, or -ENOMEM if the deferred-count array
 * (one counter, or one per NUMA node) cannot be allocated.
 */
int register_shrinker(struct shrinker *shrinker)
{
// size of one deferred-work counter (element of shrinker->nr_deferred)
size_t size = sizeof(*shrinker->nr_deferred);
/*
 * If we only have one possible node in the system anyway, save
 * ourselves the trouble and disable NUMA aware behavior. This way we
 * will save memory and some small loop time later.
 */
if (nr_node_ids == 1)
shrinker->flags &= ~SHRINKER_NUMA_AWARE;
// NUMA-aware shrinkers keep one deferred counter per node
if (shrinker->flags & SHRINKER_NUMA_AWARE)
size *= nr_node_ids;
// zero-initialized so no work is considered deferred at registration
shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
if (!shrinker->nr_deferred)
return -ENOMEM;
// shrinker_rwsem serializes list modification against reclaim walkers
down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list);
up_write(&shrinker_rwsem);
return 0;
}
EXPORT_SYMBOL(register_shrinker);
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- spin_lock()
- hlist_for_each_entry()
- spin_unlock()
- alloc_super()
- set()
- strlcpy()
- list_add_tail()
- hlist_add_head()
- spin_unlock(&sb_lock)
- get_filesystem(type)
- register_shrinker()
- down_write()
- sget()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
// ARM10C 20151114
// &s->s_umount: &(kmem_cache#25-oX (struct super_block))->s_umount, SINGLE_DEPTH_NESTING: 1
// Acquire a rw-semaphore for writing (exclusive); may sleep.
void __sched down_write(struct rw_semaphore *sem)
{
might_sleep(); // null function
// &sem->dep_map: &(&(kmem_cache#25-oX (struct super_block))->s_umount)->dep_map
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); // null function
// sem: &(kmem_cache#25-oX (struct super_block))->s_umount
// LOCK_CONTENDED(&(kmem_cache#25-oX (struct super_block))->s_umount, __down_write_trylock, __down_write):
// __down_write(&(kmem_cache#25-oX (struct super_block))->s_umount)
LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
// NOTE(review): excerpt is truncated here in the notes — the closing
// brace of down_write() is missing from this copy.
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- spin_lock()
- hlist_for_each_entry()
- spin_unlock()
- alloc_super()
- set()
- strlcpy()
- list_add_tail()
- hlist_add_head()
- spin_unlock(&sb_lock)
- get_filesystem(type)
- register_shrinker()
- down_write()
- __down_write()
- down_write()
- sget()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
// Exclusive-acquire slow-path entry point: forwards to
// __down_write_nested() with lockdep subclass 0.
void __sched __down_write(struct rw_semaphore *sem)
{
__down_write_nested(sem, 0);
}
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- spin_lock()
- hlist_for_each_entry()
- spin_unlock()
- alloc_super()
- set()
- strlcpy()
- list_add_tail()
- hlist_add_head()
- spin_unlock(&sb_lock)
- get_filesystem(type)
- register_shrinker()
- down_write()
- __down_write()
- __down_write_nested()
- down_write()
- sget()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
// Exclusive acquire for the spinlock-based rw-semaphore flavor: queue
// ourselves on sem->wait_list and sleep until no holder remains
// (sem->activity == 0), then mark the semaphore write-held
// (sem->activity = -1) and dequeue.
// NOTE(review): `subclass` is not used in this body — presumably it only
// matters for lockdep annotations in other configs; confirm upstream.
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
unsigned long flags;
raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* set up my own style of waitqueue */
tsk = current;
waiter.task = tsk;
waiter.type = RWSEM_WAITING_FOR_WRITE;
list_add_tail(&waiter.list, &sem->wait_list);
/* wait for someone to release the lock */
for (;;) {
/*
 * That is the key to support write lock stealing: allows the
 * task already on CPU to get the lock soon rather than put
 * itself into sleep and waiting for system woke it or someone
 * else in the head of the wait list up.
 */
if (sem->activity == 0)
break;
// lock is held by someone else: sleep and retry after wakeup
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
schedule();
raw_spin_lock_irqsave(&sem->wait_lock, flags);
}
/* got the lock */
sem->activity = -1; // -1 marks the semaphore as exclusively (write) held
list_del(&waiter.list);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
- struct rwsem_waiter
// Per-task wait-queue entry, stack-allocated in __down_write_nested()
// and linked onto rw_semaphore.wait_list while the task blocks.
struct rwsem_waiter {
struct list_head list;          // linkage on rw_semaphore.wait_list
struct task_struct *task;       // the blocked task to wake
enum rwsem_waiter_type type;    // waiting for read or for write
};
- struct task_struct
// Process descriptor: one per task (process or thread). The inline
// `// CONFIG_*=y/n` annotations record which kernel config options are
// enabled in the build these study notes trace; fields inside a
// CONFIG_*=n block are compiled out of that build.
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
#ifdef CONFIG_SMP // CONFIG_SMP=y
struct llist_node wake_entry;
int on_cpu;
struct task_struct *last_wakee;
unsigned long wakee_flips;
unsigned long wakee_flip_decay_ts;
int wake_cpu;
#endif
int on_rq;
int prio, static_prio, normal_prio;
unsigned int rt_priority;
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED // CONFIG_CGROUP_SCHED=y
struct task_group *sched_task_group;
#endif
#ifdef CONFIG_PREEMPT_NOTIFIERS // CONFIG_PREEMPT_NOTIFIERS=n
/* list of struct preempt_notifier: */
struct hlist_head preempt_notifiers;
#endif
#ifdef CONFIG_BLK_DEV_IO_TRACE // CONFIG_BLK_DEV_IO_TRACE=n
unsigned int btrace_seq;
#endif
unsigned int policy;
int nr_cpus_allowed;
cpumask_t cpus_allowed;
#ifdef CONFIG_PREEMPT_RCU // CONFIG_PREEMPT_RCU=y
int rcu_read_lock_nesting;
char rcu_read_unlock_special;
struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU // CONFIG_TREE_PREEMPT_RCU=y
struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST // CONFIG_RCU_BOOST=n
struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) // CONFIG_SCHEDSTATS=n, CONFIG_TASK_DELAY_ACCT=n
struct sched_info sched_info;
#endif
struct list_head tasks;
#ifdef CONFIG_SMP // CONFIG_SMP=y
struct plist_node pushable_tasks;
#endif
struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK // CONFIG_COMPAT_BRK=y
unsigned brk_randomized:1;
#endif
#if defined(SPLIT_RSS_COUNTING) // defined
struct task_rss_stat rss_stat;
#endif
/* task state */
int exit_state;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
unsigned int jobctl; /* JOBCTL_*, siglock protected */
/* Used for emulating ABI behavior of previous Linux versions */
unsigned int personality;
unsigned did_exec:1;
unsigned in_execve:1; /* Tell the LSMs that the process is doing an
* execve */
unsigned in_iowait:1;
/* task may not gain privileges */
unsigned no_new_privs:1;
/* Revert to default priority/policy when forking */
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
pid_t pid;
pid_t tgid;
#ifdef CONFIG_CC_STACKPROTECTOR // CONFIG_CC_STACKPROTECTOR=n
/* Canary value for the -fstack-protector gcc feature */
unsigned long stack_canary;
#endif
/*
* pointers to (original) parent process, youngest child, younger sibling,
* older sibling, respectively. (p->father can be replaced with
* p->real_parent->pid)
*/
struct task_struct __rcu *real_parent; /* real parent process */
struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
/*
* children/sibling forms the list of my natural children
*/
struct list_head children; /* list of my children */
struct list_head sibling; /* linkage in my parent's children list */
struct task_struct *group_leader; /* threadgroup leader */
/*
* ptraced is the list of tasks this task is using ptrace on.
* This includes both natural children and PTRACE_ATTACH targets.
* p->ptrace_entry is p's link on the p->parent->ptraced list.
*/
struct list_head ptraced;
struct list_head ptrace_entry;
/* PID/PID hash table linkage. */
// PIDTYPE_MAX: 3
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE // CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=n
struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN // CONFIG_VIRT_CPU_ACCOUNTING_GEN=n
seqlock_t vtime_seqlock;
unsigned long long vtime_snap;
enum {
VTIME_SLEEPING = 0,
VTIME_USER,
VTIME_SYS,
} vtime_snap_whence;
#endif
unsigned long nvcsw, nivcsw; /* context switch counts */
struct timespec start_time; /* monotonic time */
struct timespec real_start_time; /* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
/* process credentials */
const struct cred __rcu *real_cred; /* objective and real subjective task
* credentials (COW) */
const struct cred __rcu *cred; /* effective (overridable) subjective task
* credentials (COW) */
// TASK_COMM_LEN: 16
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
- initialized normally by setup_new_exec */
/* file system info */
int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC // CONFIG_SYSVIPC=y
/* ipc stuff */
struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK // CONFIG_DETECT_HUNG_TASK=y
/* hung task detection */
unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
struct thread_struct thread;
/* filesystem information */
struct fs_struct *fs;
/* open file information */
struct files_struct *files;
/* namespaces */
struct nsproxy *nsproxy;
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
unsigned long sas_ss_sp;
size_t sas_ss_size;
int (*notifier)(void *priv);
void *notifier_data;
sigset_t *notifier_mask;
struct callback_head *task_works;
struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL // CONFIG_AUDITSYSCALL=n
kuid_t loginuid;
unsigned int sessionid;
#endif
struct seccomp seccomp;
/* Thread group tracking */
u32 parent_exec_id;
u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
* mempolicy */
spinlock_t alloc_lock;
/* Protection of the PI data structures: */
raw_spinlock_t pi_lock;
#ifdef CONFIG_RT_MUTEXES // CONFIG_RT_MUTEXES=y
/* PI waiters blocked on a rt_mutex held by this task */
struct plist_head pi_waiters;
/* Deadlock detection and priority inheritance handling */
struct rt_mutex_waiter *pi_blocked_on;
#endif
#ifdef CONFIG_DEBUG_MUTEXES // CONFIG_DEBUG_MUTEXES=y
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS // CONFIG_TRACE_IRQFLAGS=n
unsigned int irq_events;
unsigned long hardirq_enable_ip;
unsigned long hardirq_disable_ip;
unsigned int hardirq_enable_event;
unsigned int hardirq_disable_event;
int hardirqs_enabled;
int hardirq_context;
unsigned long softirq_disable_ip;
unsigned long softirq_enable_ip;
unsigned int softirq_disable_event;
unsigned int softirq_enable_event;
int softirqs_enabled;
int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP // CONFIG_LOCKDEP=n
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
#endif
/* journalling filesystem info */
void *journal_info;
/* stacked block device info */
struct bio_list *bio_list;
#ifdef CONFIG_BLOCK // CONFIG_BLOCK=y
/* stack plugging */
struct blk_plug *plug;
#endif
/* VM state */
struct reclaim_state *reclaim_state;
struct backing_dev_info *backing_dev_info;
struct io_context *io_context;
unsigned long ptrace_message;
siginfo_t *last_siginfo; /* For ptrace use. */
struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT) // CONFIG_TASK_XACCT=n
u64 acct_rss_mem1; /* accumulated rss usage */
u64 acct_vm_mem1; /* accumulated virtual memory usage */
cputime_t acct_timexpd; /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS // CONFIG_CPUSETS=n
nodemask_t mems_allowed; /* Protected by alloc_lock */
seqcount_t mems_allowed_seq; /* Seqence no to catch updates */
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS // CONFIG_CGROUPS=y
/* Control Group info protected by css_set_lock */
struct css_set __rcu *cgroups;
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX // CONFIG_FUTEX=y
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT // CONFIG_COMPAT=n
struct compat_robust_list_head __user *compat_robust_list;
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS // CONFIG_PERF_EVENTS=n
struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA // CONFIG_NUMA=n
struct mempolicy *mempolicy; /* Protected by alloc_lock */
short il_next;
short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING // CONFIG_NUMA_BALANCING=n
int numa_scan_seq;
unsigned int numa_scan_period;
unsigned int numa_scan_period_max;
int numa_preferred_nid;
int numa_migrate_deferred;
unsigned long numa_migrate_retry;
u64 node_stamp; /* migration stamp */
struct callback_head numa_work;
struct list_head numa_entry;
struct numa_group *numa_group;
/*
* Exponential decaying average of faults on a per-node basis.
* Scheduling placement decisions are made based on the these counts.
* The values remain static for the duration of a PTE scan
*/
unsigned long *numa_faults;
unsigned long total_numa_faults;
/*
* numa_faults_buffer records faults per node during the current
* scan window. When the scan completes, the counts in numa_faults
* decay and these values are copied.
*/
unsigned long *numa_faults_buffer;
/*
* numa_faults_locality tracks if faults recorded during the last
* scan window were remote/local. The task scan period is adapted
* based on the locality of the faults with different weights
* depending on whether they were shared or private faults
*/
unsigned long numa_faults_locality[2];
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
struct rcu_head rcu;
/*
* cache last used pipe for splice
*/
struct pipe_inode_info *splice_pipe;
struct page_frag task_frag;
#ifdef CONFIG_TASK_DELAY_ACCT // CONFIG_TASK_DELAY_ACCT=n
struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION // CONFIG_FAULT_INJECTION=n
int make_it_fail;
#endif
/*
* when (nr_dirtied >= nr_dirtied_pause), it's time to call
* balance_dirty_pages() for some dirty throttling pause
*/
int nr_dirtied;
int nr_dirtied_pause;
unsigned long dirty_paused_when; /* start of a write-and-pause period */
#ifdef CONFIG_LATENCYTOP // CONFIG_LATENCYTOP=n
int latency_record_count;
struct latency_record latency_record[LT_SAVECOUNT];
#endif
/*
* time slack values; these are used to round up poll() and
* select() etc timeout values. These are in nanoseconds.
*/
unsigned long timer_slack_ns;
unsigned long default_timer_slack_ns;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER // CONFIG_FUNCTION_GRAPH_TRACER=n
/* Index of current stored address in ret_stack */
int curr_ret_stack;
/* Stack of return addresses for return function tracing */
struct ftrace_ret_stack *ret_stack;
/* time stamp for last schedule */
unsigned long long ftrace_timestamp;
/*
* Number of functions that haven't been traced
* because of depth overrun.
*/
atomic_t trace_overrun;
/* Pause for the tracing */
atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING // CONFIG_TRACING=n
/* state flags for use by tracers */
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
/* memcg uses this to do batch job */
#ifdef CONFIG_MEMCG // CONFIG_MEMCG=n
struct memcg_batch_info {
int do_batch; /* incremented when batch uncharge started */
struct mem_cgroup *memcg; /* target memcg of uncharge */
unsigned long nr_pages; /* uncharged usage */
unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
} memcg_batch;
unsigned int memcg_kmem_skip_account;
struct memcg_oom_info {
struct mem_cgroup *memcg;
gfp_t gfp_mask;
int order;
unsigned int may_oom:1;
} memcg_oom;
#endif
#ifdef CONFIG_UPROBES // CONFIG_UPROBES=n
struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) // CONFIG_BCACHE=n, CONFIG_BCACHE_MODULE=n
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
};
- spinlock.h::raw_spin_lock_irqsave()
// Statement macro: typecheck() enforces at compile time that `flags` is
// an unsigned long lvalue, then the saved IRQ state returned by
// _raw_spin_lock_irqsave() is stored into it. The do/while(0) wrapper
// makes the macro safe as a single statement.
#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_spin_lock_irqsave(lock); \
} while (0)
- rwsem_waiter_type
// Classification stored in struct rwsem_waiter.type: what kind of lock
// the queued task is waiting for.
enum rwsem_waiter_type {
RWSEM_WAITING_FOR_WRITE, // 0
RWSEM_WAITING_FOR_READ // 1
};
- start_kernel()->vfs_caches_init()
-
mnt_init()
-
sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- spin_lock()
- hlist_for_each_entry()
- spin_unlock()
- alloc_super()
- set()
- strlcpy()
- list_add_tail()
- hlist_add_head()
- spin_unlock(&sb_lock)
- get_filesystem(type)
- register_shrinker()
- down_write()
- __down_write()
- __down_write_nested()
- down_write()
- sget()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
-
__down_write_nested()가 주로 한일
-
sem->activity: (&(kmem_cache#25-oX (struct super_block))->s_umount)->activity: -1
// (Duplicate paste of the same kernel function quoted earlier in these
// notes.) Exclusive acquire for the spinlock-based rw-semaphore flavor:
// queue on sem->wait_list, sleep until no holder remains
// (sem->activity == 0), then mark the semaphore write-held
// (sem->activity = -1) and dequeue.
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
unsigned long flags;
raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* set up my own style of waitqueue */
tsk = current;
waiter.task = tsk;
waiter.type = RWSEM_WAITING_FOR_WRITE;
list_add_tail(&waiter.list, &sem->wait_list);
/* wait for someone to release the lock */
for (;;) {
/*
 * That is the key to support write lock stealing: allows the
 * task already on CPU to get the lock soon rather than put
 * itself into sleep and waiting for system woke it or someone
 * else in the head of the wait list up.
 */
if (sem->activity == 0)
break;
// lock held by someone else: sleep and retry after wakeup
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
schedule();
raw_spin_lock_irqsave(&sem->wait_lock, flags);
}
/* got the lock */
sem->activity = -1; // -1 marks the semaphore as exclusively (write) held
list_del(&waiter.list);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- spin_lock()
- hlist_for_each_entry()
- spin_unlock()
- alloc_super()
- set()
- strlcpy()
- list_add_tail()
- hlist_add_head()
- spin_unlock(&sb_lock)
- get_filesystem(type)
- register_shrinker()
- down_write()
- __down_write()
- __down_write_nested()
- list_add_tail()
- up_write()
- down_write()
- sget()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- spin_lock()
- hlist_for_each_entry()
- spin_unlock()
- alloc_super()
- set()
- strlcpy()
- list_add_tail()
- hlist_add_head()
- spin_unlock(&sb_lock)
- get_filesystem(type)
- register_shrinker()
- down_write()
- __down_write()
- __down_write_nested()
- list_add_tail()
- up_write()
- down_write()
- sget()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
// Release a rw-semaphore previously acquired for writing.
void up_write(struct rw_semaphore *sem)
{
// rwsem_release() expands to an empty statement (see the #defines below)
rwsem_release(&sem->dep_map, 1, _RET_IP_);
__up_write(sem);
}
// Lockdep hook: lock_release() is defined as an empty do/while(0), so
// rwsem_release() compiles away entirely in this configuration.
#define rwsem_release(l, n, i) lock_release(l, n, i)
# define lock_release(l, n, i) do { } while (0)
// Write-unlock fast path: subtract the active-writer bias from the
// atomic count; a resulting negative value means waiters are queued and
// the rwsem_wake() slow path must run.
// NOTE(review): this excerpt uses sem->count (atomic/XADD rwsem flavor),
// while __down_write_nested() above uses sem->activity (spinlock
// flavor) — the notes appear to mix the two implementations; verify
// which one the traced build actually compiles.
static inline void __up_write(struct rw_semaphore *sem)
{
if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_long_t *)&sem->count) < 0))
rwsem_wake(sem);
}
// Unlock slow path: under wait_lock, wake queued waiters (if any) via
// __rwsem_do_wake(); returns the (possibly updated) semaphore pointer.
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
- sget 에서 한일
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- sysfs_fill_super()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
// ARM10C 20151114
// type: &sysfs_fs_type, flags: 0x400000, name: "sysfs", data: NULL
// .mount callback of sysfs_fs_type: allocate per-superblock namespace
// info, obtain the superblock via sget(), and fill a fresh superblock.
// Kernel-internal mounts (MS_KERNMOUNT set) skip the capability and
// namespace-visibility checks.
static struct dentry *sysfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
struct sysfs_super_info *info;
enum kobj_ns_type type;
struct super_block *sb;
int error;
// flags: 0x400000, MS_KERNMOUNT: 0x400000
if (!(flags & MS_KERNMOUNT)) {
if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type))
return ERR_PTR(-EPERM);
for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) {
if (!kobj_ns_current_may_mount(type))
return ERR_PTR(-EPERM);
}
}
// sizeof(struct sysfs_super_info): 8 bytes, GFP_KERNEL: 0xD0
// kzalloc(8, GFP_KERNEL: 0xD0): kmem_cache#30-oX
info = kzalloc(sizeof(*info), GFP_KERNEL);
// info: kmem_cache#30-oX (struct sysfs_super_info)
// info: kmem_cache#30-oX (struct sysfs_super_info)
if (!info)
return ERR_PTR(-ENOMEM);
// KOBJ_NS_TYPE_NONE: 0, KOBJ_NS_TYPES: 2
for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
// type: 0, info->ns[0]: (kmem_cache#30-oX (struct sysfs_super_info))->ns[0],
// kobj_ns_grab_current(0): NULL
info->ns[type] = kobj_ns_grab_current(type);
// info->ns[0]: (kmem_cache#30-oX (struct sysfs_super_info))->ns[0]: NULL
// fs_type: &sysfs_fs_type, flags: 0x400000, info: kmem_cache#30-oX (struct sysfs_super_info)
sb = sget(fs_type, sysfs_test_super, sysfs_set_super, flags, info);
// If sget() failed or reused an existing superblock (s_fs_info != info),
// our freshly allocated info is not owned by sb and must be freed here.
if (IS_ERR(sb) || sb->s_fs_info != info)
free_sysfs_super_info(info);
if (IS_ERR(sb))
return ERR_CAST(sb);
if (!sb->s_root) {
error = sysfs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
// NOTE(review): excerpt truncated here in the notes — the error
// handling and the return of the root dentry are not shown.
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- sysfs_fill_super()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
// Populate a freshly created sysfs superblock: set block size, magic
// number, and super operations, then build the root inode from
// sysfs_root under sysfs_mutex.
static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode;
struct dentry *root;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = SYSFS_MAGIC;
sb->s_op = &sysfs_ops;
sb->s_time_gran = 1; // timestamp granularity in nanoseconds
/* get root inode, initialize and unlock it */
mutex_lock(&sysfs_mutex);
inode = sysfs_get_inode(sb, &sysfs_root);
// NOTE(review): excerpt truncated here in the notes — the mutex unlock,
// root dentry allocation, and return value are not shown.
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- sysfs_fill_super()
- sysfs_get_inode()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
- kmem_cache_create()
- sysfs_inode_init()
- register_filesystem()
- kern_mount()
- kern_mount_data()
- vfs_kern_mount()
- alloc_vfsmnt()
- mount_fs()
- mount(): sysfs_mount()
- sget()
- sysfs_fill_super()
- sysfs_get_inode()
- mount(): sysfs_mount()
- vfs_kern_mount()
- kern_mount_data()
// Return the in-core inode backing sysfs_dirent `sd` on superblock `sb`:
// look it up (or allocate it) by sd->s_ino; a freshly created inode
// (flagged I_NEW) is initialized from the dirent first.
struct inode *sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd)
{
struct inode *inode;
inode = iget_locked(sb, sd->s_ino);
if (inode && (inode->i_state & I_NEW))
sysfs_init_inode(sd, inode);
return inode;
}
// ARM10C 20151121
// sb: kmem_cache#25-oX (struct super_block), sd->s_ino: (&sysfs_root)->s_ino: 1
// Find inode `ino` of superblock `sb` in the global inode hash table;
// if not cached, allocate a new inode.
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
// inode_hashtable: 256KB memory area,
// sb: kmem_cache#25-oX (struct super_block), ino: (&sysfs_root)->s_ino: 1
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
spin_lock(&inode_hash_lock);
inode = find_inode_fast(sb, head, ino);
spin_unlock(&inode_hash_lock);
if (inode) {
// cache hit: wait until the inode leaves the I_NEW state, then use it
wait_on_inode(inode);
return inode;
}
inode = alloc_inode(sb);
// NOTE(review): excerpt truncated here in the notes — hash insertion
// and I_NEW handling for the new inode are not shown.
907a1e83..588d9f2 master -> origin/master
Updating 907a1e83..588d9f2
Fast-forward
arch/arm/include/asm/atomic.h | 11 +
fs/inode.c | 421 ++++++++++++++++++++++++++++++++-
fs/sysfs/inode.c | 127 ++++++++++
fs/sysfs/mount.c | 12 +
fs/sysfs/sysfs.h | 23 ++
include/asm-generic/percpu.h | 9 +
include/linux/compiler.h | 1 +
include/linux/fs.h | 21 ++
include/linux/gfp.h | 3 +
include/linux/kobject_ns.h | 1 +
include/linux/list.h | 13 +
include/linux/lockdep.h | 6 +
include/linux/mutex-debug.h | 2 +
include/linux/pagemap.h | 4 +
include/linux/percpu-defs.h | 3 +
include/linux/percpu.h | 33 ++-
include/linux/security.h | 2 +
include/linux/seqlock.h | 4 +
include/linux/spinlock.h | 10 +
include/linux/spinlock_types.h | 24 ++
include/linux/stat.h | 10 +
include/linux/sysfs.h | 2 +
include/linux/time.h | 1 +
include/linux/timekeeper_internal.h | 3 +
include/linux/types.h | 1 +
include/linux/uidgid.h | 9 +-
include/uapi/asm-generic/posix_types.h | 1 +
include/uapi/linux/stat.h | 14 ++
include/uapi/linux/time.h | 1 +
kernel/locking/mutex.c | 2 +
kernel/locking/spinlock_debug.c | 2 +
kernel/time/timekeeping.c | 13 +
kernel/user.c | 1 +
mm/backing-dev.c | 1 +
mm/slub.c | 2 +
35 files changed, 785 insertions(+), 8 deletions(-)