summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
44672e4)
fs: fs_struct rwlock to spinlock
struct fs_struct.lock is an rwlock with the read-side used to protect root and
pwd members while taking references to them. Taking a reference to a path
typically requires just 2 atomic ops, so the critical section is very small.
Parallel read-side operations would have cacheline contention on the lock, the
dentry, and the vfsmount cachelines, so the rwlock is unlikely to ever give a
real parallelism increase.
Replace it with a spinlock to avoid one or two atomic operations in typical
path lookup fastpath.
Signed-off-by: Nick Piggin <npiggin@kernel.dk>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
- read_lock(&current->fs->lock);
+ spin_lock(&current->fs->lock);
path.mnt = mntget(current->fs->root.mnt);
path.mnt = mntget(current->fs->root.mnt);
- read_unlock(&current->fs->lock);
+ spin_unlock(&current->fs->lock);
- read_lock(&current->fs->lock);
+ spin_lock(&current->fs->lock);
root = dget(current->fs->root.dentry);
root = dget(current->fs->root.dentry);
- read_unlock(&current->fs->lock);
+ spin_unlock(&current->fs->lock);
bprm->unsafe = tracehook_unsafe_exec(p);
n_fs = 1;
bprm->unsafe = tracehook_unsafe_exec(p);
n_fs = 1;
- write_lock(&p->fs->lock);
+ spin_lock(&p->fs->lock);
rcu_read_lock();
for (t = next_thread(p); t != p; t = next_thread(t)) {
if (t->fs == p->fs)
rcu_read_lock();
for (t = next_thread(p); t != p; t = next_thread(t)) {
if (t->fs == p->fs)
- write_unlock(&p->fs->lock);
+ spin_unlock(&p->fs->lock);
old_root = fs->root;
fs->root = *path;
path_get(path);
old_root = fs->root;
fs->root = *path;
path_get(path);
- write_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
if (old_root.dentry)
path_put(&old_root);
}
if (old_root.dentry)
path_put(&old_root);
}
old_pwd = fs->pwd;
fs->pwd = *path;
path_get(path);
old_pwd = fs->pwd;
fs->pwd = *path;
path_get(path);
- write_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
if (old_pwd.dentry)
path_put(&old_pwd);
if (old_pwd.dentry)
path_put(&old_pwd);
task_lock(p);
fs = p->fs;
if (fs) {
task_lock(p);
fs = p->fs;
if (fs) {
if (fs->root.dentry == old_root->dentry
&& fs->root.mnt == old_root->mnt) {
path_get(new_root);
if (fs->root.dentry == old_root->dentry
&& fs->root.mnt == old_root->mnt) {
path_get(new_root);
fs->pwd = *new_root;
count++;
}
fs->pwd = *new_root;
count++;
}
- write_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
}
task_unlock(p);
} while_each_thread(g, p);
}
task_unlock(p);
} while_each_thread(g, p);
if (fs) {
int kill;
task_lock(tsk);
if (fs) {
int kill;
task_lock(tsk);
tsk->fs = NULL;
kill = !--fs->users;
tsk->fs = NULL;
kill = !--fs->users;
- write_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
task_unlock(tsk);
if (kill)
free_fs_struct(fs);
task_unlock(tsk);
if (kill)
free_fs_struct(fs);
if (fs) {
fs->users = 1;
fs->in_exec = 0;
if (fs) {
fs->users = 1;
fs->in_exec = 0;
- rwlock_init(&fs->lock);
+ spin_lock_init(&fs->lock);
fs->umask = old->umask;
get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
}
fs->umask = old->umask;
get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
}
return -ENOMEM;
task_lock(current);
return -ENOMEM;
task_lock(current);
kill = !--fs->users;
current->fs = new_fs;
kill = !--fs->users;
current->fs = new_fs;
- write_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
task_unlock(current);
if (kill)
task_unlock(current);
if (kill)
/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
.users = 1,
/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
.users = 1,
- .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
- write_lock(&init_fs.lock);
+ spin_lock(&init_fs.lock);
- write_unlock(&init_fs.lock);
+ spin_unlock(&init_fs.lock);
current->fs = &init_fs;
kill = !--fs->users;
current->fs = &init_fs;
kill = !--fs->users;
- write_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
task_unlock(current);
if (kill)
task_unlock(current);
if (kill)
struct fs_struct {
int users;
struct fs_struct {
int users;
int umask;
int in_exec;
struct path root, pwd;
int umask;
int in_exec;
struct path root, pwd;
static inline void get_fs_root(struct fs_struct *fs, struct path *root)
{
static inline void get_fs_root(struct fs_struct *fs, struct path *root)
{
*root = fs->root;
path_get(root);
*root = fs->root;
path_get(root);
- read_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
}
static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
{
}
static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
{
*pwd = fs->pwd;
path_get(pwd);
*pwd = fs->pwd;
path_get(pwd);
- read_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
}
static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
struct path *pwd)
{
}
static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
struct path *pwd)
{
*root = fs->root;
path_get(root);
*pwd = fs->pwd;
path_get(pwd);
*root = fs->root;
path_get(root);
*pwd = fs->pwd;
path_get(pwd);
- read_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
}
#endif /* _LINUX_FS_STRUCT_H */
}
#endif /* _LINUX_FS_STRUCT_H */
struct fs_struct *fs = current->fs;
if (clone_flags & CLONE_FS) {
/* tsk->fs is already what we want */
struct fs_struct *fs = current->fs;
if (clone_flags & CLONE_FS) {
/* tsk->fs is already what we want */
- write_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
return -EAGAIN;
}
fs->users++;
return -EAGAIN;
}
fs->users++;
- write_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
return 0;
}
tsk->fs = copy_fs_struct(fs);
return 0;
}
tsk->fs = copy_fs_struct(fs);
if (new_fs) {
fs = current->fs;
if (new_fs) {
fs = current->fs;
current->fs = new_fs;
if (--fs->users)
new_fs = NULL;
else
new_fs = fs;
current->fs = new_fs;
if (--fs->users)
new_fs = NULL;
else
new_fs = fs;
- write_unlock(&fs->lock);
+ spin_unlock(&fs->lock);