Import of kernel-4.18.0-553.89.1.el8_10

commit 50d3a07414
parent be886766a2

Makefile

@@ -12,7 +12,7 @@ RHEL_MINOR = 10
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 553.87.1
+RHEL_RELEASE = 553.89.1
 
 #
 # ZSTREAM
drivers/infiniband/sw/rxe/rxe_mr.c

@@ -99,6 +99,7 @@ err2:
 		kfree(mr->map[i]);
 
 	kfree(mr->map);
+	mr->map = NULL;
 err1:
 	return -ENOMEM;
 }
@@ -123,7 +124,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	int num_buf;
 	void *vaddr;
 	int err;
-	int i;
 
 	umem = ib_umem_get(udata, start, length, access);
 	if (IS_ERR(umem)) {
@@ -164,9 +164,8 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 			pr_warn("%s: Unable to get virtual address\n",
 				__func__);
 			err = -ENOMEM;
-			goto err_cleanup_map;
+			goto err_release_umem;
 		}
 
 		buf->addr = (uintptr_t)vaddr;
 		buf->size = PAGE_SIZE;
 		num_buf++;
@@ -186,10 +185,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	return 0;
 
-err_cleanup_map:
-	for (i = 0; i < mr->num_map; i++)
-		kfree(mr->map[i]);
-	kfree(mr->map);
 err_release_umem:
 	ib_umem_release(umem);
 err_out:
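
Note on the rxe_mr.c hunks above: rxe_mr_init_user() previously freed a partially built page map itself (err_cleanup_map) while leaving mr->map pointing at the freed table, so the common MR teardown could free it again. The change appears to defer freeing the map to that common teardown, and the rxe_mr_alloc() error path (the err2/err1 labels) now clears mr->map after freeing it so a later teardown sees NULL. A minimal userspace sketch of the free-and-NULL idiom, with hypothetical names, not the kernel code:

#include <stdlib.h>

struct table {
	void **map;
	int num_map;
};

/* Free a partially built table and clear the pointer so that a later,
 * common teardown path sees NULL and does not free it a second time. */
static void table_destroy(struct table *t)
{
	int i;

	if (!t->map)
		return;
	for (i = 0; i < t->num_map; i++)
		free(t->map[i]);
	free(t->map);
	t->map = NULL;
}

int main(void)
{
	struct table t = { .map = calloc(4, sizeof(void *)), .num_map = 4 };

	table_destroy(&t);
	table_destroy(&t);	/* safe: second call is a no-op */
	return 0;
}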

drivers/infiniband/sw/rxe/rxe_req.c

@@ -529,10 +529,11 @@ static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
 {
 	rollback_wqe->state = wqe->state;
 	rollback_wqe->first_psn = wqe->first_psn;
 	rollback_wqe->last_psn = wqe->last_psn;
+	rollback_wqe->dma = wqe->dma;
 	*rollback_psn = qp->req.psn;
 }
 
 static void rollback_state(struct rxe_send_wqe *wqe,
@@ -540,10 +541,11 @@ static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
 {
 	wqe->state = rollback_wqe->state;
 	wqe->first_psn = rollback_wqe->first_psn;
 	wqe->last_psn = rollback_wqe->last_psn;
+	wqe->dma = rollback_wqe->dma;
 	qp->req.psn = rollback_psn;
 }
 
 static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
@@ -746,6 +748,9 @@ int rxe_requester(void *arg)
 	pkt.mask = rxe_opcode[opcode].mask;
 	pkt.wqe = wqe;
 
+	/* save wqe state before we build and send packet */
+	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
+
 	av = rxe_get_av(&pkt, &ah);
 	if (unlikely(!av)) {
 		pr_err("qp#%d Failed no address vector\n", qp_num(qp));
@@ -778,29 +783,29 @@ int rxe_requester(void *arg)
 	if (ah)
 		rxe_put(ah);
 
-	/*
-	 * To prevent a race on wqe access between requester and completer,
-	 * wqe members state and psn need to be set before calling
-	 * rxe_xmit_packet().
-	 * Otherwise, completer might initiate an unjustified retry flow.
-	 */
-	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
+	/* update wqe state as though we had sent it */
 	update_wqe_state(qp, wqe, &pkt);
 	update_wqe_psn(qp, wqe, &pkt, payload);
 
 	err = rxe_xmit_packet(qp, &pkt, skb);
 	if (err) {
-		qp->need_req_skb = 1;
-
-		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
-
-		if (err == -EAGAIN) {
-			rxe_run_task(&qp->req.task, 1);
-			goto exit;
+		if (err != -EAGAIN) {
+			wqe->status = IB_WC_LOC_QP_OP_ERR;
+			goto err;
 		}
 
-		wqe->status = IB_WC_LOC_QP_OP_ERR;
-		goto err;
+		/* the packet was dropped so reset wqe to the state
+		 * before we sent it so we can try to resend
+		 */
+		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
+
+		/* force a delay until the dropped packet is freed and
+		 * the send queue is drained below the low water mark
+		 */
+		qp->need_req_skb = 1;
+
+		rxe_run_task(&qp->req.task, 1);
+		goto exit;
 	}
 
 	update_state(qp, &pkt);
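
Note on the rxe_req.c hunks above: save_state() now runs before the packet is built and transmitted, the snapshot additionally records wqe->dma, and the rxe_xmit_packet() error handling is inverted: anything other than -EAGAIN completes the wqe with IB_WC_LOC_QP_OP_ERR, while a dropped packet rolls the wqe back and reschedules the task. A hedged sketch of that snapshot/rollback shape, with illustrative names only:

#include <stdio.h>

/* Not the kernel code: snapshot state before a send attempt and roll
 * back only on a retryable failure, mirroring the save_state() /
 * rollback_state() ordering the patch adopts. */
struct wqe_state {
	int state;
	unsigned int psn;
	unsigned long dma_offset;	/* the patch adds DMA info to the snapshot */
};

static int try_send(int attempt)
{
	return attempt == 0 ? -1 : 0;	/* first attempt "drops" the packet */
}

int main(void)
{
	struct wqe_state wqe = { 1, 100, 0 }, saved;
	int attempt;

	for (attempt = 0; attempt < 2; attempt++) {
		saved = wqe;			/* save before building/sending */
		wqe.psn++;			/* mutate as though we had sent it */
		wqe.dma_offset += 512;
		if (try_send(attempt) == 0)
			break;
		wqe = saved;			/* dropped: restore and retry */
		printf("retrying from psn %u\n", wqe.psn);
	}
	printf("final psn %u, dma_offset %lu\n", wqe.psn, wqe.dma_offset);
	return 0;
}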

drivers/s390/scsi/zfcp_sysfs.c

@@ -450,6 +450,8 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 	if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
 		return -EINVAL;
 
+	flush_work(&port->rport_work);
+
 	retval = zfcp_unit_add(port, fcp_lun);
 	if (retval)
 		return retval;
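
Note on the zfcp hunk above: flush_work(&port->rport_work) makes the sysfs unit-add path wait for any still-queued remote-port work before zfcp_unit_add() runs, so the new unit is not attached to a half-set-up rport. A hedged userspace analog of that flush-before-proceed ordering (hypothetical names, pthread join standing in for flush_work()):

#include <pthread.h>
#include <stdio.h>

static int rport_ready;

static void *rport_work(void *arg)
{
	(void)arg;
	rport_ready = 1;	/* simulate rport registration */
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, rport_work, NULL);
	pthread_join(&worker, NULL);	/* the "flush": wait for the work item */
	printf("unit add proceeds, rport_ready=%d\n", rport_ready);
	return 0;
}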

fs/gfs2/file.c

@@ -1414,6 +1414,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
 	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+	int ret;
 
 	if (!(fl->fl_flags & FL_POSIX))
 		return -ENOLCK;
@@ -1430,12 +1431,18 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 		locks_lock_file_wait(file, fl);
 		return -EIO;
 	}
-	if (IS_GETLK(cmd))
-		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
-	else if (fl->fl_type == F_UNLCK)
-		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
-	else
-		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
+	down_read(&ls->ls_sem);
+	ret = -ENODEV;
+	if (likely(ls->ls_dlm != NULL)) {
+		if (IS_GETLK(cmd))
+			ret = dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
+		else if (fl->fl_type == F_UNLCK)
+			ret = dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
+		else
+			ret = dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
+	}
+	up_read(&ls->ls_sem);
+	return ret;
 }
 
 static int do_flock(struct file *file, int cmd, struct file_lock *fl)

fs/gfs2/glock.c

@@ -821,21 +821,22 @@ skip_inval:
 		ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
 		spin_lock(&gl->gl_lockref.lock);
 
-		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
-		    target == LM_ST_UNLOCKED &&
-		    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
-			/*
-			 * The lockspace has been released and the lock has
-			 * been unlocked implicitly.
-			 */
-		} else if (ret) {
-			fs_err(sdp, "lm_lock ret %d\n", ret);
-			target = gl->gl_state | LM_OUT_ERROR;
-		} else {
+		if (!ret) {
 			/* The operation will be completed asynchronously. */
 			return;
 		}
 		clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
+
+		if (ret == -ENODEV && gl->gl_target == LM_ST_UNLOCKED &&
+		    target == LM_ST_UNLOCKED) {
+			/*
+			 * The lockspace has been released and the lock has
+			 * been unlocked implicitly.
+			 */
+		} else {
+			fs_err(sdp, "lm_lock ret %d\n", ret);
+			target = gl->gl_state | LM_OUT_ERROR;
+		}
 	}
 
 	/* Complete the operation now. */

fs/gfs2/incore.h

@@ -665,6 +665,8 @@ struct lm_lockstruct {
 	struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
 	char *ls_lvb_bits;
 
+	struct rw_semaphore ls_sem;
+
 	spinlock_t ls_recover_spin; /* protects following fields */
 	unsigned long ls_recover_flags; /* DFL_ */
 	uint32_t ls_recover_mount; /* gen in first recover_done cb */

fs/gfs2/lock_dlm.c

@@ -294,8 +294,13 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
 	 */
 
 again:
-	error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
-			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+	down_read(&ls->ls_sem);
+	error = -ENODEV;
+	if (likely(ls->ls_dlm != NULL)) {
+		error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
+				GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+	}
+	up_read(&ls->ls_sem);
 	if (error == -EBUSY) {
 		msleep(20);
 		goto again;
@@ -335,8 +340,13 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 	}
 
 again:
-	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
-			   NULL, gl);
+	down_read(&ls->ls_sem);
+	error = -ENODEV;
+	if (likely(ls->ls_dlm != NULL)) {
+		error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid,
+				   DLM_LKF_VALBLK, NULL, gl);
+	}
+	up_read(&ls->ls_sem);
 	if (error == -EBUSY) {
 		msleep(20);
 		goto again;
@@ -352,7 +362,12 @@ again:
 static void gdlm_cancel(struct gfs2_glock *gl)
 {
 	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
-	dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
+
+	down_read(&ls->ls_sem);
+	if (likely(ls->ls_dlm != NULL)) {
+		dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
+	}
+	up_read(&ls->ls_sem);
 }
 
 /*
@@ -533,7 +548,11 @@ static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 	int error;
 
-	error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
+	down_read(&ls->ls_sem);
+	error = -ENODEV;
+	if (likely(ls->ls_dlm != NULL))
+		error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
+	up_read(&ls->ls_sem);
 	if (error) {
 		fs_err(sdp, "%s lkid %x error %d\n",
 		       name, lksb->sb_lkid, error);
@@ -560,9 +579,14 @@ static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
 	memset(strname, 0, GDLM_STRNAME_BYTES);
 	snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);
 
-	error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
-			 strname, GDLM_STRNAME_BYTES - 1,
-			 0, sync_wait_cb, ls, NULL);
+	down_read(&ls->ls_sem);
+	error = -ENODEV;
+	if (likely(ls->ls_dlm != NULL)) {
+		error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
+				 strname, GDLM_STRNAME_BYTES - 1,
+				 0, sync_wait_cb, ls, NULL);
+	}
+	up_read(&ls->ls_sem);
 	if (error) {
 		fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
 		       name, lksb->sb_lkid, flags, mode, error);
@@ -1288,6 +1312,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
 	 */
 
 	INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
+	ls->ls_dlm = NULL;
 	spin_lock_init(&ls->ls_recover_spin);
 	ls->ls_recover_flags = 0;
 	ls->ls_recover_mount = 0;
@@ -1322,6 +1347,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
 	 * create/join lockspace
 	 */
 
+	init_rwsem(&ls->ls_sem);
 	error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
 				  &gdlm_lockspace_ops, sdp, &ops_result,
 				  &ls->ls_dlm);
@@ -1401,10 +1427,12 @@ static void gdlm_unmount(struct gfs2_sbd *sdp)
 
 	/* mounted_lock and control_lock will be purged in dlm recovery */
 release:
+	down_write(&ls->ls_sem);
 	if (ls->ls_dlm) {
 		dlm_release_lockspace(ls->ls_dlm, 2);
 		ls->ls_dlm = NULL;
 	}
+	up_write(&ls->ls_sem);
 
 	free_recover_size(ls);
 }
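
Note on the GFS2 hunks above (file.c, glock.c, incore.h, lock_dlm.c): the new rw_semaphore ls_sem serializes use of ls->ls_dlm against unmount. Callers into DLM take ls_sem shared and fall back to -ENODEV when the lockspace pointer is already NULL; gdlm_unmount() takes it exclusive while releasing the lockspace and clearing the pointer; gdlm_mount() initializes both. A hedged userspace analog of the pattern, with a pthread rwlock standing in for the kernel rwsem and hypothetical names throughout:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct lockspace { int dummy; };

static pthread_rwlock_t ls_sem = PTHREAD_RWLOCK_INITIALIZER;
static struct lockspace *ls_dlm;

/* Readers take the lock shared, check the handle, and fail with
 * -ENODEV if teardown already ran. */
static int do_locked_op(void)
{
	int error;

	pthread_rwlock_rdlock(&ls_sem);
	error = -ENODEV;
	if (ls_dlm != NULL)
		error = 0;	/* would call into the lockspace here */
	pthread_rwlock_unlock(&ls_sem);
	return error;
}

/* Teardown takes the lock exclusive to free and clear the handle. */
static void teardown(void)
{
	pthread_rwlock_wrlock(&ls_sem);
	free(ls_dlm);
	ls_dlm = NULL;		/* later readers now see -ENODEV */
	pthread_rwlock_unlock(&ls_sem);
}

int main(void)
{
	ls_dlm = malloc(sizeof(*ls_dlm));
	printf("before teardown: %d\n", do_locked_op());	/* 0 */
	teardown();
	printf("after teardown: %d\n", do_locked_op());		/* -ENODEV */
	return 0;
}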

mm/memcontrol.c

@@ -2243,23 +2243,17 @@ void unlock_page_memcg(struct page *page)
 }
 EXPORT_SYMBOL(unlock_page_memcg);
 
-struct obj_stock {
+struct memcg_stock_pcp {
+	struct mem_cgroup *cached; /* this never be root cgroup */
+	unsigned int nr_pages;
+
 #ifdef CONFIG_MEMCG_KMEM
 	struct obj_cgroup *cached_objcg;
 	struct pglist_data *cached_pgdat;
 	unsigned int nr_bytes;
 	int nr_slab_reclaimable_b;
 	int nr_slab_unreclaimable_b;
-#else
-	int dummy[0];
 #endif
-};
-
-struct memcg_stock_pcp {
-	struct mem_cgroup *cached; /* this never be root cgroup */
-	unsigned int nr_pages;
-	struct obj_stock task_obj;
-	struct obj_stock irq_obj;
 
 	struct work_struct work;
 	unsigned long flags;
@@ -2269,12 +2263,12 @@ static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
 static DEFINE_MUTEX(percpu_charge_mutex);
 
 #ifdef CONFIG_MEMCG_KMEM
-static void drain_obj_stock(struct obj_stock *stock);
+static void drain_obj_stock(struct memcg_stock_pcp *stock);
 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg);
 
 #else
-static inline void drain_obj_stock(struct obj_stock *stock)
+static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
 {
 }
 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
@@ -2307,7 +2301,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
-	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
+	if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
 		stock->nr_pages -= nr_pages;
 		ret = true;
 	}
@@ -2322,7 +2316,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
  */
 static void drain_stock(struct memcg_stock_pcp *stock)
 {
-	struct mem_cgroup *old = stock->cached;
+	struct mem_cgroup *old = READ_ONCE(stock->cached);
 
 	if (!old)
 		return;
@@ -2335,7 +2329,7 @@ static void drain_stock(struct memcg_stock_pcp *stock)
 	}
 
 	css_put(&old->css);
-	stock->cached = NULL;
+	WRITE_ONCE(stock->cached, NULL);
 }
 
 static void drain_local_stock(struct work_struct *dummy)
@@ -2350,9 +2344,7 @@ static void drain_local_stock(struct work_struct *dummy)
 	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
-	drain_obj_stock(&stock->irq_obj);
-	if (in_task())
-		drain_obj_stock(&stock->task_obj);
+	drain_obj_stock(stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
@@ -2371,10 +2363,10 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
-	if (stock->cached != memcg) { /* reset if necessary */
+	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
 		drain_stock(stock);
 		css_get(&memcg->css);
-		stock->cached = memcg;
+		WRITE_ONCE(stock->cached, memcg);
 	}
 	stock->nr_pages += nr_pages;
 
@@ -2408,7 +2400,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
 		bool flush = false;
 
 		rcu_read_lock();
-		memcg = stock->cached;
+		memcg = READ_ONCE(stock->cached);
 		if (memcg && stock->nr_pages &&
 		    mem_cgroup_is_descendant(memcg, root_memcg))
 			flush = true;
@@ -2950,41 +2942,6 @@ retry:
  */
 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
 
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
-	struct memcg_stock_pcp *stock;
-
-	if (likely(in_task())) {
-		*pflags = 0UL;
-		preempt_disable();
-		stock = this_cpu_ptr(&memcg_stock);
-		return &stock->task_obj;
-	}
-
-	local_irq_save(*pflags);
-	stock = this_cpu_ptr(&memcg_stock);
-	return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
-	if (likely(in_task()))
-		preempt_enable();
-	else
-		local_irq_restore(flags);
-}
-
 /*
  * mod_objcg_mlstate() may be called with irq enabled, so
  * mod_memcg_lruvec_state() should be used.
@@ -3263,21 +3220,24 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr)
 {
+	struct memcg_stock_pcp *stock;
 	unsigned long flags;
-	struct obj_stock *stock = get_obj_stock(&flags);
 	int *bytes;
 
+	local_irq_save(flags);
+	stock = this_cpu_ptr(&memcg_stock);
+
 	/*
 	 * Save vmstat data in stock and skip vmstat array update unless
 	 * accumulating over a page of vmstat data or when pgdat or idx
 	 * changes.
 	 */
-	if (stock->cached_objcg != objcg) {
+	if (READ_ONCE(stock->cached_objcg) != objcg) {
 		drain_obj_stock(stock);
 		obj_cgroup_get(objcg);
 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
-		stock->cached_objcg = objcg;
+		WRITE_ONCE(stock->cached_objcg, objcg);
 		stock->cached_pgdat = pgdat;
 	} else if (stock->cached_pgdat != pgdat) {
 		/* Flush the existing cached vmstat data */
@@ -3317,28 +3277,31 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 	if (nr)
 		mod_objcg_mlstate(objcg, pgdat, idx, nr);
 
-	put_obj_stock(flags);
+	local_irq_restore(flags);
 }
 
 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
 {
+	struct memcg_stock_pcp *stock;
 	unsigned long flags;
-	struct obj_stock *stock = get_obj_stock(&flags);
 	bool ret = false;
 
-	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
+	local_irq_save(flags);
+
+	stock = this_cpu_ptr(&memcg_stock);
+	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
 		stock->nr_bytes -= nr_bytes;
 		ret = true;
 	}
 
-	put_obj_stock(flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
 
-static void drain_obj_stock(struct obj_stock *stock)
+static void drain_obj_stock(struct memcg_stock_pcp *stock)
 {
-	struct obj_cgroup *old = stock->cached_objcg;
+	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
 
 	if (!old)
 		return;
@@ -3384,21 +3347,17 @@ static void drain_obj_stock(struct obj_stock *stock)
 	}
 
 	obj_cgroup_put(old);
-	stock->cached_objcg = NULL;
+	WRITE_ONCE(stock->cached_objcg, NULL);
 }
 
 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg)
 {
+	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
 	struct mem_cgroup *memcg;
 
-	if (in_task() && stock->task_obj.cached_objcg) {
-		memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg);
-		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
-			return true;
-	}
-	if (stock->irq_obj.cached_objcg) {
-		memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg);
+	if (objcg) {
+		memcg = obj_cgroup_memcg(objcg);
 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
 			return true;
 	}
@@ -3409,14 +3368,17 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
			     bool allow_uncharge)
 {
+	struct memcg_stock_pcp *stock;
 	unsigned long flags;
-	struct obj_stock *stock = get_obj_stock(&flags);
 	unsigned int nr_pages = 0;
 
-	if (stock->cached_objcg != objcg) { /* reset if necessary */
+	local_irq_save(flags);
+
+	stock = this_cpu_ptr(&memcg_stock);
+	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
 		drain_obj_stock(stock);
 		obj_cgroup_get(objcg);
-		stock->cached_objcg = objcg;
+		WRITE_ONCE(stock->cached_objcg, objcg);
 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
@@ -3428,7 +3390,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 		stock->nr_bytes &= (PAGE_SIZE - 1);
 	}
 
-	put_obj_stock(flags);
+	local_irq_restore(flags);
 
 	if (nr_pages)
 		obj_cgroup_uncharge_pages(objcg, nr_pages);
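
Note on the mm/memcontrol.c hunks above: the split task/irq object stocks are folded back into a single per-CPU memcg_stock_pcp accessed under local_irq_save(), and every load and store of the cached and cached_objcg pointers is wrapped in READ_ONCE()/WRITE_ONCE(), since drain_all_stock() inspects other CPUs' stocks locklessly under RCU. A hedged sketch of what the annotations buy, with userspace stand-ins for the kernel macros and invented names:

#include <stdio.h>

/* Volatile accesses (the shape of the kernel's READ_ONCE/WRITE_ONCE)
 * forbid the compiler from tearing, caching, or re-reading the value,
 * which matters when another thread peeks at it without a lock. */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

struct mem_cgroup { const char *name; };

static struct mem_cgroup *cached;

static void refill(struct mem_cgroup *memcg)
{
	if (READ_ONCE(cached) != memcg)
		WRITE_ONCE(cached, memcg);	/* single, untorn store */
}

static void peek(void)
{
	struct mem_cgroup *memcg = READ_ONCE(cached);	/* one stable load */

	printf("cached: %s\n", memcg ? memcg->name : "(none)");
}

int main(void)
{
	struct mem_cgroup cg = { "demo" };

	peek();
	refill(&cg);
	peek();
	return 0;
}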