diff --git a/Makefile b/Makefile
index d059e257b976a..d36b8f4228a47 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 213
+SUBLEVEL = 214
EXTRAVERSION =
NAME = Petit Gorille

diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index b750ffef83c7d..0ec93d940d12c 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -220,15 +220,34 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
*/
static __inline__ int fls(unsigned int x)
{
- return 32 - __builtin_clz(x);
+ int lz;
+
+ if (__builtin_constant_p(x))
+ return x ? 32 - __builtin_clz(x) : 0;
+ asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
+ return 32 - lz;
}

#include <asm-generic/bitops/builtin-__fls.h>

+/*
+ * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
+ * instruction; for 32-bit we use the generic version, which does two
+ * 32-bit fls calls.
+ */
+#ifdef CONFIG_PPC64
static __inline__ int fls64(__u64 x)
{
- return 64 - __builtin_clzll(x);
+ int lz;
+
+ if (__builtin_constant_p(x))
+ return x ? 64 - __builtin_clzll(x) : 0;
+ asm("cntlzd %0,%1" : "=r" (lz) : "r" (x));
+ return 64 - lz;
}
+#else
+#include <asm-generic/bitops/fls64.h>
+#endif

#ifdef CONFIG_PPC64
unsigned int __arch_hweight8(unsigned int w);
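The change above guards the x == 0 case: __builtin_clz(0) is undefined behaviour, while the cntlzw/cntlzd instructions return the operand width for a zero input, so fls(0) and fls64(0) now yield 0 on every path. A minimal user-space sketch of the expected semantics (not from the patch; plain C, no kernel headers):

#include <assert.h>

/* Portable reference for fls(): 1-based index of the highest set bit, 0 for 0. */
static int fls_ref(unsigned int x)
{
	int bit = 0;

	while (x) {		/* shift right until no bits remain */
		x >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	assert(fls_ref(0) == 0);		/* the case the patch fixes */
	assert(fls_ref(1) == 1);
	assert(fls_ref(0x80000000u) == 32);
	return 0;
}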
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 280e964e1aa88..497e86cfb12e0 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)

/* IO map the message register block. */
of_address_to_resource(np, 0, &rsrc);
- msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
+ msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc));
if (!msgr_block_addr) {
dev_err(&dev->dev, "Failed to iomap MPIC message registers");
return -EFAULT;
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f24974bddfc96..ac389ffb1822b 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -55,7 +55,7 @@ END(native_usergs_sysret64)

.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
- bt $9, EFLAGS(%rsp) /* interrupts off? */
+ btl $9, EFLAGS(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON
1:
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 01fcd715485c5..c56512297fb9e 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -110,6 +110,13 @@ enum bmi160_sensor_type {

struct bmi160_data {
struct regmap *regmap;
+ /*
+ * Ensure natural alignment for timestamp if present.
+ * Max length needed: 2 * 3 channels + 4 bytes padding + 8 byte ts.
+ * If fewer channels are enabled, less space may be needed, as
+ * long as the timestamp is still aligned to 8 bytes.
+ */
+ __le16 buf[12] __aligned(8);
};

const struct regmap_config bmi160_regmap_config = {
@@ -385,8 +392,6 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmi160_data *data = iio_priv(indio_dev);
- __le16 buf[12];
- /* 2 sens x 3 axis x __le16 + 2 x __le16 pad + 4 x __le16 tstamp */
int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
__le16 sample;

@@ -396,10 +401,10 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
&sample, sizeof(sample));
if (ret < 0)
goto done;
- buf[j++] = sample;
+ data->buf[j++] = sample;
}

- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buf,
iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index dad8d57f7402b..974e141c0dc04 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -52,6 +52,12 @@ struct mag3110_data {
struct i2c_client *client;
struct mutex lock;
u8 ctrl_reg1;
+ /* Ensure natural alignment of timestamp */
+ struct {
+ __be16 channels[3];
+ u8 temperature;
+ s64 ts __aligned(8);
+ } scan;
};

static int mag3110_request(struct mag3110_data *data)
@@ -262,10 +268,9 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mag3110_data *data = iio_priv(indio_dev);
- u8 buffer[16]; /* 3 16-bit channels + 1 byte temp + padding + ts */
int ret;

- ret = mag3110_read(data, (__be16 *) buffer);
+ ret = mag3110_read(data, data->scan.channels);
if (ret < 0)
goto done;

@@ -274,10 +279,10 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p)
MAG3110_DIE_TEMP);
if (ret < 0)
goto done;
- buffer[6] = ret;
+ data->scan.temperature = ret;
}

- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));

done:
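Both IIO fixes above replace an on-stack buffer with driver-private storage laid out so that iio_push_to_buffers_with_timestamp() can append an s64 timestamp at an 8-byte boundary. A standalone sketch of the layout idea (hypothetical struct, not the driver's code):

#include <stdint.h>
#include <stddef.h>

/* Scan buffer: sample data first, 8-byte-aligned timestamp last. */
struct scan {
	uint16_t channels[3];	/* 6 bytes of sample data */
	uint8_t temperature;	/* 1 byte; compiler pads up to offset 8 */
	int64_t ts __attribute__((aligned(8)));
};

_Static_assert(offsetof(struct scan, ts) % 8 == 0,
	       "timestamp must be naturally aligned");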
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index e705799976c2c..2dae30713eb3d 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -551,6 +551,15 @@ static int verity_verify_io(struct dm_verity_io *io)
return 0;
}

+/*
+ * Skip verity work in response to I/O error when system is shutting down.
+ */
+static inline bool verity_is_system_shutting_down(void)
+{
+ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
+ || system_state == SYSTEM_RESTART;
+}
+
/*
* End one "io" structure with a given error.
*/
@@ -578,7 +587,8 @@ static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;

- if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
+ if (bio->bi_status &&
+ (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
verity_finish_io(io, bio->bi_status);
return;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d08d77b9674ff..419ecdd914f4c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1120,7 +1120,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
struct md_rdev *err_rdev = NULL;
gfp_t gfp = GFP_NOIO;

- if (r10_bio->devs[slot].rdev) {
+ if (slot >= 0 && r10_bio->devs[slot].rdev) {
/*
* This is an error retry, but we cannot
* safely dereference the rdev in the r10_bio,
@@ -1513,6 +1513,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
r10_bio->mddev = mddev;
r10_bio->sector = bio->bi_iter.bi_sector;
r10_bio->state = 0;
+ r10_bio->read_slot = -1;
memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);

if (bio_data_dir(bio) == READ)
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 37f062225ed21..aac677f6aaa4f 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -185,7 +185,7 @@ out_rel_fw:

static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- u8 status, buf;
+ u8 status = 0, buf;
int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);

if (onoff) {
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
index bc089e634a751..26e20b091160a 100644
--- a/drivers/misc/vmw_vmci/vmci_context.c
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -751,7 +751,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
return VMCI_ERROR_MORE_DATA;
}

- dbells = kmalloc(data_size, GFP_ATOMIC);
+ dbells = kzalloc(data_size, GFP_ATOMIC);
if (!dbells)
return VMCI_ERROR_NO_MEM;

diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index d87aeff70cefb..c2cb1e711c06e 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -877,6 +877,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,

memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN);

+ if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN)
+ req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN;
memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);

mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n",
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 8eb2b6dd36fea..1d0d9c8d0085d 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -230,7 +230,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
300000000);
if (IS_ERR(rtc->int_osc)) {
pr_crit("Couldn't register the internal oscillator\n");
- return;
+ goto err;
}

parents[0] = clk_hw_get_name(rtc->int_osc);
@@ -246,7 +246,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
rtc->losc = clk_register(NULL, &rtc->hw);
if (IS_ERR(rtc->losc)) {
pr_crit("Couldn't register the LOSC clock\n");
- return;
+ goto err_register;
}

of_property_read_string_index(node, "clock-output-names", 1,
@@ -257,7 +257,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
&rtc->lock);
if (IS_ERR(rtc->ext_losc)) {
pr_crit("Couldn't register the LOSC external gate\n");
- return;
+ goto err_register;
}

clk_data->num = 2;
@@ -266,6 +266,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
return;

+err_register:
+ clk_hw_unregister_fixed_rate(rtc->int_osc);
err:
kfree(clk_data);
}
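The rtc-sun6i change converts bare returns into the usual goto-based unwind, so each failure point releases exactly what was set up before it. A generic sketch of the pattern (made-up resource names, not the driver's code):

/* Hypothetical staged init with reverse-order cleanup. */
int setup_a(void);
int setup_b(void);
void teardown_a(void);

int init_chain(void)
{
	int err;

	err = setup_a();
	if (err)
		goto err_out;		/* nothing to undo yet */

	err = setup_b();
	if (err)
		goto err_undo_a;	/* undo what succeeded, in reverse */

	return 0;

err_undo_a:
	teardown_a();
err_out:
	return err;
}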
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 487b16ace0060..0f70cae1c01e3 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -462,11 +462,19 @@ static int read_unit_address_configuration(struct dasd_device *device,
spin_unlock_irqrestore(&lcu->lock, flags);

rc = dasd_sleep_on(cqr);
- if (rc && !suborder_not_supported(cqr)) {
+ if (!rc)
+ goto out;
+
+ if (suborder_not_supported(cqr)) {
+ /* suborder not supported or device unusable for IO */
+ rc = -EOPNOTSUPP;
+ } else {
+ /* IO failed but should be retried */
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
}
+out:
dasd_kfree_request(cqr, cqr->memdev);
return rc;
}
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 2ce39af32cfa6..e494ffdc06bc9 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -23,7 +23,6 @@
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/wait.h>
@@ -202,14 +201,12 @@ struct digi_port {
int dp_throttle_restart;
wait_queue_head_t dp_flush_wait;
wait_queue_head_t dp_close_wait; /* wait queue for close */
- struct work_struct dp_wakeup_work;
struct usb_serial_port *dp_port;
};


/* Local Function Declarations */

-static void digi_wakeup_write_lock(struct work_struct *work);
static int digi_write_oob_command(struct usb_serial_port *port,
unsigned char *buf, int count, int interruptible);
static int digi_write_inb_command(struct usb_serial_port *port,
@@ -360,26 +357,6 @@ __releases(lock)
return timeout;
}

-
-/*
- * Digi Wakeup Write
- *
- * Wake up port, line discipline, and tty processes sleeping
- * on writes.
- */
-
-static void digi_wakeup_write_lock(struct work_struct *work)
-{
- struct digi_port *priv =
- container_of(work, struct digi_port, dp_wakeup_work);
- struct usb_serial_port *port = priv->dp_port;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->dp_port_lock, flags);
- tty_port_tty_wakeup(&port->port);
- spin_unlock_irqrestore(&priv->dp_port_lock, flags);
-}
-
/*
* Digi Write OOB Command
*
@@ -990,6 +967,7 @@ static void digi_write_bulk_callback(struct urb *urb)
struct digi_serial *serial_priv;
int ret = 0;
int status = urb->status;
+ bool wakeup;

/* port and serial sanity check */
if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) {
@@ -1016,6 +994,7 @@ static void digi_write_bulk_callback(struct urb *urb)
}

/* try to send any buffered data on this port */
+ wakeup = true;
spin_lock(&priv->dp_port_lock);
priv->dp_write_urb_in_use = 0;
if (priv->dp_out_buf_len > 0) {
@@ -1031,19 +1010,18 @@ static void digi_write_bulk_callback(struct urb *urb)
if (ret == 0) {
priv->dp_write_urb_in_use = 1;
priv->dp_out_buf_len = 0;
+ wakeup = false;
}
}
- /* wake up processes sleeping on writes immediately */
- tty_port_tty_wakeup(&port->port);
- /* also queue up a wakeup at scheduler time, in case we */
- /* lost the race in write_chan(). */
- schedule_work(&priv->dp_wakeup_work);
-
spin_unlock(&priv->dp_port_lock);
+
if (ret && ret != -EPERM)
dev_err_console(port,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
+
+ if (wakeup)
+ tty_port_tty_wakeup(&port->port);
}

static int digi_write_room(struct tty_struct *tty)
@@ -1243,7 +1221,6 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
init_waitqueue_head(&priv->dp_transmit_idle_wait);
init_waitqueue_head(&priv->dp_flush_wait);
init_waitqueue_head(&priv->dp_close_wait);
- INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
priv->dp_port = port;

init_waitqueue_head(&port->write_wait);
@@ -1510,13 +1487,14 @@ static int digi_read_oob_callback(struct urb *urb)
rts = C_CRTSCTS(tty);

if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
+ bool wakeup = false;
+
spin_lock(&priv->dp_port_lock);
/* convert from digi flags to termiox flags */
if (val & DIGI_READ_INPUT_SIGNALS_CTS) {
priv->dp_modem_signals |= TIOCM_CTS;
- /* port must be open to use tty struct */
if (rts)
- tty_port_tty_wakeup(&port->port);
+ wakeup = true;
} else {
priv->dp_modem_signals &= ~TIOCM_CTS;
/* port must be open to use tty struct */
@@ -1535,6 +1513,9 @@ static int digi_read_oob_callback(struct urb *urb)
priv->dp_modem_signals &= ~TIOCM_CD;

spin_unlock(&priv->dp_port_lock);
+
+ if (wakeup)
+ tty_port_tty_wakeup(&port->port);
} else if (opcode == DIGI_CMD_TRANSMIT_IDLE) {
spin_lock(&priv->dp_port_lock);
priv->dp_transmit_idle = 1;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 6fceefcab81db..dedc7edea5178 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -118,8 +118,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
int bar;
struct vfio_pci_dummy_resource *dummy_res;

- INIT_LIST_HEAD(&vdev->dummy_resources_list);
-
for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
res = vdev->pdev->resource + bar;

@@ -1524,6 +1522,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vdev->irq_type = VFIO_PCI_NUM_IRQS;
mutex_init(&vdev->igate);
spin_lock_init(&vdev->irqlock);
+ INIT_LIST_HEAD(&vdev->dummy_resources_list);
mutex_init(&vdev->vma_lock);
INIT_LIST_HEAD(&vdev->vma_list);
init_rwsem(&vdev->memory_lock);
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index bb3f59bcfcf5b..656f9ff63edda 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -61,7 +61,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)

memset(buf, 0, info->dqi_usable_bs);
return sb->s_op->quota_read(sb, info->dqi_type, buf,
- info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
+ info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
}

static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
@@ -70,7 +70,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
ssize_t ret;

ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
- info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
+ info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
if (ret != info->dqi_usable_bs) {
quota_error(sb, "dquota write failed");
if (ret >= 0)
@@ -283,7 +283,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
blk);
goto out_buf;
}
- dquot->dq_off = (blk << info->dqi_blocksize_bits) +
+ dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
sizeof(struct qt_disk_dqdbheader) +
i * info->dqi_entry_size;
kfree(buf);
@@ -558,7 +558,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
ret = -EIO;
goto out_buf;
} else {
- ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
+ ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
qt_disk_dqdbheader) + i * info->dqi_entry_size;
}
out_buf:
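The quota_tree casts matter because blk is a 32-bit uint: with 1 KiB quota blocks, blk << 10 is evaluated in 32 bits and wraps once the byte offset passes 4 GiB; promoting to loff_t first keeps the shift 64-bit. A small demonstration (plain user-space C, illustrative values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t blk = 5000000;		/* offset > 4 GiB once shifted by 10 */
	int bits = 10;

	uint32_t truncated = blk << bits;		/* wraps modulo 2^32 */
	int64_t correct = (int64_t)blk << bits;		/* patched behaviour */

	printf("32-bit shift: %u\n64-bit shift: %lld\n",
	       truncated, (long long)correct);
	return 0;
}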
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 2946713cb00d6..5229038852ca1 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -454,6 +454,12 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
"(second one): %h", ih);
return 0;
}
+ if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
+ reiserfs_warning(NULL, "reiserfs-5093",
+ "item entry count seems wrong %h",
+ ih);
+ return 0;
+ }
prev_location = ih_location(ih);
}

diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h
index 85b5151911cfd..4856706fbfeb4 100644
--- a/include/linux/kdev_t.h
+++ b/include/linux/kdev_t.h
@@ -21,61 +21,61 @@
})

/* acceptable for old filesystems */
-static inline bool old_valid_dev(dev_t dev)
+static __always_inline bool old_valid_dev(dev_t dev)
{
return MAJOR(dev) < 256 && MINOR(dev) < 256;
}

-static inline u16 old_encode_dev(dev_t dev)
+static __always_inline u16 old_encode_dev(dev_t dev)
{
return (MAJOR(dev) << 8) | MINOR(dev);
}

-static inline dev_t old_decode_dev(u16 val)
+static __always_inline dev_t old_decode_dev(u16 val)
{
return MKDEV((val >> 8) & 255, val & 255);
}

-static inline u32 new_encode_dev(dev_t dev)
+static __always_inline u32 new_encode_dev(dev_t dev)
{
unsigned major = MAJOR(dev);
unsigned minor = MINOR(dev);
return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}

-static inline dev_t new_decode_dev(u32 dev)
+static __always_inline dev_t new_decode_dev(u32 dev)
{
unsigned major = (dev & 0xfff00) >> 8;
unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
return MKDEV(major, minor);
}

-static inline u64 huge_encode_dev(dev_t dev)
+static __always_inline u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
}

-static inline dev_t huge_decode_dev(u64 dev)
+static __always_inline dev_t huge_decode_dev(u64 dev)
{
return new_decode_dev(dev);
}

-static inline int sysv_valid_dev(dev_t dev)
+static __always_inline int sysv_valid_dev(dev_t dev)
{
return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
}

-static inline u32 sysv_encode_dev(dev_t dev)
+static __always_inline u32 sysv_encode_dev(dev_t dev)
{
return MINOR(dev) | (MAJOR(dev) << 18);
}

-static inline unsigned sysv_major(u32 dev)
+static __always_inline unsigned sysv_major(u32 dev)
{
return (dev >> 18) & 0x3fff;
}

-static inline unsigned sysv_minor(u32 dev)
+static __always_inline unsigned sysv_minor(u32 dev)
{
return dev & 0x3ffff;
}
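For reference, new_encode_dev() packs a 12-bit major and 20-bit minor into 32 bits: the minor's low byte, then the major, then the minor's high bits shifted up. A user-space round-trip check of that layout (the helpers below are reimplementations for illustration, assuming the usual MAJOR/MINOR split):

#include <assert.h>
#include <stdint.h>

/* Mirrors the kernel's new_encode_dev()/new_decode_dev() bit layout. */
static uint32_t encode(uint32_t major, uint32_t minor)
{
	return (minor & 0xff) | (major << 8) | ((minor & ~0xffu) << 12);
}

static void decode(uint32_t dev, uint32_t *major, uint32_t *minor)
{
	*major = (dev & 0xfff00) >> 8;
	*minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
}

int main(void)
{
	uint32_t major, minor;

	decode(encode(0x123, 0xabcde), &major, &minor);
	assert(major == 0x123 && minor == 0xabcde);	/* lossless round trip */
	return 0;
}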
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 69966c461d1c1..8820468635810 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -108,7 +108,10 @@ struct lruvec_stat {
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;
- struct lruvec_stat __percpu *lruvec_stat;
+
+ struct lruvec_stat __percpu *lruvec_stat_cpu;
+ atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
+
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
@@ -227,10 +230,10 @@ struct mem_cgroup {
spinlock_t move_lock;
struct task_struct *move_lock_task;
unsigned long move_lock_flags;
- /*
- * percpu counter.
- */
- struct mem_cgroup_stat_cpu __percpu *stat;
+
+ struct mem_cgroup_stat_cpu __percpu *stat_cpu;
+ atomic_long_t stat[MEMCG_NR_STAT];
+ atomic_long_t events[MEMCG_NR_EVENTS];

unsigned long socket_pressure;

@@ -265,6 +268,12 @@ struct mem_cgroup {
/* WARNING: nodeinfo must be the last member here */
};

+/*
+ * size of first charge trial. "32" comes from vmscan.c's magic value.
+ * TODO: maybe necessary to use big numbers in big irons.
+ */
+#define MEMCG_CHARGE_BATCH 32U
+
extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_disabled(void)
@@ -272,13 +281,6 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

-static inline void mem_cgroup_event(struct mem_cgroup *memcg,
- enum memcg_event_item event)
-{
- this_cpu_inc(memcg->stat->events[event]);
- cgroup_file_notify(&memcg->events_file);
-}
-
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
@@ -492,32 +494,38 @@ void unlock_page_memcg(struct page *page);
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
int idx)
{
- long val = 0;
- int cpu;
-
- for_each_possible_cpu(cpu)
- val += per_cpu(memcg->stat->count[idx], cpu);
-
- if (val < 0)
- val = 0;
-
- return val;
+ long x = atomic_long_read(&memcg->stat[idx]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
int idx, int val)
{
- if (!mem_cgroup_disabled())
- __this_cpu_add(memcg->stat->count[idx], val);
+ long x;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
+ if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
+ atomic_long_add(x, &memcg->stat[idx]);
+ x = 0;
+ }
+ __this_cpu_write(memcg->stat_cpu->count[idx], x);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
int idx, int val)
{
- if (!mem_cgroup_disabled())
- this_cpu_add(memcg->stat->count[idx], val);
+ preempt_disable();
+ __mod_memcg_state(memcg, idx, val);
+ preempt_enable();
}

/**
@@ -555,87 +563,108 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
struct mem_cgroup_per_node *pn;
- long val = 0;
- int cpu;
+ long x;

if (mem_cgroup_disabled())
return node_page_state(lruvec_pgdat(lruvec), idx);

pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- for_each_possible_cpu(cpu)
- val += per_cpu(pn->lruvec_stat->count[idx], cpu);
-
- if (val < 0)
- val = 0;
-
- return val;
+ x = atomic_long_read(&pn->lruvec_stat[idx]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
struct mem_cgroup_per_node *pn;
+ long x;

+ /* Update node */
__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+
if (mem_cgroup_disabled())
return;
+
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+
+ /* Update memcg */
__mod_memcg_state(pn->memcg, idx, val);
- __this_cpu_add(pn->lruvec_stat->count[idx], val);
+
+ /* Update lruvec */
+ x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
+ if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
+ atomic_long_add(x, &pn->lruvec_stat[idx]);
+ x = 0;
+ }
+ __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
- struct mem_cgroup_per_node *pn;
-
- mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
- if (mem_cgroup_disabled())
- return;
- pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- mod_memcg_state(pn->memcg, idx, val);
- this_cpu_add(pn->lruvec_stat->count[idx], val);
+ preempt_disable();
+ __mod_lruvec_state(lruvec, idx, val);
+ preempt_enable();
}

static inline void __mod_lruvec_page_state(struct page *page,
enum node_stat_item idx, int val)
{
- struct mem_cgroup_per_node *pn;
+ pg_data_t *pgdat = page_pgdat(page);
+ struct lruvec *lruvec;

- __mod_node_page_state(page_pgdat(page), idx, val);
- if (mem_cgroup_disabled() || !page->mem_cgroup)
+ /* Untracked pages have no memcg, no lruvec. Update only the node */
+ if (!page->mem_cgroup) {
+ __mod_node_page_state(pgdat, idx, val);
return;
- __mod_memcg_state(page->mem_cgroup, idx, val);
- pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
- __this_cpu_add(pn->lruvec_stat->count[idx], val);
+ }
+
+ lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
+ __mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
enum node_stat_item idx, int val)
{
- struct mem_cgroup_per_node *pn;
-
- mod_node_page_state(page_pgdat(page), idx, val);
- if (mem_cgroup_disabled() || !page->mem_cgroup)
- return;
- mod_memcg_state(page->mem_cgroup, idx, val);
- pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
- this_cpu_add(pn->lruvec_stat->count[idx], val);
+ preempt_disable();
+ __mod_lruvec_page_state(page, idx, val);
+ preempt_enable();
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);

+/* idx can be of type enum memcg_event_item or vm_event_item */
+static inline void __count_memcg_events(struct mem_cgroup *memcg,
+ int idx, unsigned long count)
+{
+ unsigned long x;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
+ if (unlikely(x > MEMCG_CHARGE_BATCH)) {
+ atomic_long_add(x, &memcg->events[idx]);
+ x = 0;
+ }
+ __this_cpu_write(memcg->stat_cpu->events[idx], x);
+}
+
static inline void count_memcg_events(struct mem_cgroup *memcg,
- enum vm_event_item idx,
- unsigned long count)
+ int idx, unsigned long count)
{
- if (!mem_cgroup_disabled())
- this_cpu_add(memcg->stat->events[idx], count);
+ preempt_disable();
+ __count_memcg_events(memcg, idx, count);
+ preempt_enable();
}

-/* idx can be of type enum memcg_stat_item or node_stat_item */
+/* idx can be of type enum memcg_event_item or vm_event_item */
static inline void count_memcg_page_event(struct page *page,
int idx)
{
@@ -654,12 +683,20 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
if (likely(memcg)) {
- this_cpu_inc(memcg->stat->events[idx]);
+ count_memcg_events(memcg, idx, 1);
if (idx == OOM_KILL)
cgroup_file_notify(&memcg->events_file);
}
rcu_read_unlock();
}
+
+static inline void mem_cgroup_event(struct mem_cgroup *memcg,
+ enum memcg_event_item event)
+{
+ count_memcg_events(memcg, event, 1);
+ cgroup_file_notify(&memcg->events_file);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif
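The memcontrol.h rework replaces sum-over-all-CPUs reads with a batched scheme: each CPU accumulates into a local counter and folds into the shared atomic only once the delta exceeds MEMCG_CHARGE_BATCH, so readers do one atomic_long_read() instead of touching every CPU. A single-threaded sketch of the flush-on-threshold idea (plain C stand-ins for the per-CPU primitives, not the kernel code):

#include <stdio.h>

#define BATCH 32L

static long shared;	/* stands in for the atomic_long_t */
static long local;	/* stands in for this CPU's stat_cpu slot */

/* Fold into the shared counter only when the local delta grows large. */
static void mod_state(long val)
{
	long x = local + val;

	if (x > BATCH || x < -BATCH) {
		shared += x;	/* atomic_long_add() in the kernel */
		x = 0;
	}
	local = x;
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		mod_state(1);
	/* shared + local always equals the true total (here, 100) */
	printf("shared=%ld local=%ld sum=%ld\n", shared, local, shared + local);
	return 0;
}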
diff --git a/include/linux/of.h b/include/linux/of.h
index 3c108f9be5e7c..af10856159226 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1163,6 +1163,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
__used __section(__##table##_of_table) \
+ __aligned(__alignof__(struct of_device_id)) \
= { .compatible = compat, \
.data = (fn == (fn_type)NULL) ? fn : fn }
#else
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index 92537757590aa..dab9f34383e5b 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -25,4 +25,9 @@
#define _BITUL(x) (_AC(1,UL) << (x))
#define _BITULL(x) (_AC(1,ULL) << (x))

+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
#endif /* !(_LINUX_CONST_H) */
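Moving __ALIGN_KERNEL() and __KERNEL_DIV_ROUND_UP() into the UAPI const.h is what lets the headers below drop linux/kernel.h. The macros round up to a power-of-two boundary and divide rounding up; a quick user-space check of both (macros copied verbatim; note typeof is a GCC/Clang extension, as in the kernel):

#include <assert.h>

#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	assert(__ALIGN_KERNEL(5u, 4u) == 8u);	/* next multiple of 4 */
	assert(__ALIGN_KERNEL(8u, 4u) == 8u);	/* already aligned */
	assert(__KERNEL_DIV_ROUND_UP(10, 4) == 3);
	return 0;
}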
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 9eae13eefc49e..1e3f1a43bf1d9 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -14,7 +14,7 @@
#ifndef _UAPI_LINUX_ETHTOOL_H
#define _UAPI_LINUX_ETHTOOL_H

-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/if_ether.h>

diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h
index 0ff8f7477847c..fadf2db71fe8a 100644
--- a/include/uapi/linux/kernel.h
+++ b/include/uapi/linux/kernel.h
@@ -3,13 +3,6 @@
#define _UAPI_LINUX_KERNEL_H

#include <linux/sysinfo.h>
-
-/*
- * 'kernel.h' contains some often-used function prototypes etc
- */
-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
-#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
-
-#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#include <linux/const.h>

#endif /* _UAPI_LINUX_KERNEL_H */
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index 42d1a434af296..0d44ebba00932 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -21,7 +21,7 @@
#define _UAPI_LINUX_LIGHTNVM_H

#ifdef __KERNEL__
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/ioctl.h>
#else /* __KERNEL__ */
#include <stdio.h>
diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
index 9999cc006390d..1617eb9949a5d 100644
--- a/include/uapi/linux/mroute6.h
+++ b/include/uapi/linux/mroute6.h
@@ -2,7 +2,7 @@
#ifndef _UAPI__LINUX_MROUTE6_H
#define _UAPI__LINUX_MROUTE6_H

-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/in6.h> /* For struct sockaddr_in6. */
diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h
index a8283f7dbc519..b8c6bb233ac1c 100644
--- a/include/uapi/linux/netfilter/x_tables.h
+++ b/include/uapi/linux/netfilter/x_tables.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_X_TABLES_H
#define _UAPI_X_TABLES_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>

#define XT_FUNCTION_MAXNAMELEN 30
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 776bc92e91180..3481cde43a841 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -2,7 +2,7 @@
#ifndef _UAPI__LINUX_NETLINK_H
#define _UAPI__LINUX_NETLINK_H

-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/socket.h> /* for __kernel_sa_family_t */
#include <linux/types.h>

diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 0f272818a4d27..5fc0b7fd08470 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -23,7 +23,7 @@
#ifndef _UAPI_LINUX_SYSCTL_H
#define _UAPI_LINUX_SYSCTL_H

-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>

diff --git a/kernel/module.c b/kernel/module.c
index 2806c9b6577c1..0b2654592d3a7 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1789,7 +1789,6 @@ static int mod_sysfs_init(struct module *mod)
if (err)
mod_kobject_put(mod);

- /* delay uevent until full sysfs population */
out:
return err;
}
@@ -1826,7 +1825,6 @@ static int mod_sysfs_setup(struct module *mod,
add_sect_attrs(mod, info);
add_notes_attrs(mod, info);

- kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
return 0;

out_unreg_modinfo_attrs:
@@ -3481,6 +3479,9 @@ static noinline int do_init_module(struct module *mod)
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_LIVE, mod);

+ /* Delay uevent until module has finished its init routine */
+ kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
+
/*
* We need to finish all async code before the module init sequence
* is done. This has potential to deadlock. For example, a newly
@@ -3801,6 +3802,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
MODULE_STATE_GOING, mod);
klp_module_going(mod);
bug_cleanup:
+ mod->state = MODULE_STATE_GOING;
/* module_bug_cleanup needs module_mutex protection */
mutex_lock(&module_mutex);
module_bug_cleanup(mod);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 70707d44a6903..4e763cdccb335 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -542,39 +542,10 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
return mz;
}

-/*
- * Return page count for single (non recursive) @memcg.
- *
- * Implementation Note: reading percpu statistics for memcg.
- *
- * Both of vmstat[] and percpu_counter has threshold and do periodic
- * synchronization to implement "quick" read. There are trade-off between
- * reading cost and precision of value. Then, we may have a chance to implement
- * a periodic synchronization of counter in memcg's counter.
- *
- * But this _read() function is used for user interface now. The user accounts
- * memory usage by memory cgroup and he _always_ requires exact value because
- * he accounts memory. Even if we provide quick-and-fuzzy read, we always
- * have to visit all online cpus and make sum. So, for now, unnecessary
- * synchronization is not implemented. (just implemented for cpu hotplug)
- *
- * If there are kernel internal actions which can make use of some not-exact
- * value, and reading all cpu value can be performance bottleneck in some
- * common workload, threshold and synchronization as vmstat[] should be
- * implemented.
- *
- * The parameter idx can be of type enum memcg_event_item or vm_event_item.
- */
-
static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
int event)
{
- unsigned long val = 0;
- int cpu;
-
- for_each_possible_cpu(cpu)
- val += per_cpu(memcg->stat->events[event], cpu);
- return val;
+ return atomic_long_read(&memcg->events[event]);
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
@@ -586,27 +557,27 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
* counted as CACHE even if it's on ANON LRU.
*/
if (PageAnon(page))
- __this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
+ __mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
else {
- __this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
+ __mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
if (PageSwapBacked(page))
- __this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
+ __mod_memcg_state(memcg, NR_SHMEM, nr_pages);
}

if (compound) {
VM_BUG_ON_PAGE(!PageTransHuge(page), page);
- __this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
+ __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
}

/* pagein of a big page is an event. So, ignore page size */
if (nr_pages > 0)
- __this_cpu_inc(memcg->stat->events[PGPGIN]);
+ __count_memcg_events(memcg, PGPGIN, 1);
else {
- __this_cpu_inc(memcg->stat->events[PGPGOUT]);
+ __count_memcg_events(memcg, PGPGOUT, 1);
nr_pages = -nr_pages; /* for event */
}

- __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
+ __this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
@@ -642,8 +613,8 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
{
unsigned long val, next;

- val = __this_cpu_read(memcg->stat->nr_page_events);
- next = __this_cpu_read(memcg->stat->targets[target]);
+ val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
+ next = __this_cpu_read(memcg->stat_cpu->targets[target]);
/* from time_after() in jiffies.h */
if ((long)(next - val) < 0) {
switch (target) {
@@ -659,7 +630,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
default:
break;
}
- __this_cpu_write(memcg->stat->targets[target], next);
+ __this_cpu_write(memcg->stat_cpu->targets[target], next);
return true;
}
return false;
@@ -1726,11 +1697,6 @@ void unlock_page_memcg(struct page *page)
}
EXPORT_SYMBOL(unlock_page_memcg);

-/*
- * size of first charge trial. "32" comes from vmscan.c's magic value.
- * TODO: maybe necessary to use big numbers in big irons.
- */
-#define CHARGE_BATCH 32U
struct memcg_stock_pcp {
struct mem_cgroup *cached; /* this never be root cgroup */
unsigned int nr_pages;
@@ -1758,7 +1724,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
unsigned long flags;
bool ret = false;

- if (nr_pages > CHARGE_BATCH)
+ if (nr_pages > MEMCG_CHARGE_BATCH)
return ret;

local_irq_save(flags);
@@ -1827,7 +1793,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
}
stock->nr_pages += nr_pages;

- if (stock->nr_pages > CHARGE_BATCH)
+ if (stock->nr_pages > MEMCG_CHARGE_BATCH)
drain_stock(stock);

local_irq_restore(flags);
@@ -1877,9 +1843,44 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
struct memcg_stock_pcp *stock;
+ struct mem_cgroup *memcg;

stock = &per_cpu(memcg_stock, cpu);
drain_stock(stock);
+
+ for_each_mem_cgroup(memcg) {
+ int i;
+
+ for (i = 0; i < MEMCG_NR_STAT; i++) {
+ int nid;
+ long x;
+
+ x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
+ if (x)
+ atomic_long_add(x, &memcg->stat[i]);
+
+ if (i >= NR_VM_NODE_STAT_ITEMS)
+ continue;
+
+ for_each_node(nid) {
+ struct mem_cgroup_per_node *pn;
+
+ pn = mem_cgroup_nodeinfo(memcg, nid);
+ x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
+ if (x)
+ atomic_long_add(x, &pn->lruvec_stat[i]);
+ }
+ }
+
+ for (i = 0; i < MEMCG_NR_EVENTS; i++) {
+ long x;
+
+ x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
+ if (x)
+ atomic_long_add(x, &memcg->events[i]);
+ }
+ }
+
return 0;
}

@@ -1900,7 +1901,7 @@ static void high_work_func(struct work_struct *work)
struct mem_cgroup *memcg;

memcg = container_of(work, struct mem_cgroup, high_work);
- reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
+ reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
}

/*
@@ -1924,7 +1925,7 @@ void mem_cgroup_handle_over_high(void)
static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
unsigned int nr_pages)
{
- unsigned int batch = max(CHARGE_BATCH, nr_pages);
+ unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct mem_cgroup *mem_over_limit;
struct page_counter *counter;
@@ -2444,18 +2445,11 @@ void mem_cgroup_split_huge_fixup(struct page *head)
for (i = 1; i < HPAGE_PMD_NR; i++)
head[i].mem_cgroup = head->mem_cgroup;

- __this_cpu_sub(head->mem_cgroup->stat->count[MEMCG_RSS_HUGE],
- HPAGE_PMD_NR);
+ __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MEMCG_SWAP
-static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
- int nr_entries)
-{
- this_cpu_add(memcg->stat->count[MEMCG_SWAP], nr_entries);
-}
-
/**
* mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
* @entry: swap entry to be moved
@@ -2479,8 +2473,8 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
new_id = mem_cgroup_id(to);

if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
- mem_cgroup_swap_statistics(from, -1);
- mem_cgroup_swap_statistics(to, 1);
+ mod_memcg_state(from, MEMCG_SWAP, -1);
+ mod_memcg_state(to, MEMCG_SWAP, 1);
return 0;
}
return -EINVAL;
@@ -4210,8 +4204,8 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
if (!pn)
return 1;

- pn->lruvec_stat = alloc_percpu(struct lruvec_stat);
- if (!pn->lruvec_stat) {
+ pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
+ if (!pn->lruvec_stat_cpu) {
kfree(pn);
return 1;
}
@@ -4232,7 +4226,7 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
if (!pn)
return;

- free_percpu(pn->lruvec_stat);
+ free_percpu(pn->lruvec_stat_cpu);
kfree(pn);
}

@@ -4242,7 +4236,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)

for_each_node(node)
free_mem_cgroup_per_node_info(memcg, node);
- free_percpu(memcg->stat);
+ free_percpu(memcg->stat_cpu);
kfree(memcg);
}

@@ -4271,8 +4265,8 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
if (memcg->id.id < 0)
goto fail;

- memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
- if (!memcg->stat)
+ memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu);
+ if (!memcg->stat_cpu)
goto fail;

for_each_node(node)
@@ -4632,8 +4626,8 @@ static int mem_cgroup_move_account(struct page *page,
spin_lock_irqsave(&from->move_lock, flags);

if (!anon && page_mapped(page)) {
- __this_cpu_sub(from->stat->count[NR_FILE_MAPPED], nr_pages);
- __this_cpu_add(to->stat->count[NR_FILE_MAPPED], nr_pages);
+ __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
+ __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
}

/*
@@ -4645,16 +4639,14 @@ static int mem_cgroup_move_account(struct page *page,
struct address_space *mapping = page_mapping(page);

if (mapping_cap_account_dirty(mapping)) {
- __this_cpu_sub(from->stat->count[NR_FILE_DIRTY],
- nr_pages);
- __this_cpu_add(to->stat->count[NR_FILE_DIRTY],
- nr_pages);
+ __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
+ __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
}
}

if (PageWriteback(page)) {
- __this_cpu_sub(from->stat->count[NR_WRITEBACK], nr_pages);
- __this_cpu_add(to->stat->count[NR_WRITEBACK], nr_pages);
+ __mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
+ __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
}

/*
@@ -5690,12 +5682,12 @@ static void uncharge_batch(const struct uncharge_gather *ug)
}

local_irq_save(flags);
- __this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS], ug->nr_anon);
- __this_cpu_sub(ug->memcg->stat->count[MEMCG_CACHE], ug->nr_file);
- __this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS_HUGE], ug->nr_huge);
- __this_cpu_sub(ug->memcg->stat->count[NR_SHMEM], ug->nr_shmem);
- __this_cpu_add(ug->memcg->stat->events[PGPGOUT], ug->pgpgout);
- __this_cpu_add(ug->memcg->stat->nr_page_events, nr_pages);
+ __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
+ __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
+ __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
+ __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
+ __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
+ __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
local_irq_restore(flags);

@@ -5926,7 +5918,7 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
if (in_softirq())
gfp_mask = GFP_NOWAIT;

- this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
+ mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);

if (try_charge(memcg, gfp_mask, nr_pages) == 0)
return true;
@@ -5947,7 +5939,7 @@ void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
return;
}

- this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
+ mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);

refill_stock(memcg, nr_pages);
}
@@ -6071,7 +6063,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
nr_entries);
VM_BUG_ON_PAGE(oldid, page);
- mem_cgroup_swap_statistics(swap_memcg, nr_entries);
+ mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);

page->mem_cgroup = NULL;

@@ -6137,7 +6129,7 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
mem_cgroup_id_get_many(memcg, nr_pages - 1);
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
VM_BUG_ON_PAGE(oldid, page);
- mem_cgroup_swap_statistics(memcg, nr_pages);
+ mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);

return 0;
}
@@ -6165,7 +6157,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
else
page_counter_uncharge(&memcg->memsw, nr_pages);
}
- mem_cgroup_swap_statistics(memcg, -nr_pages);
+ mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
mem_cgroup_id_put_many(memcg, nr_pages);
}
rcu_read_unlock();
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 071e09c3d8557..c78db361cbbaa 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -721,8 +721,13 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
runtime->boundary *= 2;

/* clear the buffer for avoiding possible kernel info leaks */
- if (runtime->dma_area && !substream->ops->copy_user)
- memset(runtime->dma_area, 0, runtime->dma_bytes);
+ if (runtime->dma_area && !substream->ops->copy_user) {
+ size_t size = runtime->dma_bytes;
+
+ if (runtime->info & SNDRV_PCM_INFO_MMAP)
+ size = PAGE_ALIGN(size);
+ memset(runtime->dma_area, 0, size);
+ }

snd_pcm_timer_resolution_change(substream);
snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
index 719093489a2c4..7909cf6040e3d 100644
--- a/sound/core/seq/seq_queue.h
+++ b/sound/core/seq/seq_queue.h
@@ -40,10 +40,10 @@ struct snd_seq_queue {

struct snd_seq_timer *timer; /* time keeper for this queue */
int owner; /* client that 'owns' the timer */
- unsigned int locked:1, /* timer is only accesibble by owner if set */
- klocked:1, /* kernel lock (after START) */
- check_again:1,
- check_blocked:1;
+ bool locked; /* timer is only accesibble by owner if set */
+ bool klocked; /* kernel lock (after START) */
+ bool check_again; /* concurrent access happened during check */
+ bool check_blocked; /* queue being checked */

unsigned int flags; /* status flags */
unsigned int info_flags; /* info for sync */
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 92f5f452bee2b..369f812d70722 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -4443,11 +4443,10 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
/* Delay enabling the HP amp, to let the mic-detection
* state machine run.
*/
- cancel_delayed_work(&spec->unsol_hp_work);
- schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
tbl = snd_hda_jack_tbl_get(codec, cb->nid);
if (tbl)
tbl->block_report = 1;
+ schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
}

static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
@@ -4625,12 +4624,25 @@ static void ca0132_free(struct hda_codec *codec)
kfree(codec->spec);
}

+#ifdef CONFIG_PM
+static int ca0132_suspend(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ cancel_delayed_work_sync(&spec->unsol_hp_work);
+ return 0;
+}
+#endif
+
static const struct hda_codec_ops ca0132_patch_ops = {
.build_controls = ca0132_build_controls,
.build_pcms = ca0132_build_pcms,
.init = ca0132_init,
.free = ca0132_free,
.unsol_event = snd_hda_jack_unsol_event,
+#ifdef CONFIG_PM
+ .suspend = ca0132_suspend,
+#endif
};

static void ca0132_config(struct hda_codec *codec)
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 6caf94581a0e8..ecdbdb26164ea 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -324,6 +324,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
struct usb_host_interface *alts;
struct usb_interface *iface;
unsigned int ep;
+ unsigned int ifnum;

/* Implicit feedback sync EPs consumers are always playback EPs */
if (subs->direction != SNDRV_PCM_STREAM_PLAYBACK)
@@ -334,44 +335,23 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
case USB_ID(0x22f0, 0x0006): /* Allen&Heath Qu-16 */
ep = 0x81;
- iface = usb_ifnum_to_if(dev, 3);
-
- if (!iface || iface->num_altsetting == 0)
- return -EINVAL;
-
- alts = &iface->altsetting[1];
- goto add_sync_ep;
- break;
+ ifnum = 3;
+ goto add_sync_ep_from_ifnum;
case USB_ID(0x0763, 0x2080): /* M-Audio FastTrack Ultra */
case USB_ID(0x0763, 0x2081):
ep = 0x81;
- iface = usb_ifnum_to_if(dev, 2);
-
- if (!iface || iface->num_altsetting == 0)
- return -EINVAL;
-
- alts = &iface->altsetting[1];
- goto add_sync_ep;
- case USB_ID(0x2466, 0x8003):
+ ifnum = 2;
+ goto add_sync_ep_from_ifnum;
+ case USB_ID(0x2466, 0x8003): /* Fractal Audio Axe-Fx II */
ep = 0x86;
- iface = usb_ifnum_to_if(dev, 2);
-
- if (!iface || iface->num_altsetting == 0)
- return -EINVAL;
-
- alts = &iface->altsetting[1];
- goto add_sync_ep;
- case USB_ID(0x1397, 0x0002):
+ ifnum = 2;
+ goto add_sync_ep_from_ifnum;
+ case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
ep = 0x81;
- iface = usb_ifnum_to_if(dev, 1);
-
- if (!iface || iface->num_altsetting == 0)
- return -EINVAL;
-
- alts = &iface->altsetting[1];
- goto add_sync_ep;
-
+ ifnum = 1;
+ goto add_sync_ep_from_ifnum;
}
+
if (attr == USB_ENDPOINT_SYNC_ASYNC &&
altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
altsd->bInterfaceProtocol == 2 &&
@@ -386,6 +366,14 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
/* No quirk */
return 0;

+add_sync_ep_from_ifnum:
+ iface = usb_ifnum_to_if(dev, ifnum);
+
+ if (!iface || iface->num_altsetting < 2)
+ return -EINVAL;
+
+ alts = &iface->altsetting[1];
+
add_sync_ep:
subs->sync_endpoint = snd_usb_add_endpoint(subs->stream->chip,
alts, ep, !subs->direction,