The following commit has been merged into the master branch:
commit 6450d9724b98a6668e2f0b34b5e5be78e0219d7b
Merge: 8193bec657872f79173b270e5e6d7061ab7db55e 2750e3608b796cd913564387ffd40f276e50ad21
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Fri Jul 31 19:22:35 2020 +1000
Merge remote-tracking branch 'set_fs/set_fs-rw'
# Conflicts:
# arch/powerpc/mm/numa.c
# drivers/acpi/battery.c
# drivers/power/supply/da9030_battery.c
# lib/debugobjects.c
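
Much of the combined diff below carries one mechanical change from the set_fs-rw branch: seq_file-backed file_operations and proc_ops move from .read / .proc_read (seq_read) to .read_iter / .proc_read_iter (seq_read_iter). As a minimal sketch of what a converted user looks like — hypothetical example names (example_show, example_open, example_fops), not taken from any file in this merge:

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/seq_file.h>

    /* Hypothetical seq_file user; names are illustrative only. */
    static int example_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "example\n");
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, inode->i_private);
    }

    static const struct file_operations example_fops = {
            .owner      = THIS_MODULE,
            .open       = example_open,
            .read_iter  = seq_read_iter,  /* was: .read = seq_read */
            .llseek     = seq_lseek,
            .release    = single_release,
    };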
diff --combined Documentation/process/clang-format.rst
index 82676e5a7c6e,7f04493cba4f..57186ae91365
--- a/Documentation/process/clang-format.rst
+++ b/Documentation/process/clang-format.rst
@@@ -32,7 -32,7 +32,7 @@@ Linux distributions for a long time. Se
your repositories. Otherwise, you can either download pre-built
LLVM/clang binaries or build the source code from:
- http://releases.llvm.org/download.html
+ https://releases.llvm.org/download.html
See more information about the tool at:
@@@ -136,7 -136,7 +136,7 @@@ In particular, some very common ones yo
static const struct file_operations uprobe_events_ops = {
.owner = THIS_MODULE,
.open = probes_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
.write = probes_write,
@@@ -147,7 -147,7 +147,7 @@@
static const struct file_operations uprobe_events_ops = {
.owner = THIS_MODULE,
.open = probes_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
.write = probes_write,
diff --combined arch/arm/mm/alignment.c
index f4bfc1cac91a,412cab88402a..8666935340db
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@@ -164,7 -164,7 +164,7 @@@ static ssize_t alignment_proc_write(str
static const struct proc_ops alignment_proc_ops = {
.proc_open = alignment_proc_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = alignment_proc_write,
@@@ -799,7 -799,7 +799,7 @@@ static int alignment_get_thumb(struct p
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
- union offset_union uninitialized_var(offset);
+ union offset_union offset;
unsigned long instrptr;
int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
unsigned int type;
diff --combined arch/powerpc/mm/ptdump/hashpagetable.c
index 5b8bd34cd3a1,94a6124f9740..930d9cfd6316
--- a/arch/powerpc/mm/ptdump/hashpagetable.c
+++ b/arch/powerpc/mm/ptdump/hashpagetable.c
@@@ -258,7 -258,7 +258,7 @@@ static int pseries_find(unsigned long e
for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
- if (lpar_rc != H_SUCCESS)
+ if (lpar_rc)
continue;
for (j = 0; j < 4; j++) {
if (HPTE_V_COMPARE(ptes[j].v, want_v) &&
@@@ -533,7 -533,7 +533,7 @@@ static int ptdump_open(struct inode *in
static const struct file_operations ptdump_fops = {
.open = ptdump_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined arch/powerpc/mm/ptdump/ptdump.c
index c911cd757f7d,2306235b72a3..0105376637c0
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@@ -74,10 -74,6 +74,10 @@@ struct addr_marker
static struct addr_marker address_markers[] = {
{ 0, "Start of kernel VM" },
+#ifdef MODULES_VADDR
+ { 0, "modules start" },
+ { 0, "modules end" },
+#endif
{ 0, "vmalloc() Area" },
{ 0, "vmalloc() End" },
#ifdef CONFIG_PPC64
@@@ -199,24 -195,6 +199,24 @@@ static void note_prot_wx(struct pg_stat
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
+static void note_page_update_state(struct pg_state *st, unsigned long addr,
+ unsigned int level, u64 val, unsigned long page_size)
+{
+ u64 flag = val & pg_level[level].mask;
+ u64 pa = val & PTE_RPN_MASK;
+
+ st->level = level;
+ st->current_flags = flag;
+ st->start_address = addr;
+ st->start_pa = pa;
+ st->page_size = page_size;
+
+ while (addr >= st->marker[1].start_address) {
+ st->marker++;
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ }
+}
+
static void note_page(struct pg_state *st, unsigned long addr,
unsigned int level, u64 val, unsigned long page_size)
{
@@@ -225,8 -203,13 +225,8 @@@
/* At first no level is set */
if (!st->level) {
- st->level = level;
- st->current_flags = flag;
- st->start_address = addr;
- st->start_pa = pa;
- st->last_pa = pa;
- st->page_size = page_size;
pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ note_page_update_state(st, addr, level, val, page_size);
/*
* Dump the section of virtual memory when:
* - the PTE flags from one entry to the next differs.
@@@ -258,9 -241,19 +258,9 @@@
* Address indicates we have passed the end of the
* current section of virtual memory
*/
- while (addr >= st->marker[1].start_address) {
- st->marker++;
- pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
- }
- st->start_address = addr;
- st->start_pa = pa;
- st->last_pa = pa;
- st->page_size = page_size;
- st->current_flags = flag;
- st->level = level;
- } else {
- st->last_pa = pa;
+ note_page_update_state(st, addr, level, val, page_size);
}
+ st->last_pa = pa;
}
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
@@@ -355,15 -348,7 +355,15 @@@ static void populate_markers(void
{
int i = 0;
+#ifdef CONFIG_PPC64
address_markers[i++].start_address = PAGE_OFFSET;
+#else
+ address_markers[i++].start_address = TASK_SIZE;
+#endif
+#ifdef MODULES_VADDR
+ address_markers[i++].start_address = MODULES_VADDR;
+ address_markers[i++].start_address = MODULES_END;
+#endif
address_markers[i++].start_address = VMALLOC_START;
address_markers[i++].start_address = VMALLOC_END;
#ifdef CONFIG_PPC64
@@@ -400,7 -385,7 +400,7 @@@ static int ptdump_show(struct seq_file
struct pg_state st = {
.seq = m,
.marker = address_markers,
- .start_address = PAGE_OFFSET,
+ .start_address = IS_ENABLED(CONFIG_PPC64) ? PAGE_OFFSET : TASK_SIZE,
};
#ifdef CONFIG_PPC64
@@@ -422,7 -407,7 +422,7 @@@ static int ptdump_open(struct inode *in
static const struct file_operations ptdump_fops = {
.open = ptdump_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -444,7 -429,7 +444,7 @@@ void ptdump_check_wx(void
.seq = NULL,
.marker = address_markers,
.check_wx = true,
- .start_address = PAGE_OFFSET,
+ .start_address = IS_ENABLED(CONFIG_PPC64) ? PAGE_OFFSET : TASK_SIZE,
};
#ifdef CONFIG_PPC64
diff --combined arch/powerpc/platforms/pseries/lpar.c
index baf24eacd268,2b13a67d60e2..8acb1d365de7
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@@ -40,7 -40,6 +40,7 @@@
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>
#include <asm/debugfs.h>
+#include <asm/dtl.h>
#include "pseries.h"
@@@ -585,7 -584,7 +585,7 @@@ static int vcpudispatch_stats_open(stru
static const struct proc_ops vcpudispatch_stats_proc_ops = {
.proc_open = vcpudispatch_stats_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_write = vcpudispatch_stats_write,
.proc_lseek = seq_lseek,
.proc_release = single_release,
@@@ -629,7 -628,7 +629,7 @@@ static int vcpudispatch_stats_freq_open
static const struct proc_ops vcpudispatch_stats_freq_proc_ops = {
.proc_open = vcpudispatch_stats_freq_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_write = vcpudispatch_stats_freq_write,
.proc_lseek = seq_lseek,
.proc_release = single_release,
@@@ -1681,11 -1680,9 +1681,11 @@@ static int pseries_lpar_register_proces
if (table_size)
flags |= PROC_TABLE_NEW;
- if (radix_enabled())
- flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
- else
+ if (radix_enabled()) {
+ flags |= PROC_TABLE_RADIX;
+ if (mmu_has_feature(MMU_FTR_GTSE))
+ flags |= PROC_TABLE_GTSE;
+ } else
flags |= PROC_TABLE_HPT_SLB;
for (;;) {
rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
diff --combined arch/um/kernel/process.c
index 26b5e243d3fc,f3e4bd48f6d5..77caa0202fc3
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@@ -152,7 -152,7 +152,7 @@@ void fork_handler(void
userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}
-int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct * p, unsigned long tls)
{
void (*handler)(void);
@@@ -312,7 -312,7 +312,7 @@@ static ssize_t sysemu_proc_write(struc
static const struct proc_ops sysemu_proc_ops = {
.proc_open = sysemu_proc_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = sysemu_proc_write,
diff --combined arch/x86/mm/pat/set_memory.c
index d1b2a889f035,95b12dd5bf0e..355de93199ee
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@@ -135,7 -135,7 +135,7 @@@ static inline void cpa_inc_2m_checked(v
static inline void cpa_inc_4k_install(void)
{
- cpa_4k_install++;
+ data_race(cpa_4k_install++);
}
static inline void cpa_inc_lp_sameprot(int level)
@@@ -173,7 -173,7 +173,7 @@@ static int cpastats_open(struct inode *
static const struct file_operations cpastats_fops = {
.open = cpastats_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined arch/x86/platform/uv/tlb_uv.c
index 62ea907668f8,cd6ee86283e6..3639beab8a7b
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@@ -23,6 -23,18 +23,6 @@@
static struct bau_operations ops __ro_after_init;
-/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
-static const int timeout_base_ns[] = {
- 20,
- 160,
- 1280,
- 10240,
- 81920,
- 655360,
- 5242880,
- 167772160
-};
-
static int timeout_us;
static bool nobau = true;
static int nobau_perm;
@@@ -498,6 -510,70 +498,6 @@@ static inline void end_uvhub_quiesce(st
atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}
-static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
-{
- unsigned long descriptor_status;
-
- descriptor_status = uv_read_local_mmr(mmr_offset);
- descriptor_status >>= right_shift;
- descriptor_status &= UV_ACT_STATUS_MASK;
- return descriptor_status;
-}
-
-/*
- * Wait for completion of a broadcast software ack message
- * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
- */
-static int uv1_wait_completion(struct bau_desc *bau_desc,
- struct bau_control *bcp, long try)
-{
- unsigned long descriptor_status;
- cycles_t ttm;
- u64 mmr_offset = bcp->status_mmr;
- int right_shift = bcp->status_index;
- struct ptc_stats *stat = bcp->statp;
-
- descriptor_status = uv1_read_status(mmr_offset, right_shift);
- /* spin on the status MMR, waiting for it to go idle */
- while ((descriptor_status != DS_IDLE)) {
- /*
- * Our software ack messages may be blocked because
- * there are no swack resources available. As long
- * as none of them has timed out hardware will NACK
- * our message and its state will stay IDLE.
- */
- if (descriptor_status == DS_SOURCE_TIMEOUT) {
- stat->s_stimeout++;
- return FLUSH_GIVEUP;
- } else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
- stat->s_dtimeout++;
- ttm = get_cycles();
-
- /*
- * Our retries may be blocked by all destination
- * swack resources being consumed, and a timeout
- * pending. In that case hardware returns the
- * ERROR that looks like a destination timeout.
- */
- if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
- bcp->conseccompletes = 0;
- return FLUSH_RETRY_PLUGGED;
- }
-
- bcp->conseccompletes = 0;
- return FLUSH_RETRY_TIMEOUT;
- } else {
- /*
- * descriptor_status is still BUSY
- */
- cpu_relax();
- }
- descriptor_status = uv1_read_status(mmr_offset, right_shift);
- }
- bcp->conseccompletes++;
- return FLUSH_COMPLETE;
-}
-
/*
* UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
* But not currently used.
@@@ -776,6 -852,24 +776,6 @@@ static void record_send_stats(cycles_t
}
}
-/*
- * Because of a uv1 hardware bug only a limited number of concurrent
- * requests can be made.
- */
-static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
-{
- spinlock_t *lock = &hmaster->uvhub_lock;
- atomic_t *v;
-
- v = &hmaster->active_descriptor_count;
- if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
- stat->s_throttles++;
- do {
- cpu_relax();
- } while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
- }
-}
-
/*
* Handle the completion status of a message send.
*/
@@@ -805,30 -899,50 +805,30 @@@ static int uv_flush_send_and_wait(struc
{
int seq_number = 0;
int completion_stat = 0;
- int uv1 = 0;
long try = 0;
unsigned long index;
cycles_t time1;
cycles_t time2;
struct ptc_stats *stat = bcp->statp;
struct bau_control *hmaster = bcp->uvhub_master;
- struct uv1_bau_msg_header *uv1_hdr = NULL;
struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;
- if (bcp->uvhub_version == UV_BAU_V1) {
- uv1 = 1;
- uv1_throttle(hmaster, stat);
- }
-
while (hmaster->uvhub_quiesce)
cpu_relax();
time1 = get_cycles();
- if (uv1)
- uv1_hdr = &bau_desc->header.uv1_hdr;
- else
- /* uv2 and uv3 */
- uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
+ uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
do {
if (try == 0) {
- if (uv1)
- uv1_hdr->msg_type = MSG_REGULAR;
- else
- uv2_3_hdr->msg_type = MSG_REGULAR;
+ uv2_3_hdr->msg_type = MSG_REGULAR;
seq_number = bcp->message_number++;
} else {
- if (uv1)
- uv1_hdr->msg_type = MSG_RETRY;
- else
- uv2_3_hdr->msg_type = MSG_RETRY;
+ uv2_3_hdr->msg_type = MSG_RETRY;
stat->s_retry_messages++;
}
- if (uv1)
- uv1_hdr->sequence = seq_number;
- else
- uv2_3_hdr->sequence = seq_number;
+ uv2_3_hdr->sequence = seq_number;
index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
bcp->send_message = get_cycles();
@@@ -1048,10 -1162,11 +1048,10 @@@ const struct cpumask *uv_flush_tlb_othe
address = TLB_FLUSH_ALL;
switch (bcp->uvhub_version) {
- case UV_BAU_V1:
case UV_BAU_V2:
case UV_BAU_V3:
- bau_desc->payload.uv1_2_3.address = address;
- bau_desc->payload.uv1_2_3.sending_cpu = cpu;
+ bau_desc->payload.uv2_3.address = address;
+ bau_desc->payload.uv2_3.sending_cpu = cpu;
break;
case UV_BAU_V4:
bau_desc->payload.uv4.address = address;
@@@ -1185,7 -1300,7 +1185,7 @@@ DEFINE_IDTENTRY_SYSVEC(sysvec_uv_bau_me
if (bcp->uvhub_version == UV_BAU_V2)
process_uv2_message(&msgdesc, bcp);
else
- /* no error workaround for uv1 or uv3 */
+ /* no error workaround for uv3 */
bau_process_message(&msgdesc, bcp, 1);
msg++;
@@@ -1235,7 -1350,12 +1235,7 @@@ static void __init enable_timeouts(void
mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
write_mmr_misc_control(pnode, mmr_image);
- /*
- * UV1:
- * Subsequent reversals of the timebase bit (3) cause an
- * immediate timeout of one or all INTD resources as
- * indicated in bits 2:0 (7 causes all of them to timeout).
- */
+
mmr_image |= (1L << SOFTACK_MSHIFT);
if (is_uv2_hub()) {
/* do not touch the legacy mode bit */
@@@ -1550,7 -1670,7 +1550,7 @@@ static int tunables_open(struct inode *
static const struct proc_ops uv_ptc_proc_ops = {
.proc_open = ptc_proc_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_write = ptc_proc_write,
.proc_lseek = seq_lseek,
.proc_release = seq_release,
@@@ -1591,12 -1711,14 +1591,12 @@@ static void activation_descriptor_init(
{
int i;
int cpu;
- int uv1 = 0;
unsigned long gpa;
unsigned long m;
unsigned long n;
size_t dsize;
struct bau_desc *bau_desc;
struct bau_desc *bd2;
- struct uv1_bau_msg_header *uv1_hdr;
struct uv2_3_bau_msg_header *uv2_3_hdr;
struct bau_control *bcp;
@@@ -1611,6 -1733,8 +1611,6 @@@
gpa = uv_gpa(bau_desc);
n = uv_gpa_to_gnode(gpa);
m = ops.bau_gpa_to_offset(gpa);
- if (is_uv1_hub())
- uv1 = 1;
/* the 14-bit pnode */
write_mmr_descriptor_base(pnode,
@@@ -1622,15 -1746,37 +1622,15 @@@
*/
for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
memset(bd2, 0, sizeof(struct bau_desc));
- if (uv1) {
- uv1_hdr = &bd2->header.uv1_hdr;
- uv1_hdr->swack_flag = 1;
- /*
- * The base_dest_nasid set in the message header
- * is the nasid of the first uvhub in the partition.
- * The bit map will indicate destination pnode numbers
- * relative to that base. They may not be consecutive
- * if nasid striding is being used.
- */
- uv1_hdr->base_dest_nasid =
- UV_PNODE_TO_NASID(base_pnode);
- uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
- uv1_hdr->command = UV_NET_ENDPOINT_INTD;
- uv1_hdr->int_both = 1;
- /*
- * all others need to be set to zero:
- * fairness chaining multilevel count replied_to
- */
- } else {
- /*
- * BIOS uses legacy mode, but uv2 and uv3 hardware always
- * uses native mode for selective broadcasts.
- */
- uv2_3_hdr = &bd2->header.uv2_3_hdr;
- uv2_3_hdr->swack_flag = 1;
- uv2_3_hdr->base_dest_nasid =
- UV_PNODE_TO_NASID(base_pnode);
- uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
- uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
- }
+ /*
+ * BIOS uses legacy mode, but uv2 and uv3 hardware always
+ * uses native mode for selective broadcasts.
+ */
+ uv2_3_hdr = &bd2->header.uv2_3_hdr;
+ uv2_3_hdr->swack_flag = 1;
+ uv2_3_hdr->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
+ uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
+ uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
}
for_each_present_cpu(cpu) {
if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
@@@ -1715,7 -1861,7 +1715,7 @@@ static void __init init_uvhub(int uvhub
* The below initialization can't be in firmware because the
* messaging IRQ will be determined by the OS.
*/
- apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
+ apicid = uvhub_to_first_apicid(uvhub);
write_mmr_data_config(pnode, ((apicid << 32) | vector));
}
@@@ -1728,20 -1874,33 +1728,20 @@@ static int calculate_destination_timeou
{
unsigned long mmr_image;
int mult1;
- int mult2;
- int index;
int base;
int ret;
- unsigned long ts_ns;
-
- if (is_uv1_hub()) {
- mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
- mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
- index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
- mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
- mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
- ts_ns = timeout_base_ns[index];
- ts_ns *= (mult1 * mult2);
- ret = ts_ns / 1000;
- } else {
- /* same destination timeout for uv2 and uv3 */
- /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
- mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
- mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
- if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
- base = 80;
- else
- base = 10;
- mult1 = mmr_image & UV2_ACK_MASK;
- ret = mult1 * base;
- }
+
+ /* same destination timeout for uv2 and uv3 */
+ /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
+ mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
+ mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
+ if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
+ base = 80;
+ else
+ base = 10;
+ mult1 = mmr_image & UV2_ACK_MASK;
+ ret = mult1 * base;
+
return ret;
}
@@@ -1880,7 -2039,9 +1880,7 @@@ static int scan_sock(struct socket_des
bcp->cpus_in_socket = sdp->num_cpus;
bcp->socket_master = *smasterp;
bcp->uvhub = bdp->uvhub;
- if (is_uv1_hub())
- bcp->uvhub_version = UV_BAU_V1;
- else if (is_uv2_hub())
+ if (is_uv2_hub())
bcp->uvhub_version = UV_BAU_V2;
else if (is_uv3_hub())
bcp->uvhub_version = UV_BAU_V3;
@@@ -1962,7 -2123,7 +1962,7 @@@ static int __init init_per_cpu(int nuvh
struct uvhub_desc *uvhub_descs;
unsigned char *uvhub_mask = NULL;
- if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
+ if (is_uv3_hub() || is_uv2_hub())
timeout_us = calculate_destination_timeout();
uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
@@@ -1990,6 -2151,17 +1990,6 @@@ fail
return 1;
}
-static const struct bau_operations uv1_bau_ops __initconst = {
- .bau_gpa_to_offset = uv_gpa_to_offset,
- .read_l_sw_ack = read_mmr_sw_ack,
- .read_g_sw_ack = read_gmmr_sw_ack,
- .write_l_sw_ack = write_mmr_sw_ack,
- .write_g_sw_ack = write_gmmr_sw_ack,
- .write_payload_first = write_mmr_payload_first,
- .write_payload_last = write_mmr_payload_last,
- .wait_completion = uv1_wait_completion,
-};
-
static const struct bau_operations uv2_3_bau_ops __initconst = {
.bau_gpa_to_offset = uv_gpa_to_offset,
.read_l_sw_ack = read_mmr_sw_ack,
@@@ -2034,6 -2206,8 +2034,6 @@@ static int __init uv_bau_init(void
ops = uv2_3_bau_ops;
else if (is_uv2_hub())
ops = uv2_3_bau_ops;
- else if (is_uv1_hub())
- ops = uv1_bau_ops;
nuvhubs = uv_num_possible_blades();
if (nuvhubs < 2) {
@@@ -2054,7 -2228,7 +2054,7 @@@
}
/* software timeouts are not supported on UV4 */
- if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
+ if (is_uv3_hub() || is_uv2_hub())
enable_timeouts();
if (init_per_cpu(nuvhubs, uv_base_pnode)) {
@@@ -2077,7 -2251,8 +2077,7 @@@
val = 1L << 63;
write_gmmr_activation(pnode, val);
mmr = 1; /* should be 1 to broadcast to both sockets */
- if (!is_uv1_hub())
- write_mmr_data_broadcast(pnode, mmr);
+ write_mmr_data_broadcast(pnode, mmr);
}
}
diff --combined block/blk-mq-debugfs.c
index 3f09bcb8a6fd,4e427fc88a26..5c8333d1ceed
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@@ -404,7 -404,8 +404,7 @@@ static bool hctx_show_busy_rq(struct re
const struct show_busy_params *params = data;
if (rq->mq_hctx == params->hctx)
- __blk_mq_debugfs_rq_show(params->m,
- list_entry_rq(&rq->queuelist));
+ __blk_mq_debugfs_rq_show(params->m, rq);
return true;
}
@@@ -772,7 -773,7 +772,7 @@@ static int blk_mq_debugfs_release(struc
static const struct file_operations blk_mq_debugfs_fops = {
.open = blk_mq_debugfs_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = blk_mq_debugfs_write,
.llseek = seq_lseek,
.release = blk_mq_debugfs_release,
@@@ -826,6 -827,9 +826,6 @@@ void blk_mq_debugfs_register(struct req
struct blk_mq_hw_ctx *hctx;
int i;
- q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
- blk_debugfs_root);
-
debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
/*
@@@ -856,7 -860,9 +856,7 @@@
void blk_mq_debugfs_unregister(struct request_queue *q)
{
- debugfs_remove_recursive(q->debugfs_dir);
q->sched_debugfs_dir = NULL;
- q->debugfs_dir = NULL;
}
static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
diff --combined drivers/block/nbd.c
index 3ff4054d6834,4c54d366afde..25208ff7166e
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@@ -784,7 -784,6 +784,7 @@@ static void recv_work(struct work_struc
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
struct nbd_cmd *cmd;
+ struct request *rq;
while (1) {
cmd = nbd_read_stat(nbd, args->index);
@@@ -797,9 -796,7 +797,9 @@@
break;
}
- blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
+ rq = blk_mq_rq_from_pdu(cmd);
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
}
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
@@@ -1547,7 -1544,7 +1547,7 @@@ static int nbd_dbg_tasks_open(struct in
static const struct file_operations nbd_dbg_tasks_ops = {
.open = nbd_dbg_tasks_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -1582,7 -1579,7 +1582,7 @@@ static int nbd_dbg_flags_open(struct in
static const struct file_operations nbd_dbg_flags_ops = {
.open = nbd_dbg_flags_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined drivers/block/pktcdvd.c
index 4becc1efe775,50c6d6eacb59..c81f637141c0
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@@ -36,7 -36,7 +36,7 @@@
* block device, assembling the pieces to full packets and queuing them to the
* packet I/O scheduler.
*
- * At the top layer there is a custom make_request_fn function that forwards
+ * At the top layer there is a custom ->submit_bio function that forwards
* read requests directly to the iosched queue and puts write requests in the
* unaligned write queue. A kernel thread performs the necessary read
* gathering to convert the unaligned writes to aligned writes and then feeds
@@@ -464,7 -464,7 +464,7 @@@ static int pkt_debugfs_fops_open(struc
static const struct file_operations debug_fops = {
.open = pkt_debugfs_fops_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
@@@ -913,7 -913,7 +913,7 @@@ static void pkt_iosched_process_queue(s
}
atomic_inc(&pd->cdrw.pending_bios);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
@@@ -2428,15 -2428,15 +2428,15 @@@ static void pkt_make_request_write(stru
}
}
-static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t pkt_submit_bio(struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
struct bio *split;
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
- pd = q->queuedata;
+ pd = bio->bi_disk->queue->queuedata;
if (!pd) {
pr_err("%s incorrect request queue\n", bio_devname(bio, b));
goto end_io;
@@@ -2480,7 -2480,7 +2480,7 @@@
split = bio;
}
- pkt_make_request_write(q, split);
+ pkt_make_request_write(bio->bi_disk->queue, split);
} while (split != bio);
return BLK_QC_T_NONE;
@@@ -2685,7 -2685,6 +2685,7 @@@ static char *pkt_devnode(struct gendis
static const struct block_device_operations pktcdvd_ops = {
.owner = THIS_MODULE,
+ .submit_bio = pkt_submit_bio,
.open = pkt_open,
.release = pkt_close,
.ioctl = pkt_ioctl,
@@@ -2750,7 -2749,7 +2750,7 @@@ static int pkt_setup_dev(dev_t dev, dev
disk->flags = GENHD_FL_REMOVABLE;
strcpy(disk->disk_name, pd->name);
disk->private_data = pd;
- disk->queue = blk_alloc_queue(pkt_make_request, NUMA_NO_NODE);
+ disk->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!disk->queue)
goto out_mem2;
diff --combined drivers/block/rsxx/core.c
index 7e261224ff10,1adfd0f273b4..e631749e14ca
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@@ -209,7 -209,7 +209,7 @@@ static const struct file_operations deb
static const struct file_operations debugfs_stats_fops = {
.owner = THIS_MODULE,
.open = rsxx_attr_stats_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -217,7 -217,7 +217,7 @@@
static const struct file_operations debugfs_pci_regs_fops = {
.owner = THIS_MODULE,
.open = rsxx_attr_pci_regs_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -562,15 -562,13 +562,15 @@@ static int rsxx_eeh_frozen(struct pci_d
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
- pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
- card->ctrl[i].status.buf,
- card->ctrl[i].status.dma_addr);
+ dma_free_coherent(&card->dev->dev,
+ STATUS_BUFFER_SIZE8,
+ card->ctrl[i].status.buf,
+ card->ctrl[i].status.dma_addr);
if (card->ctrl[i].cmd.buf)
- pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
- card->ctrl[i].cmd.buf,
- card->ctrl[i].cmd.dma_addr);
+ dma_free_coherent(&card->dev->dev,
+ COMMAND_BUFFER_SIZE8,
+ card->ctrl[i].cmd.buf,
+ card->ctrl[i].cmd.dma_addr);
}
return 0;
@@@ -627,7 -625,7 +627,7 @@@ static int rsxx_eeh_fifo_flush_poll(str
}
static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
- enum pci_channel_state error)
+ pci_channel_state_t error)
{
int st;
@@@ -713,15 -711,15 +713,15 @@@ static pci_ers_result_t rsxx_slot_reset
failed_hw_buffers_init:
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
- pci_free_consistent(card->dev,
- STATUS_BUFFER_SIZE8,
- card->ctrl[i].status.buf,
- card->ctrl[i].status.dma_addr);
+ dma_free_coherent(&card->dev->dev,
+ STATUS_BUFFER_SIZE8,
+ card->ctrl[i].status.buf,
+ card->ctrl[i].status.dma_addr);
if (card->ctrl[i].cmd.buf)
- pci_free_consistent(card->dev,
- COMMAND_BUFFER_SIZE8,
- card->ctrl[i].cmd.buf,
- card->ctrl[i].cmd.dma_addr);
+ dma_free_coherent(&card->dev->dev,
+ COMMAND_BUFFER_SIZE8,
+ card->ctrl[i].cmd.buf,
+ card->ctrl[i].cmd.dma_addr);
}
failed_hw_setup:
rsxx_eeh_failure(dev);
diff --combined drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 138759dc8190,52c7785c0db1..0bf8f29c5ae8
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@@ -185,8 -185,7 +185,8 @@@ static struct sun8i_ce_alg_template ce_
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@@ -212,8 -211,7 +212,8 @@@
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@@ -238,8 -236,7 +238,8 @@@
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@@ -265,8 -262,7 +265,8 @@@
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@@ -314,7 -310,7 +314,7 @@@ static int sun8i_ce_dbgfs_open(struct i
static const struct file_operations sun8i_ce_debugfs_fops = {
.owner = THIS_MODULE,
.open = sun8i_ce_dbgfs_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 9a23515783a6,fde536469527..d9767e2c84d9
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@@ -169,8 -169,7 +169,8 @@@ static struct sun8i_ss_alg_template ss_
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@@ -196,8 -195,7 +196,8 @@@
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@@ -222,8 -220,7 +222,8 @@@
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@@ -249,8 -246,7 +249,8 @@@
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@@ -298,7 -294,7 +298,7 @@@ static int sun8i_ss_dbgfs_open(struct i
static const struct file_operations sun8i_ss_debugfs_fops = {
.owner = THIS_MODULE,
.open = sun8i_ss_dbgfs_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined drivers/crypto/amlogic/amlogic-gxl-core.c
index 466552acbbbb,112be2441369..be4dcdb225d8
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@@ -54,8 -54,7 +54,8 @@@ static struct meson_alg_template mc_alg
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@@ -80,8 -79,7 +80,8 @@@
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@@ -127,7 -125,7 +127,7 @@@ static int meson_dbgfs_open(struct inod
static const struct file_operations meson_debugfs_fops = {
.owner = THIS_MODULE,
.open = meson_dbgfs_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined drivers/crypto/hisilicon/qm.c
index 6527c53b073f,a5c3e21e0fe6..b594dc72807c
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@@ -1054,7 -1054,7 +1054,7 @@@ static int qm_regs_open(struct inode *i
static const struct file_operations qm_regs_fops = {
.owner = THIS_MODULE,
.open = qm_regs_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.release = single_release,
};
@@@ -1064,10 -1064,19 +1064,10 @@@ static ssize_t qm_cmd_read(struct file
char buf[QM_DBG_READ_LEN];
int len;
- if (*pos)
- return 0;
-
- if (count < QM_DBG_READ_LEN)
- return -ENOSPC;
-
- len = snprintf(buf, QM_DBG_READ_LEN, "%s\n",
- "Please echo help to cmd to get help information");
+ len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
-
- return (*pos = len);
+ return simple_read_from_buffer(buffer, count, pos, buf, len);
}
static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
@@@ -1732,7 -1741,7 +1732,7 @@@ void hisi_qm_release_qp(struct hisi_qp
}
EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
-static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
+static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
struct hisi_qm *qm = qp->qm;
struct device *dev = &qm->pdev->dev;
@@@ -1804,7 -1813,7 +1804,7 @@@ static int qm_start_qp_nolock(struct hi
struct hisi_qm *qm = qp->qm;
struct device *dev = &qm->pdev->dev;
int qp_id = qp->qp_id;
- int pasid = arg;
+ u32 pasid = arg;
int ret;
if (!qm_qp_avail_state(qm, qp, QP_START))
@@@ -2170,12 -2179,8 +2170,12 @@@ static int qm_alloc_uacce(struct hisi_q
.flags = UACCE_DEV_SVA,
.ops = &uacce_qm_ops,
};
+ int ret;
- strncpy(interface.name, pdev->driver->name, sizeof(interface.name));
+ ret = strscpy(interface.name, pdev->driver->name,
+ sizeof(interface.name));
+ if (ret < 0)
+ return -ENAMETOOLONG;
uacce = uacce_alloc(&pdev->dev, &interface);
if (IS_ERR(uacce))
@@@ -2686,12 -2691,24 +2686,12 @@@ static ssize_t qm_status_read(struct fi
{
struct hisi_qm *qm = filp->private_data;
char buf[QM_DBG_READ_LEN];
- int val, cp_len, len;
-
- if (*pos)
- return 0;
-
- if (count < QM_DBG_READ_LEN)
- return -ENOSPC;
+ int val, len;
val = atomic_read(&qm->status.flags);
- len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
- if (!len)
- return -EFAULT;
-
- cp_len = copy_to_user(buffer, buf, len);
- if (cp_len)
- return -EFAULT;
+ len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
- return (*pos = len);
+ return simple_read_from_buffer(buffer, count, pos, buf, len);
}
static const struct file_operations qm_status_fops = {
diff --combined drivers/crypto/qat/qat_common/adf_cfg.c
index ac462796cefc,4b77c6f05b3e..44c7027f8096
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@@ -1,5 -1,49 +1,5 @@@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
@@@ -66,7 -110,7 +66,7 @@@ static int qat_dev_cfg_open(struct inod
static const struct file_operations qat_dev_cfg_fops = {
.open = qat_dev_cfg_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release
};
diff --combined drivers/crypto/qat/qat_common/adf_transport_debug.c
index 2a2eccbf56ec,6133a869aeb1..659917c848d9
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@@ -1,5 -1,49 +1,5 @@@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
@@@ -98,7 -142,7 +98,7 @@@ static int adf_ring_open(struct inode *
static const struct file_operations adf_ring_debug_fops = {
.open = adf_ring_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release
};
@@@ -209,7 -253,7 +209,7 @@@ static int adf_bank_open(struct inode *
static const struct file_operations adf_bank_debug_fops = {
.open = adf_bank_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release
};
diff --combined drivers/firmware/tegra/bpmp-debugfs.c
index c1bbba9ee93a,3f0a89ce131a..e861d407b7d1
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@@ -4,14 -4,11 +4,14 @@@
*/
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
+static DEFINE_MUTEX(bpmp_debug_lock);
+
struct seqbuf {
char *buf;
size_t pos;
@@@ -99,354 -96,6 +99,354 @@@ static const char *get_filename(struct
return filename;
}
+static int mrq_debug_open(struct tegra_bpmp *bpmp, const char *name,
+ uint32_t *fd, uint32_t *len, bool write)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(write ? CMD_DEBUG_OPEN_WO : CMD_DEBUG_OPEN_RO),
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ ssize_t sz_name;
+ int err = 0;
+
+ sz_name = strscpy(req.fop.name, name, sizeof(req.fop.name));
+ if (sz_name < 0) {
+ pr_err("File name too large: %s\n", name);
+ return -EINVAL;
+ }
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ *len = resp.fop.datalen;
+ *fd = resp.fop.fd;
+
+ return 0;
+}
+
+static int mrq_debug_close(struct tegra_bpmp *bpmp, uint32_t fd)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUG_CLOSE),
+ .frd = {
+ .fd = fd,
+ },
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err = 0;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mrq_debug_read(struct tegra_bpmp *bpmp, const char *name,
+ char *data, size_t sz_data, uint32_t *nbytes)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUG_READ),
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ uint32_t fd = 0, len = 0;
+ int remaining, err;
+
+ mutex_lock(&bpmp_debug_lock);
+ err = mrq_debug_open(bpmp, name, &fd, &len, 0);
+ if (err)
+ goto out;
+
+ if (len > sz_data) {
+ err = -EFBIG;
+ goto close;
+ }
+
+ req.frd.fd = fd;
+ remaining = len;
+
+ while (remaining > 0) {
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0) {
+ goto close;
+ } else if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ if (resp.frd.readlen > remaining) {
+ pr_err("%s: read data length invalid\n", __func__);
+ err = -EINVAL;
+ goto close;
+ }
+
+ memcpy(data, resp.frd.data, resp.frd.readlen);
+ data += resp.frd.readlen;
+ remaining -= resp.frd.readlen;
+ }
+
+ *nbytes = len;
+
+close:
+ err = mrq_debug_close(bpmp, fd);
+out:
+ mutex_unlock(&bpmp_debug_lock);
+ return err;
+}
+
+static int mrq_debug_write(struct tegra_bpmp *bpmp, const char *name,
+ uint8_t *data, size_t sz_data)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUG_WRITE)
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ uint32_t fd = 0, len = 0;
+ size_t remaining;
+ int err;
+
+ mutex_lock(&bpmp_debug_lock);
+ err = mrq_debug_open(bpmp, name, &fd, &len, 1);
+ if (err)
+ goto out;
+
+ if (sz_data > len) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ req.fwr.fd = fd;
+ remaining = sz_data;
+
+ while (remaining > 0) {
+ len = min(remaining, sizeof(req.fwr.data));
+ memcpy(req.fwr.data, data, len);
+ req.fwr.datalen = len;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0) {
+ goto close;
+ } else if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ data += req.fwr.datalen;
+ remaining -= req.fwr.datalen;
+ }
+
+close:
+ err = mrq_debug_close(bpmp, fd);
+out:
+ mutex_unlock(&bpmp_debug_lock);
+ return err;
+}
+
+static int bpmp_debug_show(struct seq_file *m, void *p)
+{
+ struct file *file = m->private;
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ char *databuf = NULL;
+ char fnamebuf[256];
+ const char *filename;
+ uint32_t nbytes = 0;
+ size_t len;
+ int err;
+
+ len = seq_get_buf(m, &databuf);
+ if (!databuf)
+ return -ENOMEM;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ err = mrq_debug_read(bpmp, filename, databuf, len, &nbytes);
+ if (!err)
+ seq_commit(m, nbytes);
+
+ return err;
+}
+
+static ssize_t bpmp_debug_store(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ char *databuf = NULL;
+ char fnamebuf[256];
+ const char *filename;
+ ssize_t err;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ databuf = kmalloc(count, GFP_KERNEL);
+ if (!databuf)
+ return -ENOMEM;
+
+ if (copy_from_user(databuf, buf, count)) {
+ err = -EFAULT;
+ goto free_ret;
+ }
+
+ err = mrq_debug_write(bpmp, filename, databuf, count);
+
+free_ret:
+ kfree(databuf);
+
+ return err ?: count;
+}
+
+static int bpmp_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open_size(file, bpmp_debug_show, file, SZ_256K);
+}
+
+static const struct file_operations bpmp_debug_fops = {
+ .open = bpmp_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = bpmp_debug_store,
+ .release = single_release,
+};
+
+static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
+ struct dentry *parent,
+ char *ppath)
+{
+ const size_t pathlen = SZ_256;
+ const size_t bufsize = SZ_16K;
+ uint32_t dsize, attrs = 0;
+ struct dentry *dentry;
+ struct seqbuf seqbuf;
+ char *buf, *pathbuf;
+ const char *name;
+ int err = 0;
+
+ if (!bpmp || !parent || !ppath)
+ return -EINVAL;
+
+ buf = kmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pathbuf = kzalloc(pathlen, GFP_KERNEL);
+ if (!pathbuf) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ err = mrq_debug_read(bpmp, ppath, buf, bufsize, &dsize);
+ if (err)
+ goto out;
+
+ seqbuf_init(&seqbuf, buf, dsize);
+
+ while (!seqbuf_eof(&seqbuf)) {
+ err = seqbuf_read_u32(&seqbuf, &attrs);
+ if (err)
+ goto out;
+
+ err = seqbuf_read_str(&seqbuf, &name);
+ if (err < 0)
+ goto out;
+
+ if (attrs & DEBUGFS_S_ISDIR) {
+ size_t len;
+
+ dentry = debugfs_create_dir(name, parent);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
+ }
+
+ len = strlen(ppath) + strlen(name) + 1;
+ if (len >= pathlen) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ strncpy(pathbuf, ppath, pathlen);
+ strncat(pathbuf, name, strlen(name));
+ strcat(pathbuf, "/");
+
+ err = bpmp_populate_debugfs_inband(bpmp, dentry,
+ pathbuf);
+ if (err < 0)
+ goto out;
+ } else {
+ umode_t mode;
+
+ mode = attrs & DEBUGFS_S_IRUSR ? 0400 : 0;
+ mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0;
+ dentry = debugfs_create_file(name, mode, parent, bpmp,
+ &bpmp_debug_fops);
+ if (!dentry) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+out:
+ kfree(pathbuf);
+ kfree(buf);
+
+ return err;
+}
+
static int mrq_debugfs_read(struct tegra_bpmp *bpmp,
dma_addr_t name, size_t sz_name,
dma_addr_t data, size_t sz_data,
@@@ -478,8 -127,6 +478,8 @@@
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
*nbytes = (size_t)resp.fop.nbytes;
@@@ -537,8 -184,6 +537,8 @@@ static int mrq_debugfs_dumpdir(struct t
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
*nbytes = (size_t)resp.dumpdir.nbytes;
@@@ -557,7 -202,7 +557,7 @@@ static int debugfs_show(struct seq_fil
char buf[256];
const char *filename;
size_t len, nbytes;
- int ret;
+ int err;
filename = get_filename(bpmp, file, buf, sizeof(buf));
if (!filename)
@@@ -571,24 -216,24 +571,24 @@@
datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
GFP_KERNEL | GFP_DMA32);
if (!datavirt) {
- ret = -ENOMEM;
+ err = -ENOMEM;
goto free_namebuf;
}
len = strlen(filename);
strncpy(namevirt, filename, namesize);
- ret = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
+ err = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
&nbytes);
- if (!ret)
+ if (!err)
seq_write(m, datavirt, nbytes);
dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
free_namebuf:
dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
- return ret;
+ return err;
}
static int debugfs_open(struct inode *inode, struct file *file)
@@@ -608,7 -253,7 +608,7 @@@ static ssize_t debugfs_store(struct fil
char fnamebuf[256];
const char *filename;
size_t len;
- int ret;
+ int err;
filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
if (!filename)
@@@ -622,7 -267,7 +622,7 @@@
datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
GFP_KERNEL | GFP_DMA32);
if (!datavirt) {
- ret = -ENOMEM;
+ err = -ENOMEM;
goto free_namebuf;
}
@@@ -630,11 -275,11 +630,11 @@@
strncpy(namevirt, filename, namesize);
if (copy_from_user(datavirt, buf, count)) {
- ret = -EFAULT;
+ err = -EFAULT;
goto free_databuf;
}
- ret = mrq_debugfs_write(bpmp, namephys, len, dataphys,
+ err = mrq_debugfs_write(bpmp, namephys, len, dataphys,
count);
free_databuf:
@@@ -642,12 -287,12 +642,12 @@@
free_namebuf:
dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
- return ret ?: count;
+ return err ?: count;
}
static const struct file_operations debugfs_fops = {
.open = debugfs_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.write = debugfs_store,
.release = single_release,
@@@ -705,66 -350,59 +705,66 @@@ static int bpmp_populate_dir(struct teg
return 0;
}
-static int create_debugfs_mirror(struct tegra_bpmp *bpmp, void *buf,
- size_t bufsize, struct dentry *root)
+static int bpmp_populate_debugfs_shmem(struct tegra_bpmp *bpmp)
{
struct seqbuf seqbuf;
+ const size_t sz = SZ_512K;
+ dma_addr_t phys;
+ size_t nbytes;
+ void *virt;
int err;
- bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
- if (!bpmp->debugfs_mirror)
+ virt = dma_alloc_coherent(bpmp->dev, sz, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt)
return -ENOMEM;
- seqbuf_init(&seqbuf, buf, bufsize);
- err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0);
+ err = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes);
if (err < 0) {
- debugfs_remove_recursive(bpmp->debugfs_mirror);
- bpmp->debugfs_mirror = NULL;
+ goto free;
+ } else if (nbytes > sz) {
+ err = -EINVAL;
+ goto free;
}
+ seqbuf_init(&seqbuf, virt, nbytes);
+ err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0);
+free:
+ dma_free_coherent(bpmp->dev, sz, virt, phys);
+
return err;
}
int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
{
- dma_addr_t phys;
- void *virt;
- const size_t sz = SZ_256K;
- size_t nbytes;
- int ret;
struct dentry *root;
+ bool inband;
+ int err;
- if (!tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS))
+ inband = tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUG);
+
+ if (!inband && !tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS))
return 0;
root = debugfs_create_dir("bpmp", NULL);
if (!root)
return -ENOMEM;
- virt = dma_alloc_coherent(bpmp->dev, sz, &phys,
- GFP_KERNEL | GFP_DMA32);
- if (!virt) {
- ret = -ENOMEM;
+ bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
+ if (!bpmp->debugfs_mirror) {
+ err = -ENOMEM;
goto out;
}
- ret = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes);
- if (ret < 0)
- goto free;
+ if (inband)
+ err = bpmp_populate_debugfs_inband(bpmp, bpmp->debugfs_mirror,
+ "/");
+ else
+ err = bpmp_populate_debugfs_shmem(bpmp);
- ret = create_debugfs_mirror(bpmp, virt, nbytes, root);
-free:
- dma_free_coherent(bpmp->dev, sz, virt, phys);
out:
- if (ret < 0)
- debugfs_remove(root);
+ if (err < 0)
+ debugfs_remove_recursive(root);
- return ret;
+ return err;
}
diff --combined drivers/gpio/gpiolib.c
index 80137c1b3cdc,0ca1acff0e37..25ef7ed30a06
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@@ -17,16 -17,20 +17,16 @@@
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/cdev.h>
#include <linux/fs.h>
-#include <linux/uaccess.h>
#include <linux/compat.h>
-#include <linux/anon_inodes.h>
#include <linux/file.h>
-#include <linux/kfifo.h>
-#include <linux/poll.h>
-#include <linux/timekeeping.h>
#include <uapi/linux/gpio.h>
#include "gpiolib.h"
#include "gpiolib-of.h"
#include "gpiolib-acpi.h"
+#include "gpiolib-cdev.h"
+#include "gpiolib-sysfs.h"
#define CREATE_TRACE_POINTS
#include <trace/events/gpio.h>
@@@ -421,6 -425,1105 +421,6 @@@ bool gpiochip_line_is_valid(const struc
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
-/*
- * GPIO line handle management
- */
-
-/**
- * struct linehandle_state - contains the state of a userspace handle
- * @gdev: the GPIO device the handle pertains to
- * @label: consumer label used to tag descriptors
- * @descs: the GPIO descriptors held by this handle
- * @numdescs: the number of descriptors held in the descs array
- */
-struct linehandle_state {
- struct gpio_device *gdev;
- const char *label;
- struct gpio_desc *descs[GPIOHANDLES_MAX];
- u32 numdescs;
-};
-
-#define GPIOHANDLE_REQUEST_VALID_FLAGS \
- (GPIOHANDLE_REQUEST_INPUT | \
- GPIOHANDLE_REQUEST_OUTPUT | \
- GPIOHANDLE_REQUEST_ACTIVE_LOW | \
- GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
- GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
- GPIOHANDLE_REQUEST_BIAS_DISABLE | \
- GPIOHANDLE_REQUEST_OPEN_DRAIN | \
- GPIOHANDLE_REQUEST_OPEN_SOURCE)
-
-static int linehandle_validate_flags(u32 flags)
-{
- /* Return an error if an unknown flag is set */
- if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
- return -EINVAL;
-
- /*
- * Do not allow both INPUT & OUTPUT flags to be set as they are
- * contradictory.
- */
- if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
- (flags & GPIOHANDLE_REQUEST_OUTPUT))
- return -EINVAL;
-
- /*
- * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
- * the hardware actually supports enabling both at the same time the
- * electrical result would be disastrous.
- */
- if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
- (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
- return -EINVAL;
-
- /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
- if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
- ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
- (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
- return -EINVAL;
-
- /* Bias flags only allowed for input or output mode. */
- if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
- (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
- ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
- (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
- (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
- return -EINVAL;
-
- /* Only one bias flag can be set. */
- if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
- (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
- GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
- ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
- (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
- return -EINVAL;
-
- return 0;
-}
-
-static long linehandle_set_config(struct linehandle_state *lh,
- void __user *ip)
-{
- struct gpiohandle_config gcnf;
- struct gpio_desc *desc;
- int i, ret;
- u32 lflags;
- unsigned long *flagsp;
-
- if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
- return -EFAULT;
-
- lflags = gcnf.flags;
- ret = linehandle_validate_flags(lflags);
- if (ret)
- return ret;
-
- for (i = 0; i < lh->numdescs; i++) {
- desc = lh->descs[i];
- flagsp = &desc->flags;
-
- assign_bit(FLAG_ACTIVE_LOW, flagsp,
- lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
-
- assign_bit(FLAG_OPEN_DRAIN, flagsp,
- lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
-
- assign_bit(FLAG_OPEN_SOURCE, flagsp,
- lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
-
- assign_bit(FLAG_PULL_UP, flagsp,
- lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
-
- assign_bit(FLAG_PULL_DOWN, flagsp,
- lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
-
- assign_bit(FLAG_BIAS_DISABLE, flagsp,
- lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
-
- /*
- * Lines have to be requested explicitly for input
- * or output, else the line will be treated "as is".
- */
- if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
- int val = !!gcnf.default_values[i];
-
- ret = gpiod_direction_output(desc, val);
- if (ret)
- return ret;
- } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
- ret = gpiod_direction_input(desc);
- if (ret)
- return ret;
- }
-
- atomic_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_CONFIG, desc);
- }
- return 0;
-}
-
-static long linehandle_ioctl(struct file *filep, unsigned int cmd,
- unsigned long arg)
-{
- struct linehandle_state *lh = filep->private_data;
- void __user *ip = (void __user *)arg;
- struct gpiohandle_data ghd;
- DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
- int i;
-
- if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
- /* NOTE: It's ok to read values of output lines. */
- int ret = gpiod_get_array_value_complex(false,
- true,
- lh->numdescs,
- lh->descs,
- NULL,
- vals);
- if (ret)
- return ret;
-
- memset(&ghd, 0, sizeof(ghd));
- for (i = 0; i < lh->numdescs; i++)
- ghd.values[i] = test_bit(i, vals);
-
- if (copy_to_user(ip, &ghd, sizeof(ghd)))
- return -EFAULT;
-
- return 0;
- } else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) {
- /*
- * All line descriptors were created at once with the same
- * flags so just check if the first one is really output.
- */
- if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
- return -EPERM;
-
- if (copy_from_user(&ghd, ip, sizeof(ghd)))
- return -EFAULT;
-
- /* Clamp all values to [0,1] */
- for (i = 0; i < lh->numdescs; i++)
- __assign_bit(i, vals, ghd.values[i]);
-
- /* Reuse the array setting function */
- return gpiod_set_array_value_complex(false,
- true,
- lh->numdescs,
- lh->descs,
- NULL,
- vals);
- } else if (cmd == GPIOHANDLE_SET_CONFIG_IOCTL) {
- return linehandle_set_config(lh, ip);
- }
- return -EINVAL;
-}
-
-#ifdef CONFIG_COMPAT
-static long linehandle_ioctl_compat(struct file *filep, unsigned int cmd,
- unsigned long arg)
-{
- return linehandle_ioctl(filep, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static int linehandle_release(struct inode *inode, struct file *filep)
-{
- struct linehandle_state *lh = filep->private_data;
- struct gpio_device *gdev = lh->gdev;
- int i;
-
- for (i = 0; i < lh->numdescs; i++)
- gpiod_free(lh->descs[i]);
- kfree(lh->label);
- kfree(lh);
- put_device(&gdev->dev);
- return 0;
-}
-
-static const struct file_operations linehandle_fileops = {
- .release = linehandle_release,
- .owner = THIS_MODULE,
- .llseek = noop_llseek,
- .unlocked_ioctl = linehandle_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = linehandle_ioctl_compat,
-#endif
-};
-
-static int linehandle_create(struct gpio_device *gdev, void __user *ip)
-{
- struct gpiohandle_request handlereq;
- struct linehandle_state *lh;
- struct file *file;
- int fd, i, count = 0, ret;
- u32 lflags;
-
- if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
- return -EFAULT;
- if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
- return -EINVAL;
-
- lflags = handlereq.flags;
-
- ret = linehandle_validate_flags(lflags);
- if (ret)
- return ret;
-
- lh = kzalloc(sizeof(*lh), GFP_KERNEL);
- if (!lh)
- return -ENOMEM;
- lh->gdev = gdev;
- get_device(&gdev->dev);
-
- /* Make sure this is terminated */
- handlereq.consumer_label[sizeof(handlereq.consumer_label)-1] = '\0';
- if (strlen(handlereq.consumer_label)) {
- lh->label = kstrdup(handlereq.consumer_label,
- GFP_KERNEL);
- if (!lh->label) {
- ret = -ENOMEM;
- goto out_free_lh;
- }
- }
-
- /* Request each GPIO */
- for (i = 0; i < handlereq.lines; i++) {
- u32 offset = handlereq.lineoffsets[i];
- struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
-
- if (IS_ERR(desc)) {
- ret = PTR_ERR(desc);
- goto out_free_descs;
- }
-
- ret = gpiod_request(desc, lh->label);
- if (ret)
- goto out_free_descs;
- lh->descs[i] = desc;
- count = i + 1;
-
- if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE)
- set_bit(FLAG_BIAS_DISABLE, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)
- set_bit(FLAG_PULL_DOWN, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)
- set_bit(FLAG_PULL_UP, &desc->flags);
-
- ret = gpiod_set_transitory(desc, false);
- if (ret < 0)
- goto out_free_descs;
-
- /*
- * Lines have to be requested explicitly for input
- * or output, else the line will be treated "as is".
- */
- if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
- int val = !!handlereq.default_values[i];
-
- ret = gpiod_direction_output(desc, val);
- if (ret)
- goto out_free_descs;
- } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
- ret = gpiod_direction_input(desc);
- if (ret)
- goto out_free_descs;
- }
-
- atomic_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_REQUESTED, desc);
-
- dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
- offset);
- }
- /* Let i point at the last handle */
- i--;
- lh->numdescs = handlereq.lines;
-
- fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- ret = fd;
- goto out_free_descs;
- }
-
- file = anon_inode_getfile("gpio-linehandle",
- &linehandle_fileops,
- lh,
- O_RDONLY | O_CLOEXEC);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
- goto out_put_unused_fd;
- }
-
- handlereq.fd = fd;
- if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
- /*
- * fput() will trigger the release() callback, so do not go onto
- * the regular error cleanup path here.
- */
- fput(file);
- put_unused_fd(fd);
- return -EFAULT;
- }
-
- fd_install(fd, file);
-
- dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
- lh->numdescs);
-
- return 0;
-
-out_put_unused_fd:
- put_unused_fd(fd);
-out_free_descs:
- for (i = 0; i < count; i++)
- gpiod_free(lh->descs[i]);
- kfree(lh->label);
-out_free_lh:
- kfree(lh);
- put_device(&gdev->dev);
- return ret;
-}
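
For context, the linehandle_create()/linehandle_ioctl() code being removed here implements the V1 line-handle ABI that userspace drives through /dev/gpiochipN (the implementation now lives in gpiolib-cdev.c rather than disappearing). A minimal userspace sketch of that ABI; the chip path, line offset 4 and the consumer label are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpiohandle_request req;
	struct gpiohandle_data data;
	int fd = open("/dev/gpiochip0", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.lineoffsets[0] = 4;			/* illustrative offset */
	req.lines = 1;
	req.flags = GPIOHANDLE_REQUEST_OUTPUT;
	req.default_values[0] = 0;
	strcpy(req.consumer_label, "example");

	if (ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req) < 0) {
		perror("GPIO_GET_LINEHANDLE_IOCTL");
		return 1;
	}

	/* Drive the requested line high via the handle fd. */
	memset(&data, 0, sizeof(data));
	data.values[0] = 1;
	if (ioctl(req.fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &data) < 0)
		perror("GPIOHANDLE_SET_LINE_VALUES_IOCTL");

	close(req.fd);
	close(fd);
	return 0;
}
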
-
-/*
- * GPIO line event management
- */
-
-/**
- * struct lineevent_state - contains the state of a userspace event
- * @gdev: the GPIO device the event pertains to
- * @label: consumer label used to tag descriptors
- * @desc: the GPIO descriptor held by this event
- * @eflags: the event flags this line was requested with
- * @irq: the interrupt that trigger in response to events on this GPIO
- * @wait: wait queue that handles blocking reads of events
- * @events: KFIFO for the GPIO events
- * @timestamp: cache for the timestamp storing it between hardirq
- * and IRQ thread, used to bring the timestamp close to the actual
- * event
- */
-struct lineevent_state {
- struct gpio_device *gdev;
- const char *label;
- struct gpio_desc *desc;
- u32 eflags;
- int irq;
- wait_queue_head_t wait;
- DECLARE_KFIFO(events, struct gpioevent_data, 16);
- u64 timestamp;
-};
-
-#define GPIOEVENT_REQUEST_VALID_FLAGS \
- (GPIOEVENT_REQUEST_RISING_EDGE | \
- GPIOEVENT_REQUEST_FALLING_EDGE)
-
-static __poll_t lineevent_poll(struct file *filep,
- struct poll_table_struct *wait)
-{
- struct lineevent_state *le = filep->private_data;
- __poll_t events = 0;
-
- poll_wait(filep, &le->wait, wait);
-
- if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
- events = EPOLLIN | EPOLLRDNORM;
-
- return events;
-}
-
-
-static ssize_t lineevent_read(struct file *filep,
- char __user *buf,
- size_t count,
- loff_t *f_ps)
-{
- struct lineevent_state *le = filep->private_data;
- struct gpioevent_data ge;
- ssize_t bytes_read = 0;
- int ret;
-
- if (count < sizeof(ge))
- return -EINVAL;
-
- do {
- spin_lock(&le->wait.lock);
- if (kfifo_is_empty(&le->events)) {
- if (bytes_read) {
- spin_unlock(&le->wait.lock);
- return bytes_read;
- }
-
- if (filep->f_flags & O_NONBLOCK) {
- spin_unlock(&le->wait.lock);
- return -EAGAIN;
- }
-
- ret = wait_event_interruptible_locked(le->wait,
- !kfifo_is_empty(&le->events));
- if (ret) {
- spin_unlock(&le->wait.lock);
- return ret;
- }
- }
-
- ret = kfifo_out(&le->events, &ge, 1);
- spin_unlock(&le->wait.lock);
- if (ret != 1) {
- /*
- * This should never happen - we were holding the lock
- * from the moment we learned the fifo is no longer
- * empty until now.
- */
- ret = -EIO;
- break;
- }
-
- if (copy_to_user(buf + bytes_read, &ge, sizeof(ge)))
- return -EFAULT;
- bytes_read += sizeof(ge);
- } while (count >= bytes_read + sizeof(ge));
-
- return bytes_read;
-}
-
-static int lineevent_release(struct inode *inode, struct file *filep)
-{
- struct lineevent_state *le = filep->private_data;
- struct gpio_device *gdev = le->gdev;
-
- free_irq(le->irq, le);
- gpiod_free(le->desc);
- kfree(le->label);
- kfree(le);
- put_device(&gdev->dev);
- return 0;
-}
-
-static long lineevent_ioctl(struct file *filep, unsigned int cmd,
- unsigned long arg)
-{
- struct lineevent_state *le = filep->private_data;
- void __user *ip = (void __user *)arg;
- struct gpiohandle_data ghd;
-
- /*
- * We can get the value for an event line but not set it,
- * because it is input by definition.
- */
- if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
- int val;
-
- memset(&ghd, 0, sizeof(ghd));
-
- val = gpiod_get_value_cansleep(le->desc);
- if (val < 0)
- return val;
- ghd.values[0] = val;
-
- if (copy_to_user(ip, &ghd, sizeof(ghd)))
- return -EFAULT;
-
- return 0;
- }
- return -EINVAL;
-}
-
-#ifdef CONFIG_COMPAT
-static long lineevent_ioctl_compat(struct file *filep, unsigned int cmd,
- unsigned long arg)
-{
- return lineevent_ioctl(filep, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static const struct file_operations lineevent_fileops = {
- .release = lineevent_release,
- .read = lineevent_read,
- .poll = lineevent_poll,
- .owner = THIS_MODULE,
- .llseek = noop_llseek,
- .unlocked_ioctl = lineevent_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = lineevent_ioctl_compat,
-#endif
-};
-
-static irqreturn_t lineevent_irq_thread(int irq, void *p)
-{
- struct lineevent_state *le = p;
- struct gpioevent_data ge;
- int ret;
-
- /* Do not leak kernel stack to userspace */
- memset(&ge, 0, sizeof(ge));
-
- /*
- * We may be running from a nested threaded interrupt in which case
- * we didn't get the timestamp from lineevent_irq_handler().
- */
- if (!le->timestamp)
- ge.timestamp = ktime_get_ns();
- else
- ge.timestamp = le->timestamp;
-
- if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
- && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
- int level = gpiod_get_value_cansleep(le->desc);
- if (level)
- /* Emit low-to-high event */
- ge.id = GPIOEVENT_EVENT_RISING_EDGE;
- else
- /* Emit high-to-low event */
- ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
- } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
- /* Emit low-to-high event */
- ge.id = GPIOEVENT_EVENT_RISING_EDGE;
- } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
- /* Emit high-to-low event */
- ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
- } else {
- return IRQ_NONE;
- }
-
- ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
- 1, &le->wait.lock);
- if (ret)
- wake_up_poll(&le->wait, EPOLLIN);
- else
- pr_debug_ratelimited("event FIFO is full - event dropped\n");
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t lineevent_irq_handler(int irq, void *p)
-{
- struct lineevent_state *le = p;
-
- /*
- * Just store the timestamp in hardirq context so we get it as
- * close in time as possible to the actual event.
- */
- le->timestamp = ktime_get_ns();
-
- return IRQ_WAKE_THREAD;
-}
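
The two removed handlers split the work the usual way for request_threaded_irq(): the hard-IRQ half only captures a timestamp and returns IRQ_WAKE_THREAD, while the threaded half does the sleep-capable work. A minimal sketch of that split for a hypothetical device (struct foo_dev, its fields and foo_process() are assumptions):

#include <linux/interrupt.h>
#include <linux/ktime.h>

struct foo_dev {
	u64 timestamp;		/* filled in hard-IRQ context */
	int irq;
};

static void foo_process(struct foo_dev *foo, u64 ts);	/* hypothetical, may sleep */

static irqreturn_t foo_hardirq(int irq, void *p)
{
	struct foo_dev *foo = p;

	foo->timestamp = ktime_get_ns();	/* as close to the event as possible */
	return IRQ_WAKE_THREAD;			/* defer the rest to the thread */
}

static irqreturn_t foo_thread(int irq, void *p)
{
	struct foo_dev *foo = p;

	foo_process(foo, foo->timestamp);
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
	return request_threaded_irq(foo->irq, foo_hardirq, foo_thread,
				    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				    "foo", foo);
}
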
-
-static int lineevent_create(struct gpio_device *gdev, void __user *ip)
-{
- struct gpioevent_request eventreq;
- struct lineevent_state *le;
- struct gpio_desc *desc;
- struct file *file;
- u32 offset;
- u32 lflags;
- u32 eflags;
- int fd;
- int ret;
- int irqflags = 0;
-
- if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
- return -EFAULT;
-
- offset = eventreq.lineoffset;
- lflags = eventreq.handleflags;
- eflags = eventreq.eventflags;
-
- desc = gpiochip_get_desc(gdev->chip, offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- /* Return an error if a unknown flag is set */
- if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
- (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
- return -EINVAL;
-
- /* This is just wrong: we don't look for events on output lines */
- if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
- (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
- (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
- return -EINVAL;
-
- /* Only one bias flag can be set. */
- if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
- (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
- GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
- ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
- (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
- return -EINVAL;
-
- le = kzalloc(sizeof(*le), GFP_KERNEL);
- if (!le)
- return -ENOMEM;
- le->gdev = gdev;
- get_device(&gdev->dev);
-
- /* Make sure this is terminated */
- eventreq.consumer_label[sizeof(eventreq.consumer_label)-1] = '\0';
- if (strlen(eventreq.consumer_label)) {
- le->label = kstrdup(eventreq.consumer_label,
- GFP_KERNEL);
- if (!le->label) {
- ret = -ENOMEM;
- goto out_free_le;
- }
- }
-
- ret = gpiod_request(desc, le->label);
- if (ret)
- goto out_free_label;
- le->desc = desc;
- le->eflags = eflags;
-
- if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE)
- set_bit(FLAG_BIAS_DISABLE, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)
- set_bit(FLAG_PULL_DOWN, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)
- set_bit(FLAG_PULL_UP, &desc->flags);
-
- ret = gpiod_direction_input(desc);
- if (ret)
- goto out_free_desc;
-
- atomic_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_REQUESTED, desc);
-
- le->irq = gpiod_to_irq(desc);
- if (le->irq <= 0) {
- ret = -ENODEV;
- goto out_free_desc;
- }
-
- if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
- IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
- if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
- IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
- irqflags |= IRQF_ONESHOT;
-
- INIT_KFIFO(le->events);
- init_waitqueue_head(&le->wait);
-
- /* Request a thread to read the events */
- ret = request_threaded_irq(le->irq,
- lineevent_irq_handler,
- lineevent_irq_thread,
- irqflags,
- le->label,
- le);
- if (ret)
- goto out_free_desc;
-
- fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- ret = fd;
- goto out_free_irq;
- }
-
- file = anon_inode_getfile("gpio-event",
- &lineevent_fileops,
- le,
- O_RDONLY | O_CLOEXEC);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
- goto out_put_unused_fd;
- }
-
- eventreq.fd = fd;
- if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
- /*
- * fput() will trigger the release() callback, so do not go onto
- * the regular error cleanup path here.
- */
- fput(file);
- put_unused_fd(fd);
- return -EFAULT;
- }
-
- fd_install(fd, file);
-
- return 0;
-
-out_put_unused_fd:
- put_unused_fd(fd);
-out_free_irq:
- free_irq(le->irq, le);
-out_free_desc:
- gpiod_free(le->desc);
-out_free_label:
- kfree(le->label);
-out_free_le:
- kfree(le);
- put_device(&gdev->dev);
- return ret;
-}
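
Likewise, the removed lineevent_* code backs the V1 edge-event ABI: userspace requests an event fd with GPIO_GET_LINEEVENT_IOCTL and then read()s struct gpioevent_data records from it. A minimal sketch (chip path and offset are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpioevent_request req;
	struct gpioevent_data ev;
	int fd = open("/dev/gpiochip0", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.lineoffset = 4;				/* illustrative offset */
	req.handleflags = GPIOHANDLE_REQUEST_INPUT;
	req.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES;
	strcpy(req.consumer_label, "example-events");

	if (ioctl(fd, GPIO_GET_LINEEVENT_IOCTL, &req) < 0) {
		perror("GPIO_GET_LINEEVENT_IOCTL");
		return 1;
	}

	/* Blocking read: one record per edge, queued by the kernel's kfifo. */
	while (read(req.fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("%s edge at %llu ns\n",
		       ev.id == GPIOEVENT_EVENT_RISING_EDGE ? "rising" : "falling",
		       (unsigned long long)ev.timestamp);

	close(req.fd);
	close(fd);
	return 0;
}
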
-
-static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
- struct gpioline_info *info)
-{
- struct gpio_chip *gc = desc->gdev->chip;
- bool ok_for_pinctrl;
- unsigned long flags;
-
- /*
- * This function takes a mutex so we must check this before taking
- * the spinlock.
- *
- * FIXME: find a non-racy way to retrieve this information. Maybe a
- * lock common to both frameworks?
- */
- ok_for_pinctrl =
- pinctrl_gpio_can_use_line(gc->base + info->line_offset);
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- if (desc->name) {
- strncpy(info->name, desc->name, sizeof(info->name));
- info->name[sizeof(info->name) - 1] = '\0';
- } else {
- info->name[0] = '\0';
- }
-
- if (desc->label) {
- strncpy(info->consumer, desc->label, sizeof(info->consumer));
- info->consumer[sizeof(info->consumer) - 1] = '\0';
- } else {
- info->consumer[0] = '\0';
- }
-
- /*
- * Userspace only need to know that the kernel is using this GPIO so
- * it can't use it.
- */
- info->flags = 0;
- if (test_bit(FLAG_REQUESTED, &desc->flags) ||
- test_bit(FLAG_IS_HOGGED, &desc->flags) ||
- test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
- test_bit(FLAG_EXPORT, &desc->flags) ||
- test_bit(FLAG_SYSFS, &desc->flags) ||
- !ok_for_pinctrl)
- info->flags |= GPIOLINE_FLAG_KERNEL;
- if (test_bit(FLAG_IS_OUT, &desc->flags))
- info->flags |= GPIOLINE_FLAG_IS_OUT;
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- info->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- info->flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
- GPIOLINE_FLAG_IS_OUT);
- if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- info->flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
- GPIOLINE_FLAG_IS_OUT);
- if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
- info->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
- if (test_bit(FLAG_PULL_DOWN, &desc->flags))
- info->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
- if (test_bit(FLAG_PULL_UP, &desc->flags))
- info->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-}
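
gpio_desc_to_lineinfo() above is what fills struct gpioline_info for GPIO_GET_LINEINFO_IOCTL (and for the watch/unwatch ioctls further down). From userspace the lookup is a single ioctl per line; the chip path and offset are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpioline_info info;
	int fd = open("/dev/gpiochip0", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	info.line_offset = 4;			/* illustrative offset */

	if (ioctl(fd, GPIO_GET_LINEINFO_IOCTL, &info) < 0) {
		perror("GPIO_GET_LINEINFO_IOCTL");
		return 1;
	}

	printf("line %u: name=\"%s\" consumer=\"%s\"%s%s\n",
	       info.line_offset, info.name, info.consumer,
	       (info.flags & GPIOLINE_FLAG_KERNEL) ? " [kernel]" : "",
	       (info.flags & GPIOLINE_FLAG_IS_OUT) ? " [output]" : "");

	close(fd);
	return 0;
}
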
-
-struct gpio_chardev_data {
- struct gpio_device *gdev;
- wait_queue_head_t wait;
- DECLARE_KFIFO(events, struct gpioline_info_changed, 32);
- struct notifier_block lineinfo_changed_nb;
- unsigned long *watched_lines;
-};
-
-/*
- * gpio_ioctl() - ioctl handler for the GPIO chardev
- */
-static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct gpio_chardev_data *priv = filp->private_data;
- struct gpio_device *gdev = priv->gdev;
- struct gpio_chip *gc = gdev->chip;
- void __user *ip = (void __user *)arg;
- struct gpio_desc *desc;
- __u32 offset;
- int hwgpio;
-
- /* We fail any subsequent ioctl():s when the chip is gone */
- if (!gc)
- return -ENODEV;
-
- /* Fill in the struct and pass to userspace */
- if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
- struct gpiochip_info chipinfo;
-
- memset(&chipinfo, 0, sizeof(chipinfo));
-
- strncpy(chipinfo.name, dev_name(&gdev->dev),
- sizeof(chipinfo.name));
- chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
- strncpy(chipinfo.label, gdev->label,
- sizeof(chipinfo.label));
- chipinfo.label[sizeof(chipinfo.label)-1] = '\0';
- chipinfo.lines = gdev->ngpio;
- if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
- return -EFAULT;
- return 0;
- } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
- struct gpioline_info lineinfo;
-
- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
- return -EFAULT;
-
- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- hwgpio = gpio_chip_hwgpio(desc);
-
- gpio_desc_to_lineinfo(desc, &lineinfo);
-
- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
- return -EFAULT;
- return 0;
- } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
- return linehandle_create(gdev, ip);
- } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
- return lineevent_create(gdev, ip);
- } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
- struct gpioline_info lineinfo;
-
- if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
- return -EFAULT;
-
- desc = gpiochip_get_desc(gc, lineinfo.line_offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- hwgpio = gpio_chip_hwgpio(desc);
-
- if (test_bit(hwgpio, priv->watched_lines))
- return -EBUSY;
-
- gpio_desc_to_lineinfo(desc, &lineinfo);
-
- if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
- return -EFAULT;
-
- set_bit(hwgpio, priv->watched_lines);
- return 0;
- } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
- if (copy_from_user(&offset, ip, sizeof(offset)))
- return -EFAULT;
-
- desc = gpiochip_get_desc(gc, offset);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- hwgpio = gpio_chip_hwgpio(desc);
-
- if (!test_bit(hwgpio, priv->watched_lines))
- return -EBUSY;
-
- clear_bit(hwgpio, priv->watched_lines);
- return 0;
- }
- return -EINVAL;
-}
-
-#ifdef CONFIG_COMPAT
-static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static struct gpio_chardev_data *
-to_gpio_chardev_data(struct notifier_block *nb)
-{
- return container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
-}
-
-static int lineinfo_changed_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct gpio_chardev_data *priv = to_gpio_chardev_data(nb);
- struct gpioline_info_changed chg;
- struct gpio_desc *desc = data;
- int ret;
-
- if (!test_bit(gpio_chip_hwgpio(desc), priv->watched_lines))
- return NOTIFY_DONE;
-
- memset(&chg, 0, sizeof(chg));
- chg.info.line_offset = gpio_chip_hwgpio(desc);
- chg.event_type = action;
- chg.timestamp = ktime_get_ns();
- gpio_desc_to_lineinfo(desc, &chg.info);
-
- ret = kfifo_in_spinlocked(&priv->events, &chg, 1, &priv->wait.lock);
- if (ret)
- wake_up_poll(&priv->wait, EPOLLIN);
- else
- pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
-
- return NOTIFY_OK;
-}
-
-static __poll_t lineinfo_watch_poll(struct file *filep,
- struct poll_table_struct *pollt)
-{
- struct gpio_chardev_data *priv = filep->private_data;
- __poll_t events = 0;
-
- poll_wait(filep, &priv->wait, pollt);
-
- if (!kfifo_is_empty_spinlocked_noirqsave(&priv->events,
- &priv->wait.lock))
- events = EPOLLIN | EPOLLRDNORM;
-
- return events;
-}
-
-static ssize_t lineinfo_watch_read(struct file *filep, char __user *buf,
- size_t count, loff_t *off)
-{
- struct gpio_chardev_data *priv = filep->private_data;
- struct gpioline_info_changed event;
- ssize_t bytes_read = 0;
- int ret;
-
- if (count < sizeof(event))
- return -EINVAL;
-
- do {
- spin_lock(&priv->wait.lock);
- if (kfifo_is_empty(&priv->events)) {
- if (bytes_read) {
- spin_unlock(&priv->wait.lock);
- return bytes_read;
- }
-
- if (filep->f_flags & O_NONBLOCK) {
- spin_unlock(&priv->wait.lock);
- return -EAGAIN;
- }
-
- ret = wait_event_interruptible_locked(priv->wait,
- !kfifo_is_empty(&priv->events));
- if (ret) {
- spin_unlock(&priv->wait.lock);
- return ret;
- }
- }
-
- ret = kfifo_out(&priv->events, &event, 1);
- spin_unlock(&priv->wait.lock);
- if (ret != 1) {
- ret = -EIO;
- break;
- /* We should never get here. See lineevent_read(). */
- }
-
- if (copy_to_user(buf + bytes_read, &event, sizeof(event)))
- return -EFAULT;
- bytes_read += sizeof(event);
- } while (count >= bytes_read + sizeof(event));
-
- return bytes_read;
-}
-
-/**
- * gpio_chrdev_open() - open the chardev for ioctl operations
- * @inode: inode for this chardev
- * @filp: file struct for storing private data
- * Returns 0 on success
- */
-static int gpio_chrdev_open(struct inode *inode, struct file *filp)
-{
- struct gpio_device *gdev = container_of(inode->i_cdev,
- struct gpio_device, chrdev);
- struct gpio_chardev_data *priv;
- int ret = -ENOMEM;
-
- /* Fail on open if the backing gpiochip is gone */
- if (!gdev->chip)
- return -ENODEV;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
- if (!priv->watched_lines)
- goto out_free_priv;
-
- init_waitqueue_head(&priv->wait);
- INIT_KFIFO(priv->events);
- priv->gdev = gdev;
-
- priv->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
- ret = atomic_notifier_chain_register(&gdev->notifier,
- &priv->lineinfo_changed_nb);
- if (ret)
- goto out_free_bitmap;
-
- get_device(&gdev->dev);
- filp->private_data = priv;
-
- ret = nonseekable_open(inode, filp);
- if (ret)
- goto out_unregister_notifier;
-
- return ret;
-
-out_unregister_notifier:
- atomic_notifier_chain_unregister(&gdev->notifier,
- &priv->lineinfo_changed_nb);
-out_free_bitmap:
- bitmap_free(priv->watched_lines);
-out_free_priv:
- kfree(priv);
- return ret;
-}
-
-/**
- * gpio_chrdev_release() - close chardev after ioctl operations
- * @inode: inode for this chardev
- * @filp: file struct for storing private data
- * Returns 0 on success
- */
-static int gpio_chrdev_release(struct inode *inode, struct file *filp)
-{
- struct gpio_chardev_data *priv = filp->private_data;
- struct gpio_device *gdev = priv->gdev;
-
- bitmap_free(priv->watched_lines);
- atomic_notifier_chain_unregister(&gdev->notifier,
- &priv->lineinfo_changed_nb);
- put_device(&gdev->dev);
- kfree(priv);
-
- return 0;
-}
-
-static const struct file_operations gpio_fileops = {
- .release = gpio_chrdev_release,
- .open = gpio_chrdev_open,
- .poll = lineinfo_watch_poll,
- .read = lineinfo_watch_read,
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .unlocked_ioctl = gpio_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = gpio_ioctl_compat,
-#endif
-};
-
static void gpiodevice_release(struct device *dev)
{
struct gpio_device *gdev = dev_get_drvdata(dev);
@@@ -436,10 -1539,17 +436,10 @@@ static int gpiochip_setup_dev(struct gp
{
int ret;
- cdev_init(&gdev->chrdev, &gpio_fileops);
- gdev->chrdev.owner = THIS_MODULE;
- gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
-
- ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
+ ret = gpiolib_cdev_register(gdev, gpio_devt);
if (ret)
return ret;
- chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
- MAJOR(gpio_devt), gdev->id);
-
ret = gpiochip_sysfs_register(gdev);
if (ret)
goto err_remove_device;
@@@ -452,7 -1562,7 +452,7 @@@
return 0;
err_remove_device:
- cdev_device_del(&gdev->chrdev, &gdev->dev);
+ gpiolib_cdev_unregister(gdev);
return ret;
}
@@@ -615,7 -1725,7 +615,7 @@@ int gpiochip_add_data_with_key(struct g
spin_unlock_irqrestore(&gpio_lock, flags);
- ATOMIC_INIT_NOTIFIER_HEAD(&gdev->notifier);
+ BLOCKING_INIT_NOTIFIER_HEAD(&gdev->notifier);
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
@@@ -774,7 -1884,7 +774,7 @@@ void gpiochip_remove(struct gpio_chip *
* be removed, else it will be dangling until the last user is
* gone.
*/
- cdev_device_del(&gdev->chrdev, &gdev->dev);
+ gpiolib_cdev_unregister(gdev);
put_device(&gdev->dev);
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
@@@ -2049,8 -3159,8 +2049,8 @@@ static bool gpiod_free_commit(struct gp
}
spin_unlock_irqrestore(&gpio_lock, flags);
- atomic_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_RELEASED, desc);
+ blocking_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_RELEASED, desc);
return ret;
}
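
This hunk, together with the two matching ones below, moves gdev->notifier from an atomic to a blocking notifier chain, which lets the registered callbacks run in process context and sleep. A minimal sketch of the blocking-chain API, with hypothetical names throughout:

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(foo_chain);	/* hypothetical chain */

static int foo_listener(struct notifier_block *nb, unsigned long action,
			void *data)
{
	/* Process context: sleeping allocations, mutexes etc. are allowed here. */
	return NOTIFY_OK;
}

static struct notifier_block foo_nb = {
	.notifier_call = foo_listener,
};

static int foo_register(void)
{
	return blocking_notifier_chain_register(&foo_chain, &foo_nb);
}

static void foo_announce(void *payload)
{
	blocking_notifier_call_chain(&foo_chain, 1 /* illustrative action */, payload);
}
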
@@@ -2595,9 -3705,10 +2595,9 @@@ int gpiod_get_array_value_complex(bool
bitmap_xor(value_bitmap, value_bitmap,
array_info->invert_mask, array_size);
- if (bitmap_full(array_info->get_mask, array_size))
- return 0;
-
i = find_first_zero_bit(array_info->get_mask, array_size);
+ if (i == array_size)
+ return 0;
} else {
array_info = NULL;
}
@@@ -2878,9 -3989,10 +2878,9 @@@ int gpiod_set_array_value_complex(bool
gpio_chip_set_multiple(array_info->chip, array_info->set_mask,
value_bitmap);
- if (bitmap_full(array_info->set_mask, array_size))
- return 0;
-
i = find_first_zero_bit(array_info->set_mask, array_size);
+ if (i == array_size)
+ return 0;
} else {
array_info = NULL;
}
@@@ -3927,8 -5039,8 +3927,8 @@@ struct gpio_desc *__must_check gpiod_ge
return ERR_PTR(ret);
}
- atomic_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_REQUESTED, desc);
+ blocking_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
return desc;
}
@@@ -3995,8 -5107,8 +3995,8 @@@ struct gpio_desc *fwnode_get_named_gpio
return ERR_PTR(ret);
}
- atomic_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_REQUESTED, desc);
+ blocking_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
return desc;
}
@@@ -4417,7 -5529,7 +4417,7 @@@ static int gpiolib_open(struct inode *i
static const struct file_operations gpiolib_operations = {
.owner = THIS_MODULE,
.open = gpiolib_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
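
The recurring `.read = seq_read` to `.read_iter = seq_read_iter` conversion in these hunks moves seq_file readers onto the iov_iter-based read path. The shape of a converted debugfs file, sketched with a hypothetical bar_show():

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int bar_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "value=%d\n", 42);	/* illustrative payload */
	return 0;
}

static int bar_open(struct inode *inode, struct file *file)
{
	return single_open(file, bar_show, inode->i_private);
}

static const struct file_operations bar_fops = {
	.owner		= THIS_MODULE,
	.open		= bar_open,
	.read_iter	= seq_read_iter,	/* iov_iter replacement for seq_read */
	.llseek		= seq_lseek,
	.release	= single_release,
};
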
diff --combined drivers/gpu/drm/arm/malidp_drv.c
index 69fee05c256c,029a26def9f7..c9e18eeddf2e
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@@ -349,11 -349,11 +349,11 @@@ malidp_verify_afbc_framebuffer_size(str
if (objs->size < afbc_size) {
DRM_DEBUG_KMS("buffer size (%zu) too small for AFBC buffer size = %u\n",
objs->size, afbc_size);
- drm_gem_object_put_unlocked(objs);
+ drm_gem_object_put(objs);
return false;
}
- drm_gem_object_put_unlocked(objs);
+ drm_gem_object_put(objs);
return true;
}
@@@ -542,7 -542,7 +542,7 @@@ static ssize_t malidp_debugfs_write(str
static const struct file_operations malidp_debugfs_fops = {
.owner = THIS_MODULE,
.open = malidp_debugfs_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = malidp_debugfs_write,
.llseek = seq_lseek,
.release = single_release,
@@@ -563,7 -563,16 +563,7 @@@ static void malidp_debugfs_init(struct
static struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = malidp_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
#ifdef CONFIG_DEBUG_FS
.debugfs_init = malidp_debugfs_init,
#endif
@@@ -657,11 -666,20 +657,11 @@@ static ssize_t core_id_show(struct devi
static DEVICE_ATTR_RO(core_id);
-static int malidp_init_sysfs(struct device *dev)
-{
- int ret = device_create_file(dev, &dev_attr_core_id);
-
- if (ret)
- DRM_ERROR("failed to create device file for core_id\n");
-
- return ret;
-}
-
-static void malidp_fini_sysfs(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_core_id);
-}
+static struct attribute *mali_dp_attrs[] = {
+ &dev_attr_core_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mali_dp);
#define MAX_OUTPUT_CHANNELS 3
@@@ -823,6 -841,10 +823,6 @@@ static int malidp_bind(struct device *d
if (ret < 0)
goto query_hw_fail;
- ret = malidp_init_sysfs(dev);
- if (ret)
- goto init_fail;
-
/* Set the CRTC's port so that the encoder component can find it */
malidp->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);
@@@ -848,6 -870,7 +848,6 @@@
drm->irq_enabled = true;
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
- drm_crtc_vblank_reset(&malidp->crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
goto vblank_fail;
@@@ -879,6 -902,8 +879,6 @@@ irq_init_fail
bind_fail:
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
-init_fail:
- malidp_fini_sysfs(dev);
malidp_fini(drm);
query_hw_fail:
pm_runtime_put(dev);
@@@ -904,13 -929,15 +904,13 @@@ static void malidp_unbind(struct devic
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
pm_runtime_get_sync(dev);
- drm_crtc_vblank_off(&malidp->crtc);
+ drm_atomic_helper_shutdown(drm);
malidp_se_irq_fini(hwdev);
malidp_de_irq_fini(hwdev);
drm->irq_enabled = false;
- drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
- malidp_fini_sysfs(dev);
malidp_fini(drm);
pm_runtime_put(dev);
if (pm_runtime_enabled(dev))
@@@ -1006,7 -1033,6 +1006,7 @@@ static struct platform_driver malidp_pl
.name = "mali-dp",
.pm = &malidp_pm_ops,
.of_match_table = malidp_drm_of_match,
+ .dev_groups = mali_dp_groups,
},
};
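
The malidp change above replaces open-coded device_create_file()/device_remove_file() calls with an attribute group hung off the driver's .dev_groups pointer, which the driver core registers and removes automatically around probe and remove. The general pattern, sketched for a hypothetical platform driver:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static ssize_t revision_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 1);		/* illustrative value */
}
static DEVICE_ATTR_RO(revision);

static struct attribute *foo_attrs[] = {
	&dev_attr_revision.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);				/* provides foo_groups */

static int foo_probe(struct platform_device *pdev)
{
	return 0;	/* sysfs files appear without any explicit create call */
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name		= "foo",
		.dev_groups	= foo_groups,
	},
};
module_platform_driver(foo_driver);
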
diff --combined drivers/gpu/drm/drm_debugfs.c
index 3d7182001004,a0a1a598826f..c4b2c861101f
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@@ -154,7 -154,7 +154,7 @@@ static int drm_debugfs_open(struct inod
static const struct file_operations drm_debugfs_fops = {
.owner = THIS_MODULE,
.open = drm_debugfs_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -311,13 -311,13 +311,13 @@@ static ssize_t connector_write(struct f
buf[len] = '\0';
- if (!strcmp(buf, "on"))
+ if (sysfs_streq(buf, "on"))
connector->force = DRM_FORCE_ON;
- else if (!strcmp(buf, "digital"))
+ else if (sysfs_streq(buf, "digital"))
connector->force = DRM_FORCE_ON_DIGITAL;
- else if (!strcmp(buf, "off"))
+ else if (sysfs_streq(buf, "off"))
connector->force = DRM_FORCE_OFF;
- else if (!strcmp(buf, "unspecified"))
+ else if (sysfs_streq(buf, "unspecified"))
connector->force = DRM_FORCE_UNSPECIFIED;
else
return -EINVAL;
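
connector_write() above switches from strcmp() to sysfs_streq() because `echo on > force` hands the kernel "on\n", which strcmp() rejects; sysfs_streq() treats an optional trailing newline on either side as part of the match. A tiny illustrative helper:

#include <linux/errno.h>
#include <linux/string.h>

/* Accepts "on", "on\n", "off" and "off\n"; anything else is an error. */
static int parse_on_off(const char *buf)
{
	if (sysfs_streq(buf, "on"))
		return 1;
	if (sysfs_streq(buf, "off"))
		return 0;
	return -EINVAL;
}
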
@@@ -376,28 -376,10 +376,28 @@@ static ssize_t edid_write(struct file *
return (ret) ? ret : len;
}
+/*
+ * Returns the min and max vrr vfreq through the connector's debugfs file.
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
+ */
+static int vrr_range_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "Min: %u\n",
(u8)connector->display_info.monitor_range.min_vfreq);
+ seq_printf(m, "Max: %u\n",
(u8)connector->display_info.monitor_range.max_vfreq);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(vrr_range);
+
static const struct file_operations drm_edid_fops = {
.owner = THIS_MODULE,
.open = edid_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = edid_write
@@@ -407,7 -389,7 +407,7 @@@
static const struct file_operations drm_connector_fops = {
.owner = THIS_MODULE,
.open = connector_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = connector_write
@@@ -431,10 -413,6 +431,10 @@@ void drm_debugfs_connector_add(struct d
/* edid */
debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root, connector,
&drm_edid_fops);
+
+ /* vrr range */
+ debugfs_create_file("vrr_range", S_IRUGO, root, connector,
+ &vrr_range_fops);
}
void drm_debugfs_connector_remove(struct drm_connector *connector)
diff --combined drivers/gpu/drm/drm_mipi_dbi.c
index 230c4fd7131c,d0298376d28a..2bef6c27855b
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@@ -217,7 -217,7 +217,7 @@@ int mipi_dbi_buf_copy(void *dst, struc
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab16(dst, src, fb, clip);
+ drm_fb_swab(dst, src, fb, clip, !import_attach);
else
drm_fb_memcpy(dst, src, fb, clip);
break;
@@@ -225,8 -225,9 +225,8 @@@
drm_fb_xrgb8888_to_rgb565(dst, src, fb, clip, swap);
break;
default:
- dev_err_once(fb->dev->dev, "Format is not supported: %s\n",
- drm_get_format_name(fb->format->format,
- &format_name));
+ drm_err_once(fb->dev, "Format is not supported: %s\n",
+ drm_get_format_name(fb->format->format, &format_name));
return -EINVAL;
}
@@@ -267,7 -268,7 +267,7 @@@ static void mipi_dbi_fb_dirty(struct dr
bool full;
void *tr;
- if (!dbidev->enabled)
+ if (WARN_ON(!fb))
return;
if (!drm_dev_enter(fb->dev, &idx))
@@@ -294,7 -295,7 +294,7 @@@
width * height * 2);
err_msg:
if (ret)
- dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+ drm_err_once(fb->dev, "Failed to update display %d\n", ret);
drm_dev_exit(idx);
}
@@@ -313,9 -314,6 +313,9 @@@ void mipi_dbi_pipe_update(struct drm_si
struct drm_plane_state *state = pipe->plane.state;
struct drm_rect rect;
+ if (!pipe->crtc.state->active)
+ return;
+
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
mipi_dbi_fb_dirty(state->fb, &rect);
}
@@@ -327,8 -325,9 +327,8 @@@ EXPORT_SYMBOL(mipi_dbi_pipe_update)
* @crtc_state: CRTC state
* @plane_state: Plane state
*
- * This function sets &mipi_dbi->enabled, flushes the whole framebuffer and
- * enables the backlight. Drivers can use this in their
- * &drm_simple_display_pipe_funcs->enable callback.
+ * Flushes the whole framebuffer and enables the backlight. Drivers can use this
+ * in their &drm_simple_display_pipe_funcs->enable callback.
*
* Note: Drivers which don't use mipi_dbi_pipe_update() because they have custom
* framebuffer flushing, can't use this function since they both use the same
@@@ -350,6 -349,7 +350,6 @@@ void mipi_dbi_enable_flush(struct mipi_
if (!drm_dev_enter(&dbidev->drm, &idx))
return;
- dbidev->enabled = true;
mipi_dbi_fb_dirty(fb, &rect);
backlight_enable(dbidev->backlight);
@@@ -390,8 -390,13 +390,8 @@@ void mipi_dbi_pipe_disable(struct drm_s
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
- if (!dbidev->enabled)
- return;
-
DRM_DEBUG_KMS("\n");
- dbidev->enabled = false;
-
if (dbidev->backlight)
backlight_disable(dbidev->backlight);
else
@@@ -918,7 -923,7 +918,7 @@@ static int mipi_dbi_spi1_transfer(struc
}
}
- tr.len = chunk;
+ tr.len = chunk * 2;
len -= chunk;
ret = spi_sync(spi, &m);
@@@ -1274,7 -1279,7 +1274,7 @@@ static int mipi_dbi_debugfs_command_ope
static const struct file_operations mipi_dbi_debugfs_command_fops = {
.owner = THIS_MODULE,
.open = mipi_dbi_debugfs_command_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = mipi_dbi_debugfs_command_write,
diff --combined drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 3644752cc5ec,147448e6f750..067f24c47513
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@@ -125,7 -125,7 +125,7 @@@ static int i915_ips_status(struct seq_f
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
- yesno(i915_modparams.enable_ips));
+ yesno(dev_priv->params.enable_ips));
if (INTEL_GEN(dev_priv) >= 8) {
seq_puts(m, "Currently: unknown\n");
@@@ -1009,7 -1009,7 +1009,7 @@@ static ssize_t i915_ipc_status_write(st
static const struct file_operations i915_ipc_status_fops = {
.owner = THIS_MODULE,
.open = i915_ipc_status_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = i915_ipc_status_write
@@@ -1099,10 -1099,10 +1099,10 @@@ static void drrs_status_per_crtc(struc
seq_puts(m, "\n\t\t");
if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
- vrefresh = panel->fixed_mode->vrefresh;
+ vrefresh = drm_mode_vrefresh(panel->fixed_mode);
} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
- vrefresh = panel->downclock_mode->vrefresh;
+ vrefresh = drm_mode_vrefresh(panel->downclock_mode);
} else {
seq_printf(m, "DRRS_State: Unknown(%d)\n",
drrs->refresh_rate_type);
@@@ -1194,7 -1194,7 +1194,7 @@@ static int i915_dp_mst_info(struct seq_
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
+ struct intel_digital_port *dig_port;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
@@@ -1207,14 -1207,14 +1207,14 @@@
if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
continue;
- intel_dig_port = enc_to_dig_port(intel_encoder);
- if (!intel_dig_port->dp.can_mst)
+ dig_port = enc_to_dig_port(intel_encoder);
+ if (!dig_port->dp.can_mst)
continue;
seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
- drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
}
drm_connector_list_iter_end(&conn_iter);
@@@ -1326,7 -1326,7 +1326,7 @@@ static int i915_displayport_test_active
static const struct file_operations i915_displayport_test_active_fops = {
.owner = THIS_MODULE,
.open = i915_displayport_test_active_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = i915_displayport_test_active_write
@@@ -1627,7 -1627,7 +1627,7 @@@ static ssize_t cur_wm_latency_write(str
static const struct file_operations i915_pri_wm_latency_fops = {
.owner = THIS_MODULE,
.open = pri_wm_latency_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = pri_wm_latency_write
@@@ -1636,7 -1636,7 +1636,7 @@@
static const struct file_operations i915_spr_wm_latency_fops = {
.owner = THIS_MODULE,
.open = spr_wm_latency_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = spr_wm_latency_write
@@@ -1645,7 -1645,7 +1645,7 @@@
static const struct file_operations i915_cur_wm_latency_fops = {
.owner = THIS_MODULE,
.open = cur_wm_latency_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = cur_wm_latency_write
@@@ -1728,7 -1728,7 +1728,7 @@@ static int i915_hpd_storm_ctl_open(stru
static const struct file_operations i915_hpd_storm_ctl_fops = {
.owner = THIS_MODULE,
.open = i915_hpd_storm_ctl_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = i915_hpd_storm_ctl_write
@@@ -1801,7 -1801,7 +1801,7 @@@ static ssize_t i915_hpd_short_storm_ctl
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
.owner = THIS_MODULE,
.open = i915_hpd_short_storm_ctl_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = i915_hpd_short_storm_ctl_write,
@@@ -2179,7 -2179,7 +2179,7 @@@ static int i915_dsc_fec_support_open(st
static const struct file_operations i915_dsc_fec_support_fops = {
.owner = THIS_MODULE,
.open = i915_dsc_fec_support_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = i915_dsc_fec_support_write
@@@ -2218,8 -2218,7 +2218,8 @@@ int intel_connector_debugfs_add(struct
}
if (INTEL_GEN(dev_priv) >= 10 &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
+ !to_intel_connector(connector)->mst_port) ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP))
debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
connector, &i915_dsc_fec_support_fops);
diff --combined drivers/gpu/drm/i915/i915_debugfs_params.c
index 4e2b077692cb,58ef14b028ea..19518e805bd6
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@@ -48,7 -48,7 +48,7 @@@ static ssize_t i915_param_int_write(str
static const struct file_operations i915_param_int_fops = {
.owner = THIS_MODULE,
.open = i915_param_int_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = i915_param_int_write,
.llseek = default_llseek,
.release = single_release,
@@@ -57,7 -57,7 +57,7 @@@
static const struct file_operations i915_param_int_fops_ro = {
.owner = THIS_MODULE,
.open = i915_param_int_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = default_llseek,
.release = single_release,
};
@@@ -101,7 -101,7 +101,7 @@@ static ssize_t i915_param_uint_write(st
static const struct file_operations i915_param_uint_fops = {
.owner = THIS_MODULE,
.open = i915_param_uint_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = i915_param_uint_write,
.llseek = default_llseek,
.release = single_release,
@@@ -110,7 -110,7 +110,7 @@@
static const struct file_operations i915_param_uint_fops_ro = {
.owner = THIS_MODULE,
.open = i915_param_uint_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = default_llseek,
.release = single_release,
};
@@@ -138,6 -138,9 +138,6 @@@ static ssize_t i915_param_charp_write(s
char **s = m->private;
char *new, *old;
- /* FIXME: remove locking after params aren't the module params */
- kernel_param_lock(THIS_MODULE);
-
old = *s;
new = strndup_user(ubuf, PAGE_SIZE);
if (IS_ERR(new)) {
@@@ -149,13 -152,15 +149,13 @@@
kfree(old);
out:
- kernel_param_unlock(THIS_MODULE);
-
return len;
}
static const struct file_operations i915_param_charp_fops = {
.owner = THIS_MODULE,
.open = i915_param_charp_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = i915_param_charp_write,
.llseek = default_llseek,
.release = single_release,
@@@ -164,7 -169,7 +164,7 @@@
static const struct file_operations i915_param_charp_fops_ro = {
.owner = THIS_MODULE,
.open = i915_param_charp_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = default_llseek,
.release = single_release,
};
@@@ -224,7 -229,7 +224,7 @@@ _i915_param_create_file(struct dentry *
struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
- struct i915_params *params = &i915_modparams;
+ struct i915_params *params = &i915->params;
struct dentry *dir;
dir = debugfs_create_dir("i915_params", minor->debugfs_root);
diff --combined drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index f272a8d0f95b,2fa8d07979eb..e5519da1e510
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@@ -389,14 -389,14 +389,14 @@@ static void dpu_crtc_frame_event_cb(voi
spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
if (!fevent) {
- DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+ DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
return;
}
fevent->event = event;
fevent->crtc = crtc;
fevent->ts = ktime_get();
- kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
+ kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
@@@ -1256,7 -1256,7 +1256,7 @@@ static const struct file_operations __p
.owner = THIS_MODULE, \
.open = __prefix ## _open, \
.release = single_release, \
- .read = seq_read, \
+ .read_iter = seq_read_iter, \
.llseek = seq_lseek, \
}
@@@ -1283,7 -1283,7 +1283,7 @@@ static int _dpu_crtc_init_debugfs(struc
static const struct file_operations debugfs_status_fops = {
.open = _dpu_debugfs_status_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index a97f6d2e5a08,40a7ce44f717..cc36f5480c60
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@@ -208,36 -208,6 +208,36 @@@ struct dpu_encoder_virt
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
+static u32 dither_matrix[DITHER_MATRIX_SZ] = {
+ 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
+};
+
+static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
+{
+ struct dpu_hw_dither_cfg dither_cfg = { 0 };
+
+ if (!hw_pp->ops.setup_dither)
+ return;
+
+ switch (bpc) {
+ case 6:
+ dither_cfg.c0_bitdepth = 6;
+ dither_cfg.c1_bitdepth = 6;
+ dither_cfg.c2_bitdepth = 6;
+ dither_cfg.c3_bitdepth = 6;
+ dither_cfg.temporal_en = 0;
+ break;
+ default:
+ hw_pp->ops.setup_dither(hw_pp, NULL);
+ return;
+ }
+
+ memcpy(&dither_cfg.matrix, dither_matrix,
+ sizeof(u32) * DITHER_MATRIX_SZ);
+
+ hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
+}
+
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
{
@@@ -529,6 -499,23 +529,6 @@@ void dpu_encoder_helper_split_config
}
}
-static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
- struct drm_display_mode *adj_mode)
-{
- struct drm_display_mode *cur_mode;
-
- if (!connector || !adj_mode)
- return;
-
- list_for_each_entry(cur_mode, &connector->modes, head) {
- if (cur_mode->vdisplay == adj_mode->vdisplay &&
- cur_mode->hdisplay == adj_mode->hdisplay &&
- drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) {
- adj_mode->private_flags |= cur_mode->private_flags;
- }
- }
-}
-
static struct msm_display_topology dpu_encoder_get_topology(
struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
@@@ -602,6 -589,15 +602,6 @@@ static int dpu_encoder_virt_atomic_chec
global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_atomic_check(DRMID(drm_enc));
- /*
- * display drivers may populate private fields of the drm display mode
- * structure while registering possible modes of a connector with DRM.
- * These private fields are not populated back while DRM invokes
- * the mode_set callbacks. This module retrieves and populates the
- * private fields of the given mode.
- */
- _dpu_encoder_adjust_mode(conn_state->connector, adj_mode);
-
/* perform atomic check on the first physical encoder (master) */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@@ -634,7 -630,8 +634,7 @@@
}
}
- trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
- adj_mode->private_flags);
+ trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
return ret;
}
@@@ -1088,7 -1085,7 +1088,7 @@@ static void _dpu_encoder_virt_enable_he
{
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
+ int i;
if (!drm_enc || !drm_enc->dev) {
DPU_ERROR("invalid parameters\n");
@@@ -1096,6 -1093,7 +1096,6 @@@
}
priv = drm_enc->dev->dev_private;
- dpu_kms = to_dpu_kms(priv->kms);
dpu_enc = to_dpu_encoder_virt(drm_enc);
if (!dpu_enc || !dpu_enc->cur_master) {
@@@ -1103,17 -1101,13 +1103,17 @@@
return;
}
- if (dpu_enc->cur_master->hw_mdptop &&
- dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
- dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
- dpu_enc->cur_master->hw_mdptop,
- dpu_kms->catalog);
-
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
+ !WARN_ON(dpu_enc->num_phys_encs == 0)) {
+ unsigned bpc = dpu_enc->phys_encs[0]->connector->display_info.bpc;
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ if (!dpu_enc->hw_pp[i])
+ continue;
+ _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
+ }
+ }
}
void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
@@@ -1891,7 -1885,7 +1891,7 @@@ static int _dpu_encoder_init_debugfs(st
static const struct file_operations debugfs_status_fops = {
.open = _dpu_encoder_debugfs_status_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index c0a4d4e16d82,282424a27262..ee6f86b6f0b5
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@@ -10,7 -10,6 +10,7 @@@
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/of_irq.h>
+#include <linux/pm_opp.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
@@@ -46,6 -45,20 +46,6 @@@
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
-static unsigned long dpu_iomap_size(struct platform_device *pdev,
- const char *name)
-{
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
- if (!res) {
- DRM_ERROR("failed to get memory resource: %s\n", name);
- return 0;
- }
-
- return resource_size(res);
-}
-
#ifdef CONFIG_DEBUG_FS
static int _dpu_danger_signal_status(struct seq_file *s,
bool danger_status)
@@@ -94,7 -107,7 +94,7 @@@ static const struct file_operations __p
.owner = THIS_MODULE, \
.open = __prefix ## _open, \
.release = single_release, \
- .read = seq_read, \
+ .read_iter = seq_read_iter, \
.llseek = seq_lseek, \
}
@@@ -163,7 -176,7 +163,7 @@@ static int dpu_debugfs_open_regset32(st
static const struct file_operations dpu_fops_regset32 = {
.open = dpu_debugfs_open_regset32,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -831,6 -844,7 +831,6 @@@ static int dpu_kms_hw_init(struct msm_k
goto error;
}
DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
- dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif",
"vbif");
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
@@@ -839,16 -853,22 +839,16 @@@
dpu_kms->vbif[VBIF_RT] = NULL;
goto error;
}
- dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
- dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt",
"vbif_nrt");
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt",
"vbif_nrt");
if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
dpu_kms->vbif[VBIF_NRT] = NULL;
DPU_DEBUG("VBIF NRT is not defined");
- } else {
- dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
- "vbif_nrt");
}
- dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma",
"regdma");
+ dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma",
"regdma");
if (IS_ERR(dpu_kms->reg_dma)) {
dpu_kms->reg_dma = NULL;
DPU_DEBUG("REG_DMA is not defined");
- } else {
- dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
}
pm_runtime_get_sync(&dpu_kms->pdev->dev);
@@@ -1005,24 -1025,11 +1005,24 @@@ static int dpu_bind(struct device *dev
if (!dpu_kms)
return -ENOMEM;
+ dpu_kms->opp_table = dev_pm_opp_set_clkname(dev, "core");
+ if (IS_ERR(dpu_kms->opp_table))
+ return PTR_ERR(dpu_kms->opp_table);
+ /* OPP table is optional */
+ ret = dev_pm_opp_of_add_table(dev);
+ if (!ret) {
+ dpu_kms->has_opp_table = true;
+ } else if (ret != -ENODEV) {
+ dev_err(dev, "invalid OPP table in device tree\n");
+ dev_pm_opp_put_clkname(dpu_kms->opp_table);
+ return ret;
+ }
+
mp = &dpu_kms->mp;
ret = msm_dss_parse_clock(pdev, mp);
if (ret) {
DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
- return ret;
+ goto err;
}
platform_set_drvdata(pdev, dpu_kms);
@@@ -1036,11 -1043,6 +1036,11 @@@
priv->kms = &dpu_kms->base;
return ret;
+err:
+ if (dpu_kms->has_opp_table)
+ dev_pm_opp_of_remove_table(dev);
+ dev_pm_opp_put_clkname(dpu_kms->opp_table);
+ return ret;
}
static void dpu_unbind(struct device *dev, struct device *master, void *data)
@@@ -1055,10 -1057,6 +1055,10 @@@
if (dpu_kms->rpm_enabled)
pm_runtime_disable(&pdev->dev);
+
+ if (dpu_kms->has_opp_table)
+ dev_pm_opp_of_remove_table(dev);
+ dev_pm_opp_put_clkname(dpu_kms->opp_table);
}
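
The dpu_kms changes adopt the OPP library for the core clock: bind a clock name to an OPP table, optionally load the table from devicetree, vote rates with dev_pm_opp_set_rate(), and undo both on teardown. A sketch of that skeleton for a hypothetical device; the "core" clock name mirrors the hunk above, everything else is an assumption:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/pm_opp.h>

static int foo_opp_init(struct device *dev, struct opp_table **tablep,
			bool *has_table)
{
	struct opp_table *table;
	int ret;

	*has_table = false;
	table = dev_pm_opp_set_clkname(dev, "core");
	if (IS_ERR(table))
		return PTR_ERR(table);

	ret = dev_pm_opp_of_add_table(dev);	/* OPP table in DT is optional */
	if (!ret) {
		*has_table = true;
	} else if (ret != -ENODEV) {
		dev_pm_opp_put_clkname(table);
		return ret;
	}

	*tablep = table;
	return 0;
}

static void foo_opp_fini(struct device *dev, struct opp_table *table,
			 bool has_table)
{
	dev_pm_opp_set_rate(dev, 0);		/* drop any outstanding rate vote */
	if (has_table)
		dev_pm_opp_of_remove_table(dev);
	dev_pm_opp_put_clkname(table);
}
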
static const struct component_ops dpu_ops = {
@@@ -1084,8 -1082,6 +1084,8 @@@ static int __maybe_unused dpu_runtime_s
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct dss_module_power *mp = &dpu_kms->mp;
+ /* Drop the performance state vote */
+ dev_pm_opp_set_rate(dev, 0);
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
@@@ -1119,8 -1115,6 +1119,8 @@@ static int __maybe_unused dpu_runtime_r
static const struct dev_pm_ops dpu_pm_ops = {
SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id dpu_dt_match[] = {
diff --combined drivers/gpu/drm/nouveau/nouveau_debugfs.c
index c2bc05eb2e54,838394c6fd37..bccff7b3e7cc
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@@ -54,10 -54,8 +54,10 @@@ nouveau_debugfs_strap_peek(struct seq_f
int ret;
ret = pm_runtime_get_sync(drm->dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(drm->dev->dev);
return ret;
+ }
seq_printf(m, "0x%08x\n",
nvif_rd32(&drm->client.device.object, 0x101000));
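
The nouveau hunk above fixes a runtime-PM reference leak: pm_runtime_get_sync() increments the usage count even when it fails, so the error path has to drop that reference itself. The resulting idiom, sketched for a hypothetical callback:

#include <linux/errno.h>
#include <linux/pm_runtime.h>

static int foo_show_regs(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		/* The usage count was bumped even on failure; balance it. */
		pm_runtime_put_autosuspend(dev);
		return ret;
	}

	/* ... touch the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}
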
@@@ -205,7 -203,7 +205,7 @@@ nouveau_debugfs_pstate_open(struct inod
static const struct file_operations nouveau_pstate_fops = {
.owner = THIS_MODULE,
.open = nouveau_debugfs_pstate_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = nouveau_debugfs_pstate_set,
};
@@@ -260,7 -258,7 +260,7 @@@ nouveau_debugfs_init(struct nouveau_dr
if (!drm->debugfs)
return -ENOMEM;
- ret = nvif_object_init(&drm->client.device.object, 0,
+ ret = nvif_object_ctor(&drm->client.device.object, "debugfsCtrl", 0,
NVIF_CLASS_CONTROL, NULL, 0,
&drm->debugfs->ctrl);
if (ret)
@@@ -273,7 -271,7 +273,7 @@@ voi
nouveau_debugfs_fini(struct nouveau_drm *drm)
{
if (drm->debugfs && drm->debugfs->ctrl.priv)
- nvif_object_fini(&drm->debugfs->ctrl);
+ nvif_object_dtor(&drm->debugfs->ctrl);
kfree(drm->debugfs);
drm->debugfs = NULL;
diff --combined drivers/gpu/host1x/debug.c
index 1b4997bda1c7,545fc0afb45a..3eee4318b158
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@@ -16,8 -16,6 +16,8 @@@
#include "debug.h"
#include "channel.h"
+static DEFINE_MUTEX(debug_lock);
+
unsigned int host1x_debug_trace_cmdbuf;
static pid_t host1x_debug_force_timeout_pid;
@@@ -54,14 -52,12 +54,14 @@@ static int show_channel(struct host1x_c
struct output *o = data;
mutex_lock(&ch->cdma.lock);
+ mutex_lock(&debug_lock);
if (show_fifo)
host1x_hw_show_channel_fifo(m, ch, o);
host1x_hw_show_channel_cdma(m, ch, o);
+ mutex_unlock(&debug_lock);
mutex_unlock(&ch->cdma.lock);
return 0;
@@@ -145,7 -141,7 +145,7 @@@ static int host1x_debug_open_all(struc
static const struct file_operations host1x_debug_all_fops = {
.open = host1x_debug_open_all,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -157,7 -153,7 +157,7 @@@ static int host1x_debug_open(struct ino
static const struct file_operations host1x_debug_fops = {
.open = host1x_debug_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined drivers/hwmon/dell-smm-hwmon.c
index ec448f5f2dc3,29ac38842962..976acbbe8ce9
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@@ -597,7 -597,7 +597,7 @@@ static int i8k_open_fs(struct inode *in
static const struct proc_ops i8k_proc_ops = {
.proc_open = i8k_open_fs,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_ioctl = i8k_ioctl,
@@@ -1187,14 -1187,6 +1187,14 @@@ static struct dmi_system_id i8k_whiteli
},
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
},
+ {
+ .ident = "Dell Latitude 5480",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Latitude 5480"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
{
.ident = "Dell Latitude E6440",
.matches = {
diff --combined drivers/md/md.c
index 30fd009a91d5,0bae6c1523ce..e59a40e17f03
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@@ -101,8 -101,6 +101,8 @@@ static void mddev_detach(struct mddev *
* count by 2 for every hour elapsed between read errors.
*/
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
+/* Default safemode delay: 200 msec */
+#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
@@@ -201,7 -199,7 +201,7 @@@ static int rdevs_init_serial(struct mdd
static int rdev_need_serial(struct md_rdev *rdev)
{
return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0
&&
- rdev->bdev->bd_queue->nr_hw_queues != 1 &&
+ rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
test_bit(WriteMostly, &rdev->flags));
}
@@@ -465,46 -463,24 +465,46 @@@ check_suspended
}
EXPORT_SYMBOL(md_handle_request);
-static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+struct md_io {
+ struct mddev *mddev;
+ bio_end_io_t *orig_bi_end_io;
+ void *orig_bi_private;
+ unsigned long start_time;
+};
+
+static void md_end_io(struct bio *bio)
+{
+ struct md_io *md_io = bio->bi_private;
+ struct mddev *mddev = md_io->mddev;
+
+ disk_end_io_acct(mddev->gendisk, bio_op(bio), md_io->start_time);
+
+ bio->bi_end_io = md_io->orig_bi_end_io;
+ bio->bi_private = md_io->orig_bi_private;
+
+ mempool_free(md_io, &mddev->md_io_pool);
+
+ if (bio->bi_end_io)
+ bio->bi_end_io(bio);
+}
+
+static blk_qc_t md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
- const int sgrp = op_stat_group(bio_op(bio));
struct mddev *mddev = bio->bi_disk->private_data;
- unsigned int sectors;
- if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
+ if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
return BLK_QC_T_NONE;
}
- blk_queue_split(q, &bio);
-
- if (mddev == NULL || mddev->pers == NULL) {
+ if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
bio_io_error(bio);
return BLK_QC_T_NONE;
}
+
+ blk_queue_split(&bio);
+
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
if (bio_sectors(bio) != 0)
bio->bi_status = BLK_STS_IOERR;
@@@ -512,27 -488,21 +512,27 @@@
return BLK_QC_T_NONE;
}
- /*
- * save the sectors now since our bio can
- * go away inside make_request
- */
- sectors = bio_sectors(bio);
+ if (bio->bi_end_io != md_end_io) {
+ struct md_io *md_io;
+
+ md_io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO);
+ md_io->mddev = mddev;
+ md_io->orig_bi_end_io = bio->bi_end_io;
+ md_io->orig_bi_private = bio->bi_private;
+
+ bio->bi_end_io = md_end_io;
+ bio->bi_private = md_io;
+
+ md_io->start_time = disk_start_io_acct(mddev->gendisk,
+ bio_sectors(bio),
+ bio_op(bio));
+ }
+
/* bio could be mergeable after passing to underlayer */
bio->bi_opf &= ~REQ_NOMERGE;
md_handle_request(mddev, bio);
- part_stat_lock();
- part_stat_inc(&mddev->gendisk->part0, ios[sgrp]);
- part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors);
- part_stat_unlock();
-
return BLK_QC_T_NONE;
}
@@@ -579,6 -549,26 +579,6 @@@ void mddev_resume(struct mddev *mddev
}
EXPORT_SYMBOL_GPL(mddev_resume);
-int mddev_congested(struct mddev *mddev, int bits)
-{
- struct md_personality *pers = mddev->pers;
- int ret = 0;
-
- rcu_read_lock();
- if (mddev->suspended)
- ret = 1;
- else if (pers && pers->congested)
- ret = pers->congested(mddev, bits);
- rcu_read_unlock();
- return ret;
-}
-EXPORT_SYMBOL_GPL(mddev_congested);
-static int md_congested(void *data, int bits)
-{
- struct mddev *mddev = data;
- return mddev_congested(mddev, bits);
-}
-
/*
* Generic flush handling for md
*/
@@@ -2451,13 -2441,9 +2451,13 @@@ static int bind_rdev_to_array(struct md
goto fail;
ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
- if (sysfs_create_link(&rdev->kobj, ko, "block"))
- /* failure here is OK */;
+ /* failure here is OK */
+ err = sysfs_create_link(&rdev->kobj, ko, "block");
rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
+ rdev->sysfs_unack_badblocks =
+ sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
+ rdev->sysfs_badblocks =
+ sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_link_disk_holder(rdev->bdev, mddev->gendisk);
@@@ -2491,11 -2477,7 +2491,11 @@@ static void unbind_rdev_from_array(stru
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
+ sysfs_put(rdev->sysfs_unack_badblocks);
+ sysfs_put(rdev->sysfs_badblocks);
rdev->sysfs_state = NULL;
+ rdev->sysfs_unack_badblocks = NULL;
+ rdev->sysfs_badblocks = NULL;
rdev->badblocks.count = 0;
/* We need to delay this, otherwise we can deadlock when
* writing to 'remove' to "dev/state". We also need
@@@ -2840,7 -2822,7 +2840,7 @@@ rewrite
goto repeat;
wake_up(&mddev->sb_wait);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
rdev_for_each(rdev, mddev) {
if (test_and_clear_bit(FaultRecorded, &rdev->flags))
@@@ -3220,8 -3202,8 +3220,8 @@@ slot_store(struct md_rdev *rdev, const
return err;
} else
sysfs_notify_dirent_safe(rdev->sysfs_state);
- if (sysfs_link_rdev(rdev->mddev, rdev))
- /* failure here is OK */;
+ /* failure here is OK */;
+ sysfs_link_rdev(rdev->mddev, rdev);
/* don't wakeup anyone, leave that to userspace. */
} else {
if (slot >= rdev->mddev->raid_disks &&
@@@ -4093,7 -4075,7 +4093,7 @@@ level_store(struct mddev *mddev, const
mddev_resume(mddev);
if (!mddev->thread)
md_update_sb(mddev, 1);
- sysfs_notify(&mddev->kobj, NULL, "level");
+ sysfs_notify_dirent_safe(mddev->sysfs_level);
md_new_event(mddev);
rv = len;
out_unlock:
@@@ -4846,7 -4828,7 +4846,7 @@@ action_store(struct mddev *mddev, cons
}
if (err)
return err;
- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
} else {
if (cmd_match(page, "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@@ -5552,13 -5534,6 +5552,13 @@@ static void md_free(struct kobject *ko
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
+ if (mddev->sysfs_completed)
+ sysfs_put(mddev->sysfs_completed);
+ if (mddev->sysfs_degraded)
+ sysfs_put(mddev->sysfs_degraded);
+ if (mddev->sysfs_level)
+ sysfs_put(mddev->sysfs_level);
+
if (mddev->gendisk)
del_gendisk(mddev->gendisk);
@@@ -5570,7 -5545,6 +5570,7 @@@
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
+ mempool_exit(&mddev->md_io_pool);
kfree(mddev);
}
@@@ -5666,13 -5640,8 +5666,13 @@@ static int md_alloc(dev_t dev, char *na
*/
mddev->hold_active = UNTIL_STOP;
+ error = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE,
+ sizeof(struct md_io));
+ if (error)
+ goto abort;
+
error = -ENOMEM;
- mddev->queue = blk_alloc_queue(md_make_request, NUMA_NO_NODE);
+ mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!mddev->queue)
goto abort;
@@@ -5701,7 -5670,6 +5701,7 @@@
* remove it now.
*/
disk->flags |= GENHD_FL_EXT_DEVT;
+ disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
/* As soon as we call add_disk(), another thread could get
* through to md_open, so make sure it doesn't get too far
@@@ -5727,9 -5695,6 +5727,9 @@@
if (!error && mddev->kobj.sd) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd,
"array_state");
+ mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd,
"sync_completed");
+ mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd,
"degraded");
+ mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
}
mddev_put(mddev);
return error;
@@@ -5999,6 -5964,8 +5999,6 @@@ int md_run(struct mddev *mddev
blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
- mddev->queue->backing_dev_info->congested_data = mddev;
- mddev->queue->backing_dev_info->congested_fn = md_congested;
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@@ -6015,7 -5982,7 +6015,7 @@@
if (mddev_is_clustered(mddev))
mddev->safemode_delay = 0;
else
- mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
+ mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
mddev->in_sync = 1;
smp_wmb();
spin_lock(&mddev->lock);
@@@ -6082,7 -6049,7 +6082,7 @@@ static int do_md_run(struct mddev *mdde
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
sysfs_notify_dirent_safe(mddev->sysfs_state);
sysfs_notify_dirent_safe(mddev->sysfs_action);
- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
out:
clear_bit(MD_NOT_READY, &mddev->flags);
return err;
@@@ -6383,6 -6350,7 +6383,6 @@@ static int do_md_stop(struct mddev *mdd
__md_stop_writes(mddev);
__md_stop(mddev);
- mddev->queue->backing_dev_info->congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
@@@ -7393,8 -7361,6 +7393,8 @@@ static int update_array_info(struct mdd
mddev->bitmap_info.nodes = 0;
md_cluster_ops->leave(mddev);
+ module_put(md_cluster_mod);
+ mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
}
mddev_suspend(mddev);
md_bitmap_destroy(mddev);
@@@ -7840,21 -7806,23 +7840,21 @@@ static void md_release(struct gendisk *
mddev_put(mddev);
}
-static int md_media_changed(struct gendisk *disk)
-{
- struct mddev *mddev = disk->private_data;
-
- return mddev->changed;
-}
-
-static int md_revalidate(struct gendisk *disk)
+static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
{
struct mddev *mddev = disk->private_data;
+ unsigned int ret = 0;
+ if (mddev->changed)
+ ret = DISK_EVENT_MEDIA_CHANGE;
mddev->changed = 0;
- return 0;
+ return ret;
}
+
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
+ .submit_bio = md_submit_bio,
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
@@@ -7862,7 -7830,8 +7862,7 @@@
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
- .media_changed = md_media_changed,
- .revalidate_disk= md_revalidate,
+ .check_events = md_check_events,
};
static int md_thread(void *arg)
@@@ -8332,7 -8301,7 +8332,7 @@@ static __poll_t mdstat_poll(struct fil
static const struct proc_ops mdstat_proc_ops = {
.proc_open = md_seq_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = seq_release,
.proc_poll = mdstat_poll,
@@@ -8386,7 -8355,6 +8386,7 @@@ EXPORT_SYMBOL(unregister_md_cluster_ope
int md_setup_cluster(struct mddev *mddev, int nodes)
{
+ int ret;
if (!md_cluster_ops)
request_module("md-cluster");
spin_lock(&pers_lock);
@@@ -8398,10 -8366,7 +8398,10 @@@
}
spin_unlock(&pers_lock);
- return md_cluster_ops->join(mddev, nodes);
+ ret = md_cluster_ops->join(mddev, nodes);
+ if (!ret)
+ mddev->safemode_delay = 0;
+ return ret;
}
void md_cluster_stop(struct mddev *mddev)
@@@ -8802,7 -8767,7 +8802,7 @@@ void md_do_sync(struct md_thread *threa
} else
mddev->curr_resync = 3; /* no longer delayed */
mddev->curr_resync_completed = j;
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
md_new_event(mddev);
update_time = jiffies;
@@@ -8830,7 -8795,7 +8830,7 @@@
mddev->recovery_cp = j;
update_time = jiffies;
set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
while (j >= mddev->resync_max &&
@@@ -8937,7 -8902,7 +8937,7 @@@
!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
mddev->curr_resync > 3) {
mddev->curr_resync_completed = mddev->curr_resync;
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
mddev->pers->sync_request(mddev, max_sectors, &skipped);
@@@ -9067,7 -9032,7 +9067,7 @@@ static int remove_and_add_spares(struc
}
if (removed && mddev->kobj.sd)
- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
if (this && removed)
goto no_add;
@@@ -9095,8 -9060,8 +9095,8 @@@
rdev->recovery_offset = 0;
}
if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
- if (sysfs_link_rdev(mddev, rdev))
- /* failure here is OK */;
+ /* failure here is OK */
+ sysfs_link_rdev(mddev, rdev);
if (!test_bit(Journal, &rdev->flags))
spares++;
md_new_event(mddev);
@@@ -9350,7 -9315,8 +9350,7 @@@ void md_reap_sync_thread(struct mddev *
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev)) {
- sysfs_notify(&mddev->kobj, NULL,
- "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
@@@ -9440,7 -9406,8 +9440,7 @@@ int rdev_set_badblocks(struct md_rdev *
if (rv == 0) {
/* Make sure they get written out promptly */
if (test_bit(ExternalBbl, &rdev->flags))
- sysfs_notify(&rdev->kobj, NULL,
- "unacknowledged_bad_blocks");
+ sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
@@@ -9461,7 -9428,7 +9461,7 @@@ int rdev_clear_badblocks(struct md_rde
s += rdev->data_offset;
rv = badblocks_clear(&rdev->badblocks, s, sectors);
if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
- sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
+ sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
@@@ -9691,7 -9658,7 +9691,7 @@@ static int read_rdev(struct mddev *mdde
if (rdev->recovery_offset == MaxSector &&
!test_bit(In_sync, &rdev->flags) &&
mddev->pers->spare_active(mddev))
- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
put_page(swapout);
return 0;
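The md_submit_bio()/md_end_io() changes above replace the old part_stat_* bookkeeping with the generic disk_start_io_acct()/disk_end_io_acct() helpers: the submit path stashes the original completion callback in a mempool-backed context, and the wrapper restores it after closing the accounting window. A condensed sketch of that wrapping pattern for a hypothetical stacked driver (struct my_dev, struct my_io and the helper names are placeholders, not the md code):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>

struct my_dev {
        struct gendisk *gendisk;
        mempool_t io_pool;              /* pool of struct my_io */
};

struct my_io {
        struct my_dev *dev;
        bio_end_io_t *orig_bi_end_io;
        void *orig_bi_private;
        unsigned long start_time;
};

static void my_end_io(struct bio *bio)
{
        struct my_io *io = bio->bi_private;

        /* Close the accounting window opened at submission time. */
        disk_end_io_acct(io->dev->gendisk, bio_op(bio), io->start_time);

        /* Restore the caller's completion state, then complete the bio. */
        bio->bi_end_io = io->orig_bi_end_io;
        bio->bi_private = io->orig_bi_private;
        mempool_free(io, &io->dev->io_pool);

        if (bio->bi_end_io)
                bio->bi_end_io(bio);
}

static void my_account_bio(struct my_dev *dev, struct bio *bio)
{
        struct my_io *io = mempool_alloc(&dev->io_pool, GFP_NOIO);

        io->dev = dev;
        io->orig_bi_end_io = bio->bi_end_io;
        io->orig_bi_private = bio->bi_private;
        bio->bi_end_io = my_end_io;
        bio->bi_private = io;
        io->start_time = disk_start_io_acct(dev->gendisk, bio_sectors(bio),
                                            bio_op(bio));
}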
diff --combined drivers/media/cec/core/cec-core.c
index c599cd94dd62,87e856788e0b..6edb1e09f9fa
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@@ -226,7 -226,7 +226,7 @@@ static int cec_error_inj_open(struct in
static const struct file_operations cec_error_inj_fops = {
.open = cec_error_inj_open,
.write = cec_error_inj_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -265,6 -265,7 +265,6 @@@ struct cec_adapter *cec_allocate_adapte
adap->sequence = 0;
adap->ops = ops;
adap->priv = priv;
- memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
mutex_init(&adap->lock);
INIT_LIST_HEAD(&adap->transmit_queue);
INIT_LIST_HEAD(&adap->wait_queue);
diff --combined drivers/memory/emif.c
index bb6a71d26798,58a82eea5850..1bf0656d5e9a
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@@ -138,7 -138,7 +138,7 @@@ static int emif_regdump_open(struct ino
static const struct file_operations emif_regdump_fops = {
.open = emif_regdump_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.release = single_release,
};
@@@ -157,7 -157,7 +157,7 @@@ static int emif_mr4_open(struct inode *
static const struct file_operations emif_mr4_fops = {
.open = emif_mr4_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.release = single_release,
};
@@@ -282,9 -282,10 +282,9 @@@ static void set_lpmode(struct emif_dat
* the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
*/
if ((emif->plat_data->ip_rev == EMIF_4D) &&
- (EMIF_LP_MODE_PWR_DN == lpmode)) {
+ (lpmode == EMIF_LP_MODE_PWR_DN)) {
WARN_ONCE(1,
- "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by"
- "erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n");
+ "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743 switch to
LP_MODE_SELF_REFRESH(2)\n");
/* rollback LP_MODE to Self-refresh mode */
lpmode = EMIF_LP_MODE_SELF_REFRESH;
}
@@@ -713,7 -714,7 +713,7 @@@ static u32 get_ext_phy_ctrl_2_intelliph
u32 fifo_we_slave_ratio;
fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
- EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck);
+ EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 |
fifo_we_slave_ratio << 22;
@@@ -724,7 -725,7 +724,7 @@@ static u32 get_ext_phy_ctrl_3_intelliph
u32 fifo_we_slave_ratio;
fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
- EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck);
+ EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 |
fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23;
@@@ -735,7 -736,7 +735,7 @@@ static u32 get_ext_phy_ctrl_4_intelliph
u32 fifo_we_slave_ratio;
fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
- EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck);
+ EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 |
fifo_we_slave_ratio << 13;
@@@ -974,7 -975,8 +974,7 @@@ static irqreturn_t handle_temp_alert(vo
EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
dev_err(emif->dev,
- "%s:NOT Extended temperature capable memory."
- "Converting MR4=0x%02x as shutdown event\n",
+ "%s:NOT Extended temperature capable memory. Converting MR4=0x%02x as shutdown
event\n",
__func__, emif->temperature_level);
/*
* Temperature far too high - do kernel_power_off()
@@@ -1316,9 -1318,9 +1316,9 @@@ static void __init_or_module of_get_ddr
if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
dev_info->cal_resistors_per_cs = true;
- if (of_device_is_compatible(np_ddr , "jedec,lpddr2-s4"))
+ if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
dev_info->type = DDR_TYPE_LPDDR2_S4;
- else if (of_device_is_compatible(np_ddr , "jedec,lpddr2-s2"))
+ else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
dev_info->type = DDR_TYPE_LPDDR2_S2;
of_property_read_u32(np_ddr, "density", &density);
@@@ -1561,8 -1563,11 +1561,8 @@@ static int __init_or_module emif_probe(
goto error;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(emif->dev, "%s: error getting IRQ resource - %d\n",
- __func__, irq);
+ if (irq < 0)
goto error;
- }
emif_onetime_settings(emif);
emif_debugfs_init(emif);
diff --combined drivers/memory/tegra/tegra124-emc.c
index ba5cb1f4dfc2,794431b2102c..a98de2c943d1
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@@ -984,7 -984,6 +984,7 @@@ static int tegra_emc_load_timings_from_
static const struct of_device_id tegra_emc_of_match[] = {
{ .compatible = "nvidia,tegra124-emc" },
+ { .compatible = "nvidia,tegra132-emc" },
{}
};
@@@ -1069,7 -1068,7 +1069,7 @@@ static int tegra_emc_debug_available_ra
static const struct file_operations tegra_emc_debug_available_rates_fops = {
.open = tegra_emc_debug_available_rates_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -1179,11 -1178,11 +1179,11 @@@ static void emc_debugfs_init(struct dev
return;
}
- debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root, emc,
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
&tegra_emc_debug_available_rates_fops);
- debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_min_rate_fops);
- debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_max_rate_fops);
}
diff --combined drivers/memory/tegra/tegra186-emc.c
index 8478f59db432,1445ab51af6a..1d0a12164fe7
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@@ -94,7 -94,7 +94,7 @@@ static int tegra186_emc_debug_available
static const struct file_operations tegra186_emc_debug_available_rates_fops = {
.open = tegra186_emc_debug_available_rates_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -185,7 -185,7 +185,7 @@@ static int tegra186_emc_probe(struct pl
if (IS_ERR(emc->clk)) {
err = PTR_ERR(emc->clk);
dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
- return err;
+ goto put_bpmp;
}
platform_set_drvdata(pdev, emc);
@@@ -201,7 -201,7 +201,7 @@@
err = tegra_bpmp_transfer(emc->bpmp, &msg);
if (err < 0) {
dev_err(&pdev->dev, "failed to EMC DVFS pairs: %d\n", err);
- return err;
+ goto put_bpmp;
}
emc->debugfs.min_rate = ULONG_MAX;
@@@ -211,10 -211,8 +211,10 @@@
emc->dvfs = devm_kmalloc_array(&pdev->dev, emc->num_dvfs,
sizeof(*emc->dvfs), GFP_KERNEL);
- if (!emc->dvfs)
- return -ENOMEM;
+ if (!emc->dvfs) {
+ err = -ENOMEM;
+ goto put_bpmp;
+ }
dev_dbg(&pdev->dev, "%u DVFS pairs:\n", emc->num_dvfs);
@@@ -239,10 -237,15 +239,10 @@@
"failed to set rate range [%lu-%lu] for %pC\n",
emc->debugfs.min_rate, emc->debugfs.max_rate,
emc->clk);
- return err;
+ goto put_bpmp;
}
emc->debugfs.root = debugfs_create_dir("emc", NULL);
- if (!emc->debugfs.root) {
- dev_err(&pdev->dev, "failed to create debugfs directory\n");
- return 0;
- }
-
debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
emc, &tegra186_emc_debug_available_rates_fops);
debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
@@@ -251,10 -254,6 +251,10 @@@
emc, &tegra186_emc_debug_max_rate_fops);
return 0;
+
+put_bpmp:
+ tegra_bpmp_put(emc->bpmp);
+ return err;
}
static int tegra186_emc_remove(struct platform_device *pdev)
@@@ -268,10 -267,10 +268,10 @@@
}
static const struct of_device_id tegra186_emc_of_match[] = {
-#if defined(CONFIG_ARCH_TEGRA186_SOC)
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
{ .compatible = "nvidia,tegra186-emc" },
#endif
-#if defined(CONFIG_ARCH_TEGRA194_SOC)
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
{ .compatible = "nvidia,tegra194-emc" },
#endif
{ /* sentinel */ }
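The tegra186-emc hunks turn every early return after tegra_bpmp_get() into a jump to a single put_bpmp label so the BPMP reference is always dropped on failure. A condensed, single-exit sketch of that error handling (my_probe and the "emc" clock step are placeholders standing in for the driver's real setup sequence):

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <soc/tegra/bpmp.h>

static int my_probe(struct platform_device *pdev)
{
        struct tegra_bpmp *bpmp;
        struct clk *clk;
        int err;

        bpmp = tegra_bpmp_get(&pdev->dev);
        if (IS_ERR(bpmp))
                return PTR_ERR(bpmp);

        /* Every later failure funnels through one label that drops the ref. */
        clk = devm_clk_get(&pdev->dev, "emc");
        if (IS_ERR(clk)) {
                err = PTR_ERR(clk);
                goto put_bpmp;
        }

        return 0;

put_bpmp:
        tegra_bpmp_put(bpmp);
        return err;
}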
diff --combined drivers/memory/tegra/tegra20-emc.c
index 027f46287dbf,c63b14f7b356..83ca46345aba
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@@ -7,11 -7,11 +7,11 @@@
#include <linux/clk.h>
#include <linux/clk/tegra.h>
-#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@@ -144,6 -144,7 +144,6 @@@ struct emc_timing
struct tegra_emc {
struct device *dev;
- struct completion clk_handshake_complete;
struct notifier_block clk_nb;
struct clk *clk;
void __iomem *regs;
@@@ -161,13 -162,17 +161,13 @@@
static irqreturn_t tegra_emc_isr(int irq, void *data)
{
struct tegra_emc *emc = data;
- u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
u32 status;
status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
if (!status)
return IRQ_NONE;
- /* notify about EMC-CAR handshake completion */
- if (status & EMC_CLKCHANGE_COMPLETE_INT)
- complete(&emc->clk_handshake_complete);
-
/* notify about HW problem */
if (status & EMC_REFRESH_OVERFLOW_INT)
dev_err_ratelimited(emc->dev,
@@@ -219,13 -224,14 +219,13 @@@ static int emc_prepare_timing_change(st
/* wait until programming has settled */
readl_relaxed(emc->regs + emc_timing_registers[i - 1]);
- reinit_completion(&emc->clk_handshake_complete);
-
return 0;
}
static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
{
- unsigned long timeout;
+ int err;
+ u32 v;
dev_dbg(emc->dev, "%s: flush %d\n", __func__, flush);
@@@ -236,12 -242,11 +236,12 @@@
return 0;
}
- timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
- msecs_to_jiffies(100));
- if (timeout == 0) {
- dev_err(emc->dev, "EMC-CAR handshake failed\n");
- return -EIO;
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
+ v & EMC_CLKCHANGE_COMPLETE_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
+ return err;
}
return 0;
@@@ -407,7 -412,7 +407,7 @@@ tegra_emc_find_node_by_ram_code(struct
static int emc_setup_hw(struct tegra_emc *emc)
{
- u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
u32 emc_cfg, emc_dbg;
emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
@@@ -541,7 -546,7 +541,7 @@@ static int tegra_emc_debug_available_ra
static const struct file_operations tegra_emc_debug_available_rates_fops = {
.open = tegra_emc_debug_available_rates_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -642,11 -647,11 +642,11 @@@ static void tegra_emc_debugfs_init(stru
return;
}
- debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root,
emc, &tegra_emc_debug_available_rates_fops);
- debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_min_rate_fops);
- debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_max_rate_fops);
}
@@@ -681,6 -686,7 +681,6 @@@ static int tegra_emc_probe(struct platf
return -ENOMEM;
}
- init_completion(&emc->clk_handshake_complete);
emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
emc->dev = &pdev->dev;
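The tegra20-emc conversion above (and the tegra30-emc one below) drops the interrupt/completion handshake and instead polls EMC_INTSTATUS with readl_relaxed_poll_timeout_atomic(). A minimal sketch of such a poll with the same 1 us step and 100 us budget, assuming a mapped register block (MY_STATUS_REG and MY_DONE_BIT are placeholder definitions, not the EMC register layout):

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_STATUS_REG   0x0             /* placeholder register offset */
#define MY_DONE_BIT     BIT(0)          /* placeholder completion flag */

static int my_wait_handshake(struct device *dev, void __iomem *regs)
{
        u32 v;
        int err;

        /* Spin-poll every 1 us, give up after 100 us; no sleeping allowed. */
        err = readl_relaxed_poll_timeout_atomic(regs + MY_STATUS_REG, v,
                                                v & MY_DONE_BIT, 1, 100);
        if (err)
                dev_err(dev, "handshake timeout: %d\n", err);

        return err;
}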
diff --combined drivers/memory/tegra/tegra30-emc.c
index 055af0e08a2e,2ed027408af2..e448a55cb812
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@@ -11,6 -11,7 +11,6 @@@
#include <linux/clk.h>
#include <linux/clk/tegra.h>
-#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
@@@ -326,6 -327,7 +326,6 @@@ struct emc_timing
struct tegra_emc {
struct device *dev;
struct tegra_mc *mc;
- struct completion clk_handshake_complete;
struct notifier_block clk_nb;
struct clk *clk;
void __iomem *regs;
@@@ -372,10 -374,52 +372,10 @@@ static int emc_seq_update_timing(struc
return 0;
}
-static void emc_complete_clk_change(struct tegra_emc *emc)
-{
- struct emc_timing *timing = emc->new_timing;
- unsigned int dram_num;
- bool failed = false;
- int err;
-
- /* re-enable auto-refresh */
- dram_num = tegra_mc_get_emem_device_count(emc->mc);
- writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
- emc->regs + EMC_REFCTRL);
-
- /* restore auto-calibration */
- if (emc->vref_cal_toggle)
- writel_relaxed(timing->emc_auto_cal_interval,
- emc->regs + EMC_AUTO_CAL_INTERVAL);
-
- /* restore dynamic self-refresh */
- if (timing->emc_cfg_dyn_self_ref) {
- emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
- writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
- }
-
- /* set number of clocks to wait after each ZQ command */
- if (emc->zcal_long)
- writel_relaxed(timing->emc_zcal_cnt_long,
- emc->regs + EMC_ZCAL_WAIT_CNT);
-
- /* wait for writes to settle */
- udelay(2);
-
- /* update restored timing */
- err = emc_seq_update_timing(emc);
- if (err)
- failed = true;
-
- /* restore early ACK */
- mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
-
- WRITE_ONCE(emc->bad_state, failed);
-}
-
static irqreturn_t tegra_emc_isr(int irq, void *data)
{
struct tegra_emc *emc = data;
- u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
u32 status;
status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
@@@ -390,6 -434,18 +390,6 @@@
/* clear interrupts */
writel_relaxed(status, emc->regs + EMC_INTSTATUS);
- /* notify about EMC-CAR handshake completion */
- if (status & EMC_CLKCHANGE_COMPLETE_INT) {
- if (completion_done(&emc->clk_handshake_complete)) {
- dev_err_ratelimited(emc->dev,
- "bogus handshake interrupt\n");
- return IRQ_NONE;
- }
-
- emc_complete_clk_change(emc);
- complete(&emc->clk_handshake_complete);
- }
-
return IRQ_HANDLED;
}
@@@ -745,58 -801,29 +745,58 @@@ static int emc_prepare_timing_change(st
*/
mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
- reinit_completion(&emc->clk_handshake_complete);
-
- emc->new_timing = timing;
-
return 0;
}
static int emc_complete_timing_change(struct tegra_emc *emc,
unsigned long rate)
{
- unsigned long timeout;
+ struct emc_timing *timing = emc_find_timing(emc, rate);
+ unsigned int dram_num;
+ int err;
+ u32 v;
- timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
- msecs_to_jiffies(100));
- if (timeout == 0) {
- dev_err(emc->dev, "emc-car handshake failed\n");
- return -EIO;
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
+ v & EMC_CLKCHANGE_COMPLETE_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
+ return err;
}
- if (READ_ONCE(emc->bad_state))
- return -EIO;
+ /* re-enable auto-refresh */
+ dram_num = tegra_mc_get_emem_device_count(emc->mc);
+ writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
+ emc->regs + EMC_REFCTRL);
+
+ /* restore auto-calibration */
+ if (emc->vref_cal_toggle)
+ writel_relaxed(timing->emc_auto_cal_interval,
+ emc->regs + EMC_AUTO_CAL_INTERVAL);
- return 0;
+ /* restore dynamic self-refresh */
+ if (timing->emc_cfg_dyn_self_ref) {
+ emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+ }
+
+ /* set number of clocks to wait after each ZQ command */
+ if (emc->zcal_long)
+ writel_relaxed(timing->emc_zcal_cnt_long,
+ emc->regs + EMC_ZCAL_WAIT_CNT);
+
+ /* wait for writes to settle */
+ udelay(2);
+
+ /* update restored timing */
+ err = emc_seq_update_timing(emc);
+ if (!err)
+ emc->bad_state = false;
+
+ /* restore early ACK */
+ mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
+
+ return err;
}
static int emc_unprepare_timing_change(struct tegra_emc *emc,
@@@ -1006,7 -1033,7 +1006,7 @@@ static struct device_node *emc_find_nod
static int emc_setup_hw(struct tegra_emc *emc)
{
- u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
u32 fbio_cfg5, emc_cfg, emc_dbg;
enum emc_dram_type dram_type;
@@@ -1147,7 -1174,7 +1147,7 @@@ static int tegra_emc_debug_available_ra
static const struct file_operations tegra_emc_debug_available_rates_fops = {
.open = tegra_emc_debug_available_rates_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -1248,11 -1275,11 +1248,11 @@@ static void tegra_emc_debugfs_init(stru
return;
}
- debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root,
emc, &tegra_emc_debug_available_rates_fops);
- debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_min_rate_fops);
- debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_max_rate_fops);
}
@@@ -1294,6 -1321,7 +1294,6 @@@ static int tegra_emc_probe(struct platf
if (!emc->mc)
return -EPROBE_DEFER;
- init_completion(&emc->clk_handshake_complete);
emc->clk_nb.notifier_call = emc_clk_change_notify;
emc->dev = &pdev->dev;
diff --combined drivers/mfd/ab3100-core.c
index ee71ae04b5e6,0f461180d29f..092c78b5e26f
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@@ -473,7 -473,7 +473,7 @@@ static int ab3100_registers_open(struc
static const struct file_operations ab3100_registers_fops = {
.open = ab3100_registers_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
@@@ -498,7 -498,7 +498,7 @@@ static ssize_t ab3100_get_set_reg(struc
int i = 0;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf)-1));
+ buf_size = min((ssize_t)count, (ssize_t)(sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
buf[buf_size] = 0;
diff --combined drivers/mfd/ab3100-otp.c
index c393102e3a39,3667fa4c9fed..3c1157e7a0c1
--- a/drivers/mfd/ab3100-otp.c
+++ b/drivers/mfd/ab3100-otp.c
@@@ -29,22 -29,22 +29,22 @@@
/**
* struct ab3100_otp
- * @dev containing device
- * @locked whether the OTP is locked, after locking, no more bits
+ * @dev: containing device
+ * @locked: whether the OTP is locked, after locking, no more bits
* can be changed but before locking it is still possible
* to change bits from 1->0.
- * @freq clocking frequency for the OTP, this frequency is either
+ * @freq: clocking frequency for the OTP, this frequency is either
* 32768Hz or 1MHz/30
- * @paf product activation flag, indicates whether this is a real
+ * @paf: product activation flag, indicates whether this is a real
* product (paf true) or a lab board etc (paf false)
- * @imeich if this is set it is possible to override the
+ * @imeich: if this is set it is possible to override the
* IMEI number found in the tac, fac and svn fields with
* (secured) software
- * @cid customer ID
- * @tac type allocation code of the IMEI
- * @fac final assembly code of the IMEI
- * @svn software version number of the IMEI
- * @debugfs a debugfs file used when dumping to file
+ * @cid: customer ID
+ * @tac: type allocation code of the IMEI
+ * @fac: final assembly code of the IMEI
+ * @svn: software version number of the IMEI
+ * @debugfs: a debugfs file used when dumping to file
*/
struct ab3100_otp {
struct device *dev;
@@@ -117,7 -117,7 +117,7 @@@ static int ab3100_otp_open(struct inod
static const struct file_operations ab3100_otp_operations = {
.open = ab3100_otp_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined drivers/mfd/ab8500-debugfs.c
index 6d1bf7c3ca3b,fcdb618fe2db..838798c98d0f
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@@ -1376,7 -1376,7 +1376,7 @@@ static int ab8500_all_banks_open(struc
static const struct file_operations ab8500_all_banks_fops = {
.open = ab8500_all_banks_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
@@@ -1801,7 -1801,7 +1801,7 @@@ static ssize_t ab8500_hwreg_write(struc
int buf_size, ret;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf)-1));
+ buf_size = min((int)count, (int)(sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
buf[buf_size] = 0;
@@@ -1959,7 -1959,7 +1959,7 @@@ static ssize_t ab8500_unsubscribe_write
static const struct file_operations ab8500_bank_fops = {
.open = ab8500_bank_open,
.write = ab8500_bank_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
@@@ -1968,7 -1968,7 +1968,7 @@@
static const struct file_operations ab8500_address_fops = {
.open = ab8500_address_open,
.write = ab8500_address_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
@@@ -1977,7 -1977,7 +1977,7 @@@
static const struct file_operations ab8500_val_fops = {
.open = ab8500_val_open,
.write = ab8500_val_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
@@@ -1986,7 -1986,7 +1986,7 @@@
static const struct file_operations ab8500_subscribe_fops = {
.open = ab8500_subscribe_unsubscribe_open,
.write = ab8500_subscribe_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
@@@ -1995,7 -1995,7 +1995,7 @@@
static const struct file_operations ab8500_unsubscribe_fops = {
.open = ab8500_subscribe_unsubscribe_open,
.write = ab8500_unsubscribe_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
@@@ -2004,7 -2004,7 +2004,7 @@@
static const struct file_operations ab8500_hwreg_fops = {
.open = ab8500_hwreg_open,
.write = ab8500_hwreg_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
diff --combined drivers/mfd/tps65010.c
index 7e7dbee58ca9,19ed0a372389..31259867f637
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@@ -290,7 -290,7 +290,7 @@@ static int dbg_tps_open(struct inode *i
static const struct file_operations debug_fops = {
.open = dbg_tps_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -404,6 -404,7 +404,6 @@@ static void tps65010_work(struct work_s
tps65010_interrupt(tps);
if (test_and_clear_bit(FLAG_VBUS_CHANGED, &tps->flags)) {
- int status;
u8 chgconfig, tmp;
chgconfig = i2c_smbus_read_byte_data(tps->client,
@@@ -414,8 -415,8 +414,8 @@@
else if (tps->vbus >= 100)
chgconfig |= TPS_VBUS_CHARGING;
- status = i2c_smbus_write_byte_data(tps->client,
- TPS_CHGCONFIG, chgconfig);
+ i2c_smbus_write_byte_data(tps->client,
+ TPS_CHGCONFIG, chgconfig);
/* vbus update fails unless VBUS is connected! */
tmp = i2c_smbus_read_byte_data(tps->client, TPS_CHGCONFIG);
diff --combined drivers/misc/habanalabs/common/debugfs.c
index c50c6fc9e905,000000000000..71cfe1b6fafc
mode 100644,000000..100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@@ -1,1404 -1,0 +1,1404 @@@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
+
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#define MMU_ADDR_BUF_SIZE 40
+#define MMU_ASID_BUF_SIZE 10
+#define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
+
+static struct dentry *hl_debug_root;
+
+static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
+ u8 i2c_reg, u32 *val)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -EBUSY;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_I2C_RD <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.i2c_bus = i2c_bus;
+ pkt.i2c_addr = i2c_addr;
+ pkt.i2c_reg = i2c_reg;
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ 0, (long *) val);
+
+ if (rc)
+ dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
+
+ return rc;
+}
+
+static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
+ u8 i2c_reg, u32 val)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -EBUSY;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_I2C_WR <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.i2c_bus = i2c_bus;
+ pkt.i2c_addr = i2c_addr;
+ pkt.i2c_reg = i2c_reg;
+ pkt.value = cpu_to_le64(val);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ 0, NULL);
+
+ if (rc)
+ dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
+
+ return rc;
+}
+
+static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_LED_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.led_index = cpu_to_le32(led);
+ pkt.value = cpu_to_le64(state);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ 0, NULL);
+
+ if (rc)
+ dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
+}
+
+static int command_buffers_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_cb *cb;
+ bool first = true;
+
+ spin_lock(&dev_entry->cb_spinlock);
+
+ list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " CB ID CTX ID CB size CB RefCnt mmap? CS
counter\n");
+ seq_puts(s,
"---------------------------------------------------------------\n");
+ }
+ seq_printf(s,
+ " %03d %d 0x%08x %d %d %d\n",
+ cb->id, cb->ctx_id, cb->size,
+ kref_read(&cb->refcount),
+ cb->mmap, cb->cs_cnt);
+ }
+
+ spin_unlock(&dev_entry->cb_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int command_submission_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_cs *cs;
+ bool first = true;
+
+ spin_lock(&dev_entry->cs_spinlock);
+
+ list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " CS ID CTX ASID CS RefCnt Submitted Completed\n");
+ seq_puts(s, "------------------------------------------------------\n");
+ }
+ seq_printf(s,
+ " %llu %d %d %d %d\n",
+ cs->sequence, cs->ctx->asid,
+ kref_read(&cs->refcount),
+ cs->submitted, cs->completed);
+ }
+
+ spin_unlock(&dev_entry->cs_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int command_submission_jobs_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_cs_job *job;
+ bool first = true;
+
+ spin_lock(&dev_entry->cs_job_spinlock);
+
+ list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " JOB ID CS ID CTX ASID H/W Queue\n");
+ seq_puts(s, "---------------------------------------\n");
+ }
+ if (job->cs)
+ seq_printf(s,
+ " %02d %llu %d %d\n",
+ job->id, job->cs->sequence, job->cs->ctx->asid,
+ job->hw_queue_id);
+ else
+ seq_printf(s,
+ " %02d 0 %d %d\n",
+ job->id, HL_KERNEL_ASID_ID, job->hw_queue_id);
+ }
+
+ spin_unlock(&dev_entry->cs_job_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int userptr_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_userptr *userptr;
+ char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
+ "DMA_FROM_DEVICE", "DMA_NONE"};
+ bool first = true;
+
+ spin_lock(&dev_entry->userptr_spinlock);
+
+ list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " user virtual address size dma dir\n");
+ seq_puts(s, "----------------------------------------------------------\n");
+ }
+ seq_printf(s,
+ " 0x%-14llx %-10u %-30s\n",
+ userptr->addr, userptr->size, dma_dir[userptr->dir]);
+ }
+
+ spin_unlock(&dev_entry->userptr_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int vm_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_ctx *ctx;
+ struct hl_vm *vm;
+ struct hl_vm_hash_node *hnode;
+ struct hl_userptr *userptr;
+ struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
+ enum vm_type_t *vm_type;
+ bool once = true;
+ u64 j;
+ int i;
+
+ if (!dev_entry->hdev->mmu_enable)
+ return 0;
+
+ spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+
+ list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
+ once = false;
+ seq_puts(s, "\n\n----------------------------------------------------");
+ seq_puts(s, "\n----------------------------------------------------\n\n");
+ seq_printf(s, "ctx asid: %u\n", ctx->asid);
+
+ seq_puts(s, "\nmappings:\n\n");
+ seq_puts(s, " virtual address size handle\n");
+ seq_puts(s, "----------------------------------------------------\n");
+ mutex_lock(&ctx->mem_hash_lock);
+ hash_for_each(ctx->mem_hash, i, hnode, node) {
+ vm_type = hnode->ptr;
+
+ if (*vm_type == VM_TYPE_USERPTR) {
+ userptr = hnode->ptr;
+ seq_printf(s,
+ " 0x%-14llx %-10u\n",
+ hnode->vaddr, userptr->size);
+ } else {
+ phys_pg_pack = hnode->ptr;
+ seq_printf(s,
+ " 0x%-14llx %-10llu %-4u\n",
+ hnode->vaddr, phys_pg_pack->total_size,
+ phys_pg_pack->handle);
+ }
+ }
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ vm = &ctx->hdev->vm;
+ spin_lock(&vm->idr_lock);
+
+ if (!idr_is_empty(&vm->phys_pg_pack_handles))
+ seq_puts(s, "\n\nallocations:\n");
+
+ idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
+ if (phys_pg_pack->asid != ctx->asid)
+ continue;
+
+ seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
+ seq_printf(s, "page size: %u\n\n",
+ phys_pg_pack->page_size);
+ seq_puts(s, " physical address\n");
+ seq_puts(s, "---------------------\n");
+ for (j = 0 ; j < phys_pg_pack->npages ; j++) {
+ seq_printf(s, " 0x%-14llx\n",
+ phys_pg_pack->pages[j]);
+ }
+ }
+ spin_unlock(&vm->idr_lock);
+
+ }
+
+ spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+
+ if (!once)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+/* these inline functions are copied from mmu.c */
+static inline u64 get_hop0_addr(struct hl_ctx *ctx)
+{
+ return ctx->hdev->asic_prop.mmu_pgt_addr +
+ (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
+}
+
+static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr, u64 mask, u64 shift)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & mask) >> shift);
+}
+
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop0_mask,
+ mmu_specs->hop0_shift);
+}
+
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop1_mask,
+ mmu_specs->hop1_shift);
+}
+
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop2_mask,
+ mmu_specs->hop2_shift);
+}
+
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop3_mask,
+ mmu_specs->hop3_shift);
+}
+
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop4_mask,
+ mmu_specs->hop4_shift);
+}
+
+static inline u64 get_next_hop_addr(u64 curr_pte)
+{
+ if (curr_pte & PAGE_PRESENT_MASK)
+ return curr_pte & HOP_PHYS_ADDR_MASK;
+ else
+ return ULLONG_MAX;
+}
+
+static int mmu_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
+ struct hl_ctx *ctx;
+ bool is_dram_addr;
+
+ u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
+ hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
+ hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0,
+ hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0,
+ hop4_addr = 0, hop4_pte_addr = 0, hop4_pte = 0,
+ virt_addr = dev_entry->mmu_addr;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
+ ctx = hdev->kernel_ctx;
+ else
+ ctx = hdev->compute_ctx;
+
+ if (!ctx) {
+ dev_err(hdev->dev, "no ctx available\n");
+ return 0;
+ }
+
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
+
+ /* shifts and masks are the same in PMMU and HPMMU, use one of them */
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
+ mutex_lock(&ctx->mmu_lock);
+
+ /* the following lookup is copied from unmap() in mmu.c */
+
+ hop0_addr = get_hop0_addr(ctx);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
+ hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
+ hop1_addr = get_next_hop_addr(hop0_pte);
+
+ if (hop1_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
+ hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
+ hop2_addr = get_next_hop_addr(hop1_pte);
+
+ if (hop2_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
+ hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
+ hop3_addr = get_next_hop_addr(hop2_pte);
+
+ if (hop3_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
+ hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
+
+ if (!(hop3_pte & LAST_MASK)) {
+ hop4_addr = get_next_hop_addr(hop3_pte);
+
+ if (hop4_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
+ hop4_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
+ if (!(hop4_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+ } else {
+ if (!(hop3_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+ }
+
+ seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
+ dev_entry->mmu_asid, dev_entry->mmu_addr);
+
+ seq_printf(s, "hop0_addr: 0x%llx\n", hop0_addr);
+ seq_printf(s, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr);
+ seq_printf(s, "hop0_pte: 0x%llx\n", hop0_pte);
+
+ seq_printf(s, "hop1_addr: 0x%llx\n", hop1_addr);
+ seq_printf(s, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr);
+ seq_printf(s, "hop1_pte: 0x%llx\n", hop1_pte);
+
+ seq_printf(s, "hop2_addr: 0x%llx\n", hop2_addr);
+ seq_printf(s, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr);
+ seq_printf(s, "hop2_pte: 0x%llx\n", hop2_pte);
+
+ seq_printf(s, "hop3_addr: 0x%llx\n", hop3_addr);
+ seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr);
+ seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte);
+
+ if (!(hop3_pte & LAST_MASK)) {
+ seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
+ seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
+ seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);
+ }
+
+ goto out;
+
+not_mapped:
+ dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+ virt_addr);
+out:
+ mutex_unlock(&ctx->mmu_lock);
+
+ return 0;
+}
+
+static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ char kbuf[MMU_KBUF_SIZE];
+ char *c;
+ ssize_t rc;
+
+ if (!hdev->mmu_enable)
+ return count;
+
+ if (count > sizeof(kbuf) - 1)
+ goto err;
+ if (copy_from_user(kbuf, buf, count))
+ goto err;
+ kbuf[count] = 0;
+
+ c = strchr(kbuf, ' ');
+ if (!c)
+ goto err;
+ *c = '\0';
+
+ rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
+ if (rc)
+ goto err;
+
+ if (strncmp(c+1, "0x", 2))
+ goto err;
+ rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
+ if (rc)
+ goto err;
+
+ return count;
+
+err:
+ dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");
+
+ return -EINVAL;
+}
+
+static int engines_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Can't check device idle during reset\n");
+ return 0;
+ }
+
+ hdev->asic_funcs->is_device_idle(hdev, NULL, s);
+
+ return 0;
+}
+
+static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ if (!hdev->mmu_enable)
+ goto out;
+
+ if (hdev->dram_supports_virtual_memory &&
+ (addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
+ return true;
+
+ if (addr >= prop->pmmu.start_addr &&
+ addr < prop->pmmu.end_addr)
+ return true;
+
+ if (addr >= prop->pmmu_huge.start_addr &&
+ addr < prop->pmmu_huge.end_addr)
+ return true;
+out:
+ return false;
+}
+
+static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
+ u64 *phys_addr)
+{
+ struct hl_ctx *ctx = hdev->compute_ctx;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
+ u64 hop_addr, hop_pte_addr, hop_pte;
+ u64 offset_mask = HOP4_MASK | FLAGS_MASK;
+ int rc = 0;
+ bool is_dram_addr;
+
+ if (!ctx) {
+ dev_err(hdev->dev, "no ctx available\n");
+ return -EINVAL;
+ }
+
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
+
+ /* shifts and masks are the same in PMMU and HPMMU, use one of them */
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
+ mutex_lock(&ctx->mmu_lock);
+
+ /* hop 0 */
+ hop_addr = get_hop0_addr(ctx);
+ hop_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
+ hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+ /* hop 1 */
+ hop_addr = get_next_hop_addr(hop_pte);
+ if (hop_addr == ULLONG_MAX)
+ goto not_mapped;
+ hop_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
+ hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+ /* hop 2 */
+ hop_addr = get_next_hop_addr(hop_pte);
+ if (hop_addr == ULLONG_MAX)
+ goto not_mapped;
+ hop_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
+ hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+ /* hop 3 */
+ hop_addr = get_next_hop_addr(hop_pte);
+ if (hop_addr == ULLONG_MAX)
+ goto not_mapped;
+ hop_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
+ hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+ if (!(hop_pte & LAST_MASK)) {
+ /* hop 4 */
+ hop_addr = get_next_hop_addr(hop_pte);
+ if (hop_addr == ULLONG_MAX)
+ goto not_mapped;
+ hop_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop_addr,
+ virt_addr);
+ hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+ offset_mask = FLAGS_MASK;
+ }
+
+ if (!(hop_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+
+ *phys_addr = (hop_pte & ~offset_mask) | (virt_addr & offset_mask);
+
+ goto out;
+
+not_mapped:
+ dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+ virt_addr);
+ rc = -EINVAL;
+out:
+ mutex_unlock(&ctx->mmu_lock);
+ return rc;
+}
+
+static ssize_t hl_data_read32(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[32];
+ u64 addr = entry->addr;
+ u32 val;
+ ssize_t rc;
+
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
+ return 0;
+ }
+
+ if (*ppos)
+ return 0;
+
+ if (hl_is_device_va(hdev, addr)) {
+ rc = device_va_to_pa(hdev, addr, &addr);
+ if (rc)
+ return rc;
+ }
+
+ rc = hdev->asic_funcs->debugfs_read32(hdev, addr, &val);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
+ return rc;
+ }
+
+ sprintf(tmp_buf, "0x%08x\n", val);
+ return simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ strlen(tmp_buf));
+}
+
+static ssize_t hl_data_write32(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u64 addr = entry->addr;
+ u32 value;
+ ssize_t rc;
+
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
+ return 0;
+ }
+
+ rc = kstrtouint_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ if (hl_is_device_va(hdev, addr)) {
+ rc = device_va_to_pa(hdev, addr, &addr);
+ if (rc)
+ return rc;
+ }
+
+ rc = hdev->asic_funcs->debugfs_write32(hdev, addr, value);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
+ value, addr);
+ return rc;
+ }
+
+ return count;
+}
+
+static ssize_t hl_data_read64(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[32];
+ u64 addr = entry->addr;
+ u64 val;
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ if (hl_is_device_va(hdev, addr)) {
+ rc = device_va_to_pa(hdev, addr, &addr);
+ if (rc)
+ return rc;
+ }
+
+ rc = hdev->asic_funcs->debugfs_read64(hdev, addr, &val);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
+ return rc;
+ }
+
+ sprintf(tmp_buf, "0x%016llx\n", val);
+ return simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ strlen(tmp_buf));
+}
+
+static ssize_t hl_data_write64(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u64 addr = entry->addr;
+ u64 value;
+ ssize_t rc;
+
+ rc = kstrtoull_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ if (hl_is_device_va(hdev, addr)) {
+ rc = device_va_to_pa(hdev, addr, &addr);
+ if (rc)
+ return rc;
+ }
+
+ rc = hdev->asic_funcs->debugfs_write64(hdev, addr, value);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n",
+ value, addr);
+ return rc;
+ }
+
+ return count;
+}
+
+static ssize_t hl_get_power_state(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[200];
+ int i;
+
+ if (*ppos)
+ return 0;
+
+ if (hdev->pdev->current_state == PCI_D0)
+ i = 1;
+ else if (hdev->pdev->current_state == PCI_D3hot)
+ i = 2;
+ else
+ i = 3;
+
+ sprintf(tmp_buf,
+ "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
+ return simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ strlen(tmp_buf));
+}
+
+static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ if (value == 1) {
+ pci_set_power_state(hdev->pdev, PCI_D0);
+ pci_restore_state(hdev->pdev);
+ rc = pci_enable_device(hdev->pdev);
+ } else if (value == 2) {
+ pci_save_state(hdev->pdev);
+ pci_disable_device(hdev->pdev);
+ pci_set_power_state(hdev->pdev, PCI_D3hot);
+ } else {
+ dev_dbg(hdev->dev, "invalid power state value %u\n", value);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[32];
+ u32 val;
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
+ entry->i2c_reg, &val);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to read from I2C bus %d, addr %d, reg %d\n",
+ entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
+ return rc;
+ }
+
+ sprintf(tmp_buf, "0x%02x\n", val);
+ rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ strlen(tmp_buf));
+
+ return rc;
+}
+
+static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
+ entry->i2c_reg, value);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n",
+ value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
+ return rc;
+ }
+
+ return count;
+}
+
+static ssize_t hl_led0_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ value = value ? 1 : 0;
+
+ hl_debugfs_led_set(hdev, 0, value);
+
+ return count;
+}
+
+static ssize_t hl_led1_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ value = value ? 1 : 0;
+
+ hl_debugfs_led_set(hdev, 1, value);
+
+ return count;
+}
+
+static ssize_t hl_led2_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ value = value ? 1 : 0;
+
+ hl_debugfs_led_set(hdev, 2, value);
+
+ return count;
+}
+
+static ssize_t hl_device_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ static const char *help =
+ "Valid values: disable, enable, suspend, resume, cpu_timeout\n";
+ return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
+}
+
+static ssize_t hl_device_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char data[30] = {0};
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ simple_write_to_buffer(data, 29, ppos, buf, count);
+
+ if (strncmp("disable", data, strlen("disable")) == 0) {
+ hdev->disabled = true;
+ } else if (strncmp("enable", data, strlen("enable")) == 0) {
+ hdev->disabled = false;
+ } else if (strncmp("suspend", data, strlen("suspend")) == 0) {
+ hdev->asic_funcs->suspend(hdev);
+ } else if (strncmp("resume", data, strlen("resume")) == 0) {
+ hdev->asic_funcs->resume(hdev);
+ } else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) ==
0) {
+ hdev->device_cpu_disabled = true;
+ } else {
+ dev_err(hdev->dev,
+ "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
+ count = -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[200];
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u64 value;
+ ssize_t rc;
+
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Can't change clock gating during reset\n");
+ return 0;
+ }
+
+ rc = kstrtoull_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ hdev->clock_gating_mask = value;
+ hdev->asic_funcs->set_clock_gating(hdev);
+
+ return count;
+}
+
+static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[200];
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Can't change stop on error during reset\n");
+ return 0;
+ }
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ hdev->stop_on_err = value ? 1 : 0;
+
+ hl_device_reset(hdev, false, false);
+
+ return count;
+}
+
+static const struct file_operations hl_data32b_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_data_read32,
+ .write = hl_data_write32
+};
+
+static const struct file_operations hl_data64b_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_data_read64,
+ .write = hl_data_write64
+};
+
+static const struct file_operations hl_i2c_data_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_i2c_data_read,
+ .write = hl_i2c_data_write
+};
+
+static const struct file_operations hl_power_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_get_power_state,
+ .write = hl_set_power_state
+};
+
+static const struct file_operations hl_led0_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_led0_write
+};
+
+static const struct file_operations hl_led1_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_led1_write
+};
+
+static const struct file_operations hl_led2_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_led2_write
+};
+
+static const struct file_operations hl_device_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_device_read,
+ .write = hl_device_write
+};
+
+static const struct file_operations hl_clk_gate_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_clk_gate_read,
+ .write = hl_clk_gate_write
+};
+
+static const struct file_operations hl_stop_on_err_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_stop_on_err_read,
+ .write = hl_stop_on_err_write
+};
+
+static const struct hl_info_list hl_debugfs_list[] = {
+ {"command_buffers", command_buffers_show, NULL},
+ {"command_submission", command_submission_show, NULL},
+ {"command_submission_jobs", command_submission_jobs_show, NULL},
+ {"userptr", userptr_show, NULL},
+ {"vm", vm_show, NULL},
+ {"mmu", mmu_show, mmu_asid_va_write},
+ {"engines", engines_show, NULL}
+};
+
+static int hl_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct hl_debugfs_entry *node = inode->i_private;
+
+ return single_open(file, node->info_ent->show, node);
+}
+
+static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct hl_debugfs_entry *node = file->f_inode->i_private;
+
+ if (node->info_ent->write)
+ return node->info_ent->write(file, buf, count, f_pos);
+ else
+ return -EINVAL;
+
+}
+
+static const struct file_operations hl_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = hl_debugfs_open,
- .read = seq_read,
++ .read_iter = seq_read_iter,
+ .write = hl_debugfs_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void hl_debugfs_add_device(struct hl_device *hdev)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+ int count = ARRAY_SIZE(hl_debugfs_list);
+ struct hl_debugfs_entry *entry;
+ struct dentry *ent;
+ int i;
+
+ dev_entry->hdev = hdev;
+ dev_entry->entry_arr = kmalloc_array(count,
+ sizeof(struct hl_debugfs_entry),
+ GFP_KERNEL);
+ if (!dev_entry->entry_arr)
+ return;
+
+ INIT_LIST_HEAD(&dev_entry->file_list);
+ INIT_LIST_HEAD(&dev_entry->cb_list);
+ INIT_LIST_HEAD(&dev_entry->cs_list);
+ INIT_LIST_HEAD(&dev_entry->cs_job_list);
+ INIT_LIST_HEAD(&dev_entry->userptr_list);
+ INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
+ mutex_init(&dev_entry->file_mutex);
+ spin_lock_init(&dev_entry->cb_spinlock);
+ spin_lock_init(&dev_entry->cs_spinlock);
+ spin_lock_init(&dev_entry->cs_job_spinlock);
+ spin_lock_init(&dev_entry->userptr_spinlock);
+ spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);
+
+ dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
+ hl_debug_root);
+
+ debugfs_create_x64("addr",
+ 0644,
+ dev_entry->root,
+ &dev_entry->addr);
+
+ debugfs_create_file("data32",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_data32b_fops);
+
+ debugfs_create_file("data64",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_data64b_fops);
+
+ debugfs_create_file("set_power_state",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_power_fops);
+
+ debugfs_create_u8("i2c_bus",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_bus);
+
+ debugfs_create_u8("i2c_addr",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_addr);
+
+ debugfs_create_u8("i2c_reg",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_reg);
+
+ debugfs_create_file("i2c_data",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_i2c_data_fops);
+
+ debugfs_create_file("led0",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led0_fops);
+
+ debugfs_create_file("led1",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led1_fops);
+
+ debugfs_create_file("led2",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led2_fops);
+
+ debugfs_create_file("device",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_device_fops);
+
+ debugfs_create_file("clk_gate",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_clk_gate_fops);
+
+ debugfs_create_file("stop_on_err",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_stop_on_err_fops);
+
+ for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
+
+ ent = debugfs_create_file(hl_debugfs_list[i].name,
+ 0444,
+ dev_entry->root,
+ entry,
+ &hl_debugfs_fops);
+ entry->dent = ent;
+ entry->info_ent = &hl_debugfs_list[i];
+ entry->dev_entry = dev_entry;
+ }
+}
+
+void hl_debugfs_remove_device(struct hl_device *hdev)
+{
+ struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
+
+ debugfs_remove_recursive(entry->root);
+
+ mutex_destroy(&entry->file_mutex);
+ kfree(entry->entry_arr);
+}
+
+void hl_debugfs_add_file(struct hl_fpriv *hpriv)
+{
+ struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
+
+ mutex_lock(&dev_entry->file_mutex);
+ list_add(&hpriv->debugfs_list, &dev_entry->file_list);
+ mutex_unlock(&dev_entry->file_mutex);
+}
+
+void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
+{
+ struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
+
+ mutex_lock(&dev_entry->file_mutex);
+ list_del(&hpriv->debugfs_list);
+ mutex_unlock(&dev_entry->file_mutex);
+}
+
+void hl_debugfs_add_cb(struct hl_cb *cb)
+{
+ struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cb_spinlock);
+ list_add(&cb->debugfs_list, &dev_entry->cb_list);
+ spin_unlock(&dev_entry->cb_spinlock);
+}
+
+void hl_debugfs_remove_cb(struct hl_cb *cb)
+{
+ struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cb_spinlock);
+ list_del(&cb->debugfs_list);
+ spin_unlock(&dev_entry->cb_spinlock);
+}
+
+void hl_debugfs_add_cs(struct hl_cs *cs)
+{
+ struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cs_spinlock);
+ list_add(&cs->debugfs_list, &dev_entry->cs_list);
+ spin_unlock(&dev_entry->cs_spinlock);
+}
+
+void hl_debugfs_remove_cs(struct hl_cs *cs)
+{
+ struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cs_spinlock);
+ list_del(&cs->debugfs_list);
+ spin_unlock(&dev_entry->cs_spinlock);
+}
+
+void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cs_job_spinlock);
+ list_add(&job->debugfs_list, &dev_entry->cs_job_list);
+ spin_unlock(&dev_entry->cs_job_spinlock);
+}
+
+void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cs_job_spinlock);
+ list_del(&job->debugfs_list);
+ spin_unlock(&dev_entry->cs_job_spinlock);
+}
+
+void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->userptr_spinlock);
+ list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
+ spin_unlock(&dev_entry->userptr_spinlock);
+}
+
+void hl_debugfs_remove_userptr(struct hl_device *hdev,
+ struct hl_userptr *userptr)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->userptr_spinlock);
+ list_del(&userptr->debugfs_list);
+ spin_unlock(&dev_entry->userptr_spinlock);
+}
+
+void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+ list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
+ spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+}
+
+void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+ list_del(&ctx->debugfs_list);
+ spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+}
+
+void __init hl_debugfs_init(void)
+{
+ hl_debug_root = debugfs_create_dir("habanalabs", NULL);
+}
+
+void hl_debugfs_fini(void)
+{
+ debugfs_remove_recursive(hl_debug_root);
+}
diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 05f33b7e3677,3891fb39a889..424026da73cc
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@@ -229,7 -229,7 +229,7 @@@ static int cim_la_open(struct inode *in
static const struct file_operations cim_la_fops = {
.owner = THIS_MODULE,
.open = cim_la_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private
};
@@@ -272,7 -272,7 +272,7 @@@ static int cim_pif_la_open(struct inod
static const struct file_operations cim_pif_la_fops = {
.owner = THIS_MODULE,
.open = cim_pif_la_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private
};
@@@ -318,7 -318,7 +318,7 @@@ static int cim_ma_la_open(struct inode
static const struct file_operations cim_ma_la_fops = {
.owner = THIS_MODULE,
.open = cim_ma_la_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private
};
@@@ -411,7 -411,7 +411,7 @@@ static int cim_ibq_open(struct inode *i
static const struct file_operations cim_ibq_fops = {
.owner = THIS_MODULE,
.open = cim_ibq_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private
};
@@@ -440,7 -440,7 +440,7 @@@ static int cim_obq_open(struct inode *i
static const struct file_operations cim_obq_fops = {
.owner = THIS_MODULE,
.open = cim_obq_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private
};
@@@ -686,7 -686,7 +686,7 @@@ static ssize_t tp_la_write(struct file
static const struct file_operations tp_la_fops = {
.owner = THIS_MODULE,
.open = tp_la_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private,
.write = tp_la_write
@@@ -722,7 -722,7 +722,7 @@@ static int ulprx_la_open(struct inode *
static const struct file_operations ulprx_la_fops = {
.owner = THIS_MODULE,
.open = ulprx_la_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private
};
@@@ -812,7 -812,7 +812,7 @@@ static ssize_t pm_stats_clear(struct fi
static const struct file_operations pm_stats_debugfs_fops = {
.owner = THIS_MODULE,
.open = pm_stats_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = pm_stats_clear
@@@ -1136,7 -1136,7 +1136,7 @@@ static int devlog_open(struct inode *in
static const struct file_operations devlog_fops = {
.owner = THIS_MODULE,
.open = devlog_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private
};
@@@ -1234,7 -1234,7 +1234,7 @@@ static int mboxlog_open(struct inode *i
static const struct file_operations mboxlog_fops = {
.owner = THIS_MODULE,
.open = mboxlog_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
@@@ -1317,7 -1317,7 +1317,7 @@@ static ssize_t mbox_write(struct file *
static const struct file_operations mbox_debugfs_fops = {
.owner = THIS_MODULE,
.open = mbox_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = mbox_write
@@@ -1610,7 -1610,7 +1610,7 @@@ out
static const struct file_operations mps_trc_debugfs_fops = {
.owner = THIS_MODULE,
.open = mps_trc_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = mps_trc_write
@@@ -1930,7 -1930,7 +1930,7 @@@ static int mps_tcam_open(struct inode *
static const struct file_operations mps_tcam_debugfs_fops = {
.owner = THIS_MODULE,
.open = mps_tcam_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
@@@ -2007,7 -2007,7 +2007,7 @@@ static int rss_open(struct inode *inode
static const struct file_operations rss_debugfs_fops = {
.owner = THIS_MODULE,
.open = rss_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private
};
@@@ -2226,7 -2226,7 +2226,7 @@@ static ssize_t rss_key_write(struct fil
static const struct file_operations rss_key_debugfs_fops = {
.owner = THIS_MODULE,
.open = rss_key_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = rss_key_write
@@@ -2310,7 -2310,7 +2310,7 @@@ static int rss_pf_config_open(struct in
static const struct file_operations rss_pf_config_debugfs_fops = {
.owner = THIS_MODULE,
.open = rss_pf_config_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private
};
@@@ -2373,7 -2373,7 +2373,7 @@@ static int rss_vf_config_open(struct in
static const struct file_operations rss_vf_config_debugfs_fops = {
.owner = THIS_MODULE,
.open = rss_vf_config_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private
};
@@@ -2561,7 -2561,7 +2561,7 @@@ static int dcb_info_open(struct inode *
static const struct file_operations dcb_info_debugfs_fops = {
.owner = THIS_MODULE,
.open = dcb_info_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
@@@ -2742,58 -2742,6 +2742,58 @@@ do {
}
r -= eth_entries;
+ for_each_port(adap, j) {
+ struct port_info *pi = adap2pinfo(adap, j);
+ const struct sge_eth_rxq *rx;
+
+ mutex_lock(&pi->vi_mirror_mutex);
+ if (!pi->vi_mirror_count) {
+ mutex_unlock(&pi->vi_mirror_mutex);
+ continue;
+ }
+
+ if (r >= DIV_ROUND_UP(pi->nmirrorqsets, 4)) {
+ r -= DIV_ROUND_UP(pi->nmirrorqsets, 4);
+ mutex_unlock(&pi->vi_mirror_mutex);
+ continue;
+ }
+
+ rx = &s->mirror_rxq[j][r * 4];
+ n = min(4, pi->nmirrorqsets - 4 * r);
+
+ S("QType:", "Mirror-Rxq");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:",
s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxCSO:", stats.rx_cso);
+ RL("VLANxtract:", stats.vlan_ex);
+ RL("LROmerged:", stats.lro_merged);
+ RL("LROpackets:", stats.lro_pkts);
+ RL("RxDrops:", stats.rx_drops);
+ RL("RxBadPkts:", stats.bad_rx_pkts);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLMapErr:", fl.mapping_err);
+ RL("FLLow:", fl.low);
+ RL("FLStarving:", fl.starving);
+
+ mutex_unlock(&pi->vi_mirror_mutex);
+ goto out;
+ }
+
if (!adap->tc_mqprio)
goto skip_mqprio;
@@@ -3150,10 -3098,9 +3150,10 @@@ unlock
return 0;
}
-static int sge_queue_entries(const struct adapter *adap)
+static int sge_queue_entries(struct adapter *adap)
{
int i, tot_uld_entries = 0, eohw_entries = 0, eosw_entries = 0;
+ int mirror_rxq_entries = 0;
if (adap->tc_mqprio) {
struct cxgb4_tc_port_mqprio *port_mqprio;
@@@ -3176,15 -3123,6 +3176,15 @@@
mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
}
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ mutex_lock(&pi->vi_mirror_mutex);
+ if (pi->vi_mirror_count)
+ mirror_rxq_entries += DIV_ROUND_UP(pi->nmirrorqsets, 4);
+ mutex_unlock(&pi->vi_mirror_mutex);
+ }
+
if (!is_uld(adap))
goto lld_only;
@@@ -3199,7 -3137,7 +3199,7 @@@
mutex_unlock(&uld_mutex);
lld_only:
- return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
+ return DIV_ROUND_UP(adap->sge.ethqsets, 4) + mirror_rxq_entries +
eohw_entries + eosw_entries + tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
@@@ -3245,7 -3183,7 +3245,7 @@@ static int sge_qinfo_open(struct inode
static const struct file_operations sge_qinfo_debugfs_fops = {
.owner = THIS_MODULE,
.open = sge_qinfo_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
diff --combined drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index dbe8ee7e0e21,20e74d5e837f..6983e3d55e22
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@@ -2039,7 -2039,7 +2039,7 @@@ static int mboxlog_open(struct inode *i
static const struct file_operations mboxlog_fops = {
.owner = THIS_MODULE,
.open = mboxlog_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
@@@ -2192,7 -2192,7 +2192,7 @@@ static int sge_qinfo_open(struct inode
static const struct file_operations sge_qinfo_debugfs_fops = {
.owner = THIS_MODULE,
.open = sge_qinfo_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
@@@ -2338,7 -2338,7 +2338,7 @@@ static int sge_qstats_open(struct inod
static const struct file_operations sge_qstats_proc_fops = {
.owner = THIS_MODULE,
.open = sge_qstats_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
@@@ -2436,7 -2436,7 +2436,7 @@@ static int interfaces_open(struct inod
static const struct file_operations interfaces_proc_fops = {
.owner = THIS_MODULE,
.open = interfaces_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
@@@ -2916,39 -2916,6 +2916,39 @@@ static const struct net_device_ops cxgb
#endif
};
+/**
+ * cxgb4vf_get_port_mask - Get port mask for the VF based on mac
+ * address stored on the adapter
+ * @adapter: The adapter
+ *
+ * Find the the port mask for the VF based on the index of mac
+ * address stored in the adapter. If no mac address is stored on
+ * the adapter for the VF, use the port mask received from the
+ * firmware.
+ */
+static unsigned int cxgb4vf_get_port_mask(struct adapter *adapter)
+{
+ unsigned int naddr = 1, pidx = 0;
+ unsigned int pmask, rmask = 0;
+ u8 mac[ETH_ALEN];
+ int err;
+
+ pmask = adapter->params.vfres.pmask;
+ while (pmask) {
+ if (pmask & 1) {
+ err = t4vf_get_vf_mac_acl(adapter, pidx, &naddr, mac);
+ if (!err && !is_zero_ether_addr(mac))
+ rmask |= (1 << pidx);
+ }
+ pmask >>= 1;
+ pidx++;
+ }
+ if (!rmask)
+ rmask = adapter->params.vfres.pmask;
+
+ return rmask;
+}
+
/*
* "Probe" a device: initialize a device and construct all kernel and driver
* state needed to manage the device. This routine is called "init_one" in
@@@ -2957,12 -2924,13 +2957,12 @@@
static int cxgb4vf_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int pci_using_dac;
- int err, pidx;
- unsigned int pmask;
struct adapter *adapter;
- struct port_info *pi;
struct net_device *netdev;
- unsigned int pf;
+ struct port_info *pi;
+ unsigned int pmask;
+ int pci_using_dac;
+ int err, pidx;
/*
* Initialize generic PCI device state.
@@@ -3105,7 -3073,8 +3105,7 @@@
/*
* Allocate our "adapter ports" and stitch everything together.
*/
- pmask = adapter->params.vfres.pmask;
- pf = t4vf_get_pf_from_vf(adapter);
+ pmask = cxgb4vf_get_port_mask(adapter);
for_each_port(adapter, pidx) {
int port_id, viid;
u8 mac[ETH_ALEN];
@@@ -3188,7 -3157,7 +3188,7 @@@
goto err_free_dev;
}
- err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
+ err = t4vf_get_vf_mac_acl(adapter, port_id, &naddr, mac);
if (err) {
dev_err(&pdev->dev,
"unable to determine MAC ACL address, "
diff --combined drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index 56d9927fbfda,bc99fe0aabf0..f80f94e1e10a
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@@ -21,7 -21,7 +21,7 @@@ static int dpaa2_dbg_cpu_show(struct se
seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
"CPU", "Rx", "Rx Err", "Rx SG",
"Tx", "Tx Err", "Tx conf",
- "Tx SG", "Tx realloc", "Enq busy");
+ "Tx SG", "Tx converted to SG", "Enq busy");
for_each_online_cpu(i) {
stats = per_cpu_ptr(priv->percpu_stats, i);
@@@ -35,7 -35,7 +35,7 @@@
stats->tx_errors,
extras->tx_conf_frames,
extras->tx_sg_frames,
- extras->tx_reallocs,
+ extras->tx_converted_sg_frames,
extras->tx_portal_busy);
}
@@@ -56,7 -56,7 +56,7 @@@ static int dpaa2_dbg_cpu_open(struct in
static const struct file_operations dpaa2_dbg_cpu_ops = {
.open = dpaa2_dbg_cpu_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -90,10 -90,6 +90,10 @@@ static int dpaa2_dbg_fqs_show(struct se
if (err)
fcnt = 0;
+ /* Skip FQs with no traffic */
+ if (!fq->stats.frames && !fcnt)
+ continue;
+
seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
fq->fqid,
fq->target_cpu,
@@@ -120,7 -116,7 +120,7 @@@ static int dpaa2_dbg_fqs_open(struct in
static const struct file_operations dpaa2_dbg_fq_ops = {
.open = dpaa2_dbg_fqs_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -165,7 -161,7 +165,7 @@@ static int dpaa2_dbg_ch_open(struct ino
static const struct file_operations dpaa2_dbg_ch_ops = {
.open = dpaa2_dbg_ch_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index 267ad4eddb5c,cf3b7ec4db73..a3a1691e5ed9
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@@ -260,7 -260,7 +260,7 @@@ struct hcmd_write_data
__be32 cmd_id;
__be32 flags;
__be16 length;
- u8 data[0];
+ u8 data[];
} __packed;
static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
@@@ -419,7 -419,7 +419,7 @@@ static int iwl_dbgfs_fw_info_open(struc
static const struct file_operations iwl_dbgfs_fw_info_ops = {
.owner = THIS_MODULE,
.open = iwl_dbgfs_fw_info_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private,
};
diff --combined drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index d06afcf46d67,c61bd9803078..e356d9ec5957
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@@ -181,7 -181,7 +181,7 @@@ mt7615_ampdu_stat_open(struct inode *in
static const struct file_operations fops_ampdu_stat = {
.open = mt7615_ampdu_stat_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -234,11 -234,10 +234,11 @@@ mt7615_queues_acq(struct seq_file *s, v
int i;
for (i = 0; i < 16; i++) {
- int j, acs = i / 4, index = i % 4;
+ int j, wmm_idx = i % MT7615_MAX_WMM_SETS;
+ int acs = i / MT7615_MAX_WMM_SETS;
u32 ctrl, val, qlen = 0;
- val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx));
ctrl = BIT(31) | BIT(15) | (acs << 8);
for (j = 0; j < 32; j++) {
@@@ -246,11 -245,11 +246,11 @@@
continue;
mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
- ctrl | (j + (index << 5)));
+ ctrl | (j + (wmm_idx << 5)));
qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
GENMASK(11, 0));
}
- seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+ seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen);
}
return 0;
diff --combined drivers/net/xen-netback/xenbus.c
index 7e62a6ee7622,06b253eba25d..aad7e526eb5f
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@@ -152,7 -152,7 +152,7 @@@ static int xenvif_io_ring_open(struct i
static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
.owner = THIS_MODULE,
.open = xenvif_io_ring_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = xenvif_write_io_ring,
@@@ -393,24 -393,6 +393,24 @@@ static void set_backend_state(struct ba
}
}
+static void read_xenbus_frontend_xdp(struct backend_info *be,
+ struct xenbus_device *dev)
+{
+ struct xenvif *vif = be->vif;
+ u16 headroom;
+ int err;
+
+ err = xenbus_scanf(XBT_NIL, dev->otherend,
+ "xdp-headroom", "%hu", &headroom);
+ if (err != 1) {
+ vif->xdp_headroom = 0;
+ return;
+ }
+ if (headroom > XEN_NETIF_MAX_XDP_HEADROOM)
+ headroom = XEN_NETIF_MAX_XDP_HEADROOM;
+ vif->xdp_headroom = headroom;
+}
+
/**
* Callback received when the frontend's state changes.
*/
@@@ -435,11 -417,6 +435,11 @@@ static void frontend_changed(struct xen
set_backend_state(be, XenbusStateConnected);
break;
+ case XenbusStateReconfiguring:
+ read_xenbus_frontend_xdp(be, dev);
+ xenbus_switch_state(dev, XenbusStateReconfigured);
+ break;
+
case XenbusStateClosing:
set_backend_state(be, XenbusStateClosing);
break;
@@@ -970,8 -947,6 +970,8 @@@ static int read_xenbus_vif_flags(struc
vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
"feature-ipv6-csum-offload", 0);
+ read_xenbus_frontend_xdp(be, dev);
+
return 0;
}
@@@ -1061,15 -1036,6 +1061,15 @@@ static int netback_probe(struct xenbus_
goto abort_transaction;
}
+ /* we can adjust headroom for netfront XDP processing */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-xdp-headroom", "%d",
+ provides_xdp_headroom);
+ if (err) {
+ message = "writing feature-xdp-headroom";
+ goto abort_transaction;
+ }
+
/* We don't support rx-flip path (except old guests who
* don't grok this feature flag).
*/
diff --combined drivers/nvme/host/fabrics.c
index 4ec4829d6233,13496542b6d7..aa2b98d0d0b3
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@@ -547,7 -547,7 +547,7 @@@ static struct nvmf_transport_ops *nvmf_
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq)
{
- if (ctrl->state != NVME_CTRL_DELETING &&
+ if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
ctrl->state != NVME_CTRL_DEAD &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
@@@ -1140,7 -1140,7 +1140,7 @@@ static int nvmf_dev_release(struct inod
static const struct file_operations nvmf_dev_fops = {
.owner = THIS_MODULE,
.write = nvmf_dev_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.open = nvmf_dev_open,
.release = nvmf_dev_release,
};
diff --combined drivers/pci/controller/pci-tegra.c
index b532d5082fb6,bfb8dbc2d0ea..7498d5f5cf48
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@@ -181,6 -181,13 +181,6 @@@
#define AFI_PEXBIAS_CTRL_0 0x168
-#define RP_PRIV_XP_DL 0x00000494
-#define RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1ff << 1)
-
-#define RP_RX_HDR_LIMIT 0x00000e00
-#define RP_RX_HDR_LIMIT_PW_MASK (0xff << 8)
-#define RP_RX_HDR_LIMIT_PW (0x0e << 8)
-
#define RP_ECTL_2_R1 0x00000e84
#define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff
@@@ -316,6 -323,7 +316,6 @@@ struct tegra_pcie_soc
bool program_uphy;
bool update_clamp_threshold;
bool program_deskew_time;
- bool raw_violation_fixup;
bool update_fc_timer;
bool has_cache_bars;
struct {
@@@ -651,6 -659,23 +651,6 @@@ static void tegra_pcie_apply_sw_fixup(s
writel(value, port->base + RP_VEND_CTL0);
}
- /* Fixup for read after write violation. */
- if (soc->raw_violation_fixup) {
- value = readl(port->base + RP_RX_HDR_LIMIT);
- value &= ~RP_RX_HDR_LIMIT_PW_MASK;
- value |= RP_RX_HDR_LIMIT_PW;
- writel(value, port->base + RP_RX_HDR_LIMIT);
-
- value = readl(port->base + RP_PRIV_XP_DL);
- value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD;
- writel(value, port->base + RP_PRIV_XP_DL);
-
- value = readl(port->base + RP_VEND_XP);
- value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
- value |= soc->update_fc_threshold;
- writel(value, port->base + RP_VEND_XP);
- }
-
if (soc->update_fc_timer) {
value = readl(port->base + RP_VEND_XP);
value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
@@@ -2391,6 -2416,7 +2391,6 @@@ static const struct tegra_pcie_soc tegr
.program_uphy = true,
.update_clamp_threshold = false,
.program_deskew_time = false,
- .raw_violation_fixup = false,
.update_fc_timer = false,
.has_cache_bars = true,
.ectl.enable = false,
@@@ -2420,6 -2446,7 +2420,6 @@@ static const struct tegra_pcie_soc tegr
.program_uphy = true,
.update_clamp_threshold = false,
.program_deskew_time = false,
- .raw_violation_fixup = false,
.update_fc_timer = false,
.has_cache_bars = false,
.ectl.enable = false,
@@@ -2432,6 -2459,8 +2432,6 @@@ static const struct tegra_pcie_soc tegr
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.pads_refclk_cfg0 = 0x44ac44ac,
- /* FC threshold is bit[25:18] */
- .update_fc_threshold = 0x03fc0000,
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
@@@ -2441,6 -2470,7 +2441,6 @@@
.program_uphy = true,
.update_clamp_threshold = true,
.program_deskew_time = false,
- .raw_violation_fixup = true,
.update_fc_timer = false,
.has_cache_bars = false,
.ectl.enable = false,
@@@ -2464,6 -2494,7 +2464,6 @@@ static const struct tegra_pcie_soc tegr
.program_uphy = true,
.update_clamp_threshold = true,
.program_deskew_time = true,
- .raw_violation_fixup = false,
.update_fc_timer = true,
.has_cache_bars = false,
.ectl = {
@@@ -2505,6 -2536,7 +2505,6 @@@ static const struct tegra_pcie_soc tegr
.program_uphy = false,
.update_clamp_threshold = false,
.program_deskew_time = false,
- .raw_violation_fixup = false,
.update_fc_timer = false,
.has_cache_bars = false,
.ectl.enable = false,
@@@ -2602,7 -2634,7 +2602,7 @@@ static int tegra_pcie_ports_open(struc
static const struct file_operations tegra_pcie_ports_ops = {
.owner = THIS_MODULE,
.open = tegra_pcie_ports_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
diff --combined drivers/platform/x86/asus-wmi.c
index 8f4acdc06b13,9efa854fff13..13f7d1bf076b
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@@ -441,7 -441,6 +441,7 @@@ static int asus_wmi_battery_add(struct
* battery is named BATT.
*/
if (strcmp(battery->desc->name, "BAT0") != 0 &&
+ strcmp(battery->desc->name, "BAT1") != 0 &&
strcmp(battery->desc->name, "BATT") != 0)
return -ENODEV;
@@@ -2503,7 -2502,7 +2503,7 @@@ static int asus_wmi_debugfs_open(struc
static const struct file_operations asus_wmi_debugfs_io_ops = {
.owner = THIS_MODULE,
.open = asus_wmi_debugfs_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined drivers/platform/x86/intel_pmc_core.c
index 338ea5222555,4941349004f6..e0bee021d044
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@@ -415,7 -415,7 +415,7 @@@ static const struct pmc_bit_map tgl_lpm
{"PCIe_Gen3PLL_OFF_STS", BIT(20)},
{"OPIOPLL_OFF_STS", BIT(21)},
{"OCPLL_OFF_STS", BIT(22)},
- {"AudioPLL_OFF_STS", BIT(23)},
+ {"MainPLL_OFF_STS", BIT(23)},
{"MIPIPLL_OFF_STS", BIT(24)},
{"Fast_XTAL_Osc_OFF_STS", BIT(25)},
{"AC_Ring_Osc_OFF_STS", BIT(26)},
@@@ -795,7 -795,7 +795,7 @@@ static int pmc_core_mphy_pg_show(struc
msleep(10);
val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
- for (index = 0; map[index].name && index < 8; index++) {
+ for (index = 0; index < 8 && map[index].name; index++) {
seq_printf(s, "%-32s\tState: %s\n",
map[index].name,
map[index].bit_mask & val_low ? "Not power gated" :
@@@ -894,7 -894,7 +894,7 @@@ static int pmc_core_ltr_ignore_open(str
static const struct file_operations pmc_core_ltr_ignore_ops = {
.open = pmc_core_ltr_ignore_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = pmc_core_ltr_ignore_write,
.llseek = seq_lseek,
.release = single_release,
diff --combined drivers/platform/x86/thinkpad_acpi.c
index 9eda2f84a3cf,f571d6254e7c..446936c1e65a
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@@ -885,19 -885,11 +885,19 @@@ static ssize_t dispatch_proc_write(stru
if (!ibm || !ibm->write)
return -EINVAL;
+ if (count > PAGE_SIZE - 1)
+ return -EINVAL;
+
+ kernbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kernbuf)
+ return -ENOMEM;
- kernbuf = strndup_user(userbuf, PAGE_SIZE);
- if (IS_ERR(kernbuf))
- return PTR_ERR(kernbuf);
+ if (copy_from_user(kernbuf, userbuf, count)) {
+ kfree(kernbuf);
+ return -EFAULT;
+ }
+ kernbuf[count] = 0;
ret = ibm->write(kernbuf);
if (ret == 0)
ret = count;
@@@ -909,7 -901,7 +909,7 @@@
static const struct proc_ops dispatch_proc_ops = {
.proc_open = dispatch_proc_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = dispatch_proc_write,
@@@ -4030,8 -4022,8 +4030,8 @@@ static bool hotkey_notify_6xxx(const u3
return true;
case TP_HKEY_EV_THM_CSM_COMPLETED:
pr_debug("EC reports: Thermal Control Command set completed (DYTC)\n");
- /* recommended action: do nothing, we don't have
- * Lenovo ATM information */
+ /* Thermal event - pass on to event handler */
+ tpacpi_driver_event(hkey);
return true;
case TP_HKEY_EV_THM_TRANSFM_CHANGED:
pr_debug("EC reports: Thermal Transformation changed (GMTS)\n");
@@@ -6963,13 -6955,10 +6963,13 @@@ static int __init brightness_init(struc
pr_warn("Cannot enable backlight brightness support, ACPI is already handling
it. Refer to the acpi_backlight kernel parameter.\n");
return 1;
}
- } else if (tp_features.bright_acpimode && brightness_enable > 1) {
- pr_notice("Standard ACPI backlight interface not available, thinkpad_acpi native
brightness control enabled\n");
+ } else if (!tp_features.bright_acpimode) {
+ pr_notice("ACPI backlight interface not available\n");
+ return 1;
}
+ pr_notice("ACPI native brightness control enabled\n");
+
/*
* Check for module parameter bogosity, note that we
* init brightness_mode to TPACPI_BRGHT_MODE_MAX in order to be
@@@ -7968,7 -7957,7 +7968,7 @@@ static struct ibm_struct volume_driver_
* does so, its initial value is meaningless (0x07).
*
* For firmware bugs, refer to:
- * http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
+ * https://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
*
* ----
*
@@@ -7993,7 -7982,7 +7993,7 @@@
* mode.
*
* For firmware bugs, refer to:
- * http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
+ * https://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
*
* ----
*
@@@ -9318,6 -9307,9 +9318,6 @@@ static struct ibm_struct mute_led_drive
#define GET_STOP "BCSG"
#define SET_STOP "BCSS"
-#define START_ATTR "charge_start_threshold"
-#define STOP_ATTR "charge_stop_threshold"
-
enum {
BAT_ANY = 0,
BAT_PRIMARY = 1,
@@@ -9603,52 -9595,38 +9603,52 @@@ static ssize_t tpacpi_battery_show(int
return sprintf(buf, "%d\n", ret);
}
-static ssize_t charge_start_threshold_show(struct device *device,
+static ssize_t charge_control_start_threshold_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
return tpacpi_battery_show(THRESHOLD_START, device, buf);
}
-static ssize_t charge_stop_threshold_show(struct device *device,
+static ssize_t charge_control_end_threshold_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
return tpacpi_battery_show(THRESHOLD_STOP, device, buf);
}
-static ssize_t charge_start_threshold_store(struct device *dev,
+static ssize_t charge_control_start_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return tpacpi_battery_store(THRESHOLD_START, dev, buf, count);
}
-static ssize_t charge_stop_threshold_store(struct device *dev,
+static ssize_t charge_control_end_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return tpacpi_battery_store(THRESHOLD_STOP, dev, buf, count);
}
-static DEVICE_ATTR_RW(charge_start_threshold);
-static DEVICE_ATTR_RW(charge_stop_threshold);
+static DEVICE_ATTR_RW(charge_control_start_threshold);
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+struct device_attribute dev_attr_charge_start_threshold = __ATTR(
+ charge_start_threshold,
+ 0644,
+ charge_control_start_threshold_show,
+ charge_control_start_threshold_store
+);
+struct device_attribute dev_attr_charge_stop_threshold = __ATTR(
+ charge_stop_threshold,
+ 0644,
+ charge_control_end_threshold_show,
+ charge_control_end_threshold_store
+);
static struct attribute *tpacpi_battery_attrs[] = {
+ &dev_attr_charge_control_start_threshold.attr,
+ &dev_attr_charge_control_end_threshold.attr,
&dev_attr_charge_start_threshold.attr,
&dev_attr_charge_stop_threshold.attr,
NULL,
@@@ -9817,105 -9795,6 +9817,105 @@@ static struct ibm_struct lcdshadow_driv
.write = lcdshadow_write,
};
+/*************************************************************************
+ * DYTC subdriver, for the Lenovo lapmode feature
+ */
+
+#define DYTC_CMD_GET 2 /* To get current IC function and mode */
+#define DYTC_GET_LAPMODE_BIT 17 /* Set when in lapmode */
+
+static bool dytc_lapmode;
+
+static void dytc_lapmode_notify_change(void)
+{
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, "dytc_lapmode");
+}
+
+static int dytc_command(int command, int *output)
+{
+ acpi_handle dytc_handle;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "DYTC", &dytc_handle))) {
+ /* Platform doesn't support DYTC */
+ return -ENODEV;
+ }
+ if (!acpi_evalf(dytc_handle, output, NULL, "dd", command))
+ return -EIO;
+ return 0;
+}
+
+static int dytc_lapmode_get(bool *state)
+{
+ int output, err;
+
+ err = dytc_command(DYTC_CMD_GET, &output);
+ if (err)
+ return err;
+ *state = output & BIT(DYTC_GET_LAPMODE_BIT) ? true : false;
+ return 0;
+}
+
+static void dytc_lapmode_refresh(void)
+{
+ bool new_state;
+ int err;
+
+ err = dytc_lapmode_get(&new_state);
+ if (err || (new_state == dytc_lapmode))
+ return;
+
+ dytc_lapmode = new_state;
+ dytc_lapmode_notify_change();
+}
+
+/* sysfs lapmode entry */
+static ssize_t dytc_lapmode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", dytc_lapmode);
+}
+
+static DEVICE_ATTR_RO(dytc_lapmode);
+
+static struct attribute *dytc_attributes[] = {
+ &dev_attr_dytc_lapmode.attr,
+ NULL,
+};
+
+static const struct attribute_group dytc_attr_group = {
+ .attrs = dytc_attributes,
+};
+
+static int tpacpi_dytc_init(struct ibm_init_struct *iibm)
+{
+ int err;
+
+ err = dytc_lapmode_get(&dytc_lapmode);
+ /* If support isn't available (ENODEV) then don't return an error
+ * but just don't create the sysfs group
+ */
+ if (err == -ENODEV)
+ return 0;
+ /* For all other errors we can flag the failure */
+ if (err)
+ return err;
+
+ /* Platform supports this feature - create the group */
+ err = sysfs_create_group(&tpacpi_pdev->dev.kobj, &dytc_attr_group);
+ return err;
+}
+
+static void dytc_exit(void)
+{
+ sysfs_remove_group(&tpacpi_pdev->dev.kobj, &dytc_attr_group);
+}
+
+static struct ibm_struct dytc_driver_data = {
+ .name = "dytc",
+ .exit = dytc_exit,
+};
+
/****************************************************************************
****************************************************************************
*
@@@ -9963,10 -9842,6 +9963,10 @@@ static void tpacpi_driver_event(const u
mutex_unlock(&kbdlight_mutex);
}
+
+ if (hkey_event == TP_HKEY_EV_THM_CSM_COMPLETED)
+ dytc_lapmode_refresh();
+
}
static void hotkey_driver_event(const unsigned int scancode)
@@@ -10219,7 -10094,7 +10219,7 @@@ static int __must_check __init get_thin
* X32 or newer, all Z series; Some models must have an
* up-to-date BIOS or they will not be detected.
*
- * See http://thinkwiki.org/wiki/List_of_DMI_IDs
+ * See https://thinkwiki.org/wiki/List_of_DMI_IDs
*/
while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev))) {
if (sscanf(dev->name,
@@@ -10405,10 -10280,6 +10405,10 @@@ static struct ibm_init_struct ibms_init
.init = tpacpi_lcdshadow_init,
.data = &lcdshadow_driver_data,
},
+ {
+ .init = tpacpi_dytc_init,
+ .data = &dytc_driver_data,
+ },
};
static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
@@@ -10742,8 -10613,8 +10742,8 @@@ MODULE_DEVICE_TABLE(acpi, ibm_htk_devic
/*
* DMI matching for module autoloading
*
- * See http://thinkwiki.org/wiki/List_of_DMI_IDs
- * See http://thinkwiki.org/wiki/BIOS_Upgrade_Downloads
+ * See https://thinkwiki.org/wiki/List_of_DMI_IDs
+ * See https://thinkwiki.org/wiki/BIOS_Upgrade_Downloads
*
* Only models listed in thinkwiki will be supported, so add yours
* if it is not there yet.
diff --combined drivers/platform/x86/toshiba_acpi.c
index 36fff00af9eb,770477bb407d..707c537e3c9b
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@@ -1428,7 -1428,7 +1428,7 @@@ static ssize_t lcd_proc_write(struct fi
static const struct proc_ops lcd_proc_ops = {
.proc_open = lcd_proc_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = lcd_proc_write,
@@@ -1534,7 -1534,7 +1534,7 @@@ static ssize_t video_proc_write(struct
static const struct proc_ops video_proc_ops = {
.proc_open = video_proc_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = video_proc_write,
@@@ -1611,7 -1611,7 +1611,7 @@@ static ssize_t fan_proc_write(struct fi
static const struct proc_ops fan_proc_ops = {
.proc_open = fan_proc_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = fan_proc_write,
@@@ -1655,7 -1655,7 +1655,7 @@@ static ssize_t keys_proc_write(struct f
static const struct proc_ops keys_proc_ops = {
.proc_open = keys_proc_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = keys_proc_write,
@@@ -3114,7 -3114,7 +3114,7 @@@ static int toshiba_acpi_add(struct acpi
toshiba_accelerometer_available(dev);
if (dev->accelerometer_supported) {
- dev->indio_dev = iio_device_alloc(sizeof(*dev));
+ dev->indio_dev = iio_device_alloc(&acpi_dev->dev, sizeof(*dev));
if (!dev->indio_dev) {
pr_err("Unable to allocate iio device\n");
goto iio_error;
@@@ -3124,6 -3124,7 +3124,6 @@@
dev->indio_dev->info = &toshiba_iio_accel_info;
dev->indio_dev->name = "Toshiba accelerometer";
- dev->indio_dev->dev.parent = &acpi_dev->dev;
dev->indio_dev->modes = INDIO_DIRECT_MODE;
dev->indio_dev->channels = toshiba_iio_accel_channels;
dev->indio_dev->num_channels =
diff --combined drivers/pwm/core.c
index 276e939a5684,ad39b0bb0b03..a7037136d49f
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@@ -510,12 -510,12 +510,12 @@@ static void pwm_apply_state_debug(struc
last->period > s2.period &&
last->period <= state->period)
dev_warn(chip->dev,
- ".apply didn't pick the best available period (requested: %u, applied: %u,
possible: %u)\n",
+ ".apply didn't pick the best available period (requested: %llu, applied:
%llu, possible: %llu)\n",
state->period, s2.period, last->period);
if (state->enabled && state->period < s2.period)
dev_warn(chip->dev,
- ".apply is supposed to round down period (requested: %u, applied: %u)\n",
+ ".apply is supposed to round down period (requested: %llu, applied:
%llu)\n",
state->period, s2.period);
if (state->enabled &&
@@@ -524,14 -524,14 +524,14 @@@
last->duty_cycle > s2.duty_cycle &&
last->duty_cycle <= state->duty_cycle)
dev_warn(chip->dev,
- ".apply didn't pick the best available duty cycle (requested: %u/%u,
applied: %u/%u, possible: %u/%u)\n",
+ ".apply didn't pick the best available duty cycle (requested: %llu/%llu,
applied: %llu/%llu, possible: %llu/%llu)\n",
state->duty_cycle, state->period,
s2.duty_cycle, s2.period,
last->duty_cycle, last->period);
if (state->enabled && state->duty_cycle < s2.duty_cycle)
dev_warn(chip->dev,
- ".apply is supposed to round down duty_cycle (requested: %u/%u, applied:
%u/%u)\n",
+ ".apply is supposed to round down duty_cycle (requested: %llu/%llu, applied:
%llu/%llu)\n",
state->duty_cycle, state->period,
s2.duty_cycle, s2.period);
@@@ -558,7 -558,7 +558,7 @@@
(s1.enabled && s1.period != last->period) ||
(s1.enabled && s1.duty_cycle != last->duty_cycle)) {
dev_err(chip->dev,
- ".apply is not idempotent (ena=%d pol=%d %u/%u) -> (ena=%d pol=%d
%u/%u)\n",
+ ".apply is not idempotent (ena=%d pol=%d %llu/%llu) -> (ena=%d pol=%d
%llu/%llu)\n",
s1.enabled, s1.polarity, s1.duty_cycle, s1.period,
last->enabled, last->polarity, last->duty_cycle,
last->period);
@@@ -1284,8 -1284,8 +1284,8 @@@ static void pwm_dbg_show(struct pwm_chi
if (state.enabled)
seq_puts(s, " enabled");
- seq_printf(s, " period: %u ns", state.period);
- seq_printf(s, " duty: %u ns", state.duty_cycle);
+ seq_printf(s, " period: %llu ns", state.period);
+ seq_printf(s, " duty: %llu ns", state.duty_cycle);
seq_printf(s, " polarity: %s",
state.polarity ? "inverse" : "normal");
@@@ -1342,7 -1342,7 +1342,7 @@@ static int pwm_seq_open(struct inode *i
static const struct file_operations pwm_debugfs_ops = {
.owner = THIS_MODULE,
.open = pwm_seq_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
diff --combined drivers/s390/block/dasd.c
index eb17fea8075c,17a9a35e9aed..20f256e6d38b
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@@ -1089,7 -1089,7 +1089,7 @@@ static int dasd_stats_open(struct inod
static const struct file_operations dasd_stats_raw_fops = {
.owner = THIS_MODULE,
.open = dasd_stats_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
.write = dasd_stats_write,
@@@ -2802,7 -2802,7 +2802,7 @@@ static void __dasd_cleanup_cqr(struct d
blk_update_request(req, BLK_STS_OK,
blk_rq_bytes(req) - proc_bytes);
blk_mq_requeue_request(req, true);
- } else {
+ } else if (likely(!blk_should_fake_timeout(req->q))) {
blk_mq_complete_request(req);
}
}
diff --combined drivers/s390/cio/qdio_debug.c
index 863d17c802ca,31e3f86ac81d..54218c365380
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@@ -165,7 -165,7 +165,7 @@@ static int qstat_show(struct seq_file *
}
seq_printf(m, "\n1 2.. 4.. 8.. "
- "16.. 32.. 64.. 127\n");
+ "16.. 32.. 64.. 128\n");
for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
@@@ -281,7 -281,7 +281,7 @@@ static int qperf_seq_open(struct inode
static const struct file_operations debugfs_perf_fops = {
.owner = THIS_MODULE,
.open = qperf_seq_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = qperf_seq_write,
.llseek = seq_lseek,
.release = single_release,
diff --combined drivers/soc/qcom/socinfo.c
index e19102f46302,ab2a56c75f13..d9c64a78e49c
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@@ -24,7 -24,6 +24,7 @@@
#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
#define SMEM_SOCINFO_BUILD_ID_LENGTH 32
+#define SMEM_SOCINFO_CHIP_ID_LENGTH 32
/*
* SMEM item id, used to acquire handles to respective
@@@ -122,16 -121,6 +122,16 @@@ struct socinfo
__le32 chip_family;
__le32 raw_device_family;
__le32 raw_device_num;
+ /* Version 13 */
+ __le32 nproduct_id;
+ char chip_id[SMEM_SOCINFO_CHIP_ID_LENGTH];
+ /* Version 14 */
+ __le32 num_clusters;
+ __le32 ncluster_array_offset;
+ __le32 num_defective_parts;
+ __le32 ndefective_parts_array_offset;
+ /* Version 15 */
+ __le32 nmodem_supported;
};
#ifdef CONFIG_DEBUG_FS
@@@ -146,12 -135,6 +146,12 @@@ struct socinfo_params
u32 raw_ver;
u32 hw_plat;
u32 fmt;
+ u32 nproduct_id;
+ u32 num_clusters;
+ u32 ncluster_array_offset;
+ u32 num_defective_parts;
+ u32 ndefective_parts_array_offset;
+ u32 nmodem_supported;
};
struct smem_image_version {
@@@ -219,10 -202,8 +219,10 @@@ static const struct soc_id soc_id[] =
{ 310, "MSM8996AU" },
{ 311, "APQ8096AU" },
{ 312, "APQ8096SG" },
+ { 318, "SDM630" },
{ 321, "SDM845" },
{ 341, "SDA845" },
+ { 356, "SM8250" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
@@@ -247,7 -228,7 +247,7 @@@ static int qcom_open_##name(struct inod
\
static const struct file_operations qcom_ ##name## _ops = { \
.open = qcom_open_##name, \
- .read = seq_read, \
+ .read_iter = seq_read_iter, \
.llseek = seq_lseek, \
.release = single_release, \
}
@@@ -275,10 -256,7 +275,10 @@@ static int qcom_show_pmic_model(struct
if (model < 0)
return -EINVAL;
- seq_printf(seq, "%s\n", pmic_models[model]);
+ if (model <= ARRAY_SIZE(pmic_models) && pmic_models[model])
+ seq_printf(seq, "%s\n", pmic_models[model]);
+ else
+ seq_printf(seq, "unknown (%d)\n", model);
return 0;
}
@@@ -294,19 -272,9 +294,19 @@@ static int qcom_show_pmic_die_revision(
return 0;
}
+static int qcom_show_chip_id(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+
+ seq_printf(seq, "%s\n", socinfo->chip_id);
+
+ return 0;
+}
+
QCOM_OPEN(build_id, qcom_show_build_id);
QCOM_OPEN(pmic_model, qcom_show_pmic_model);
QCOM_OPEN(pmic_die_rev, qcom_show_pmic_die_revision);
+QCOM_OPEN(chip_id, qcom_show_chip_id);
#define DEFINE_IMAGE_OPS(type) \
static int show_image_##type(struct seq_file *seq, void *p) \
@@@ -323,7 -291,7 +323,7 @@@ static int open_image_##type(struct ino
\
static const struct file_operations qcom_image_##type##_ops = { \
.open = open_image_##type, \
- .read = seq_read, \
+ .read_iter = seq_read_iter, \
.llseek = seq_lseek, \
.release = single_release, \
}
@@@ -344,38 -312,7 +344,38 @@@ static void socinfo_debugfs_init(struc
qcom_socinfo->info.fmt = __le32_to_cpu(info->fmt);
+ debugfs_create_x32("info_fmt", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.fmt);
+
switch (qcom_socinfo->info.fmt) {
+ case SOCINFO_VERSION(0, 15):
+ qcom_socinfo->info.nmodem_supported = __le32_to_cpu(info->nmodem_supported);
+
+ debugfs_create_u32("nmodem_supported", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nmodem_supported);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 14):
+ qcom_socinfo->info.num_clusters = __le32_to_cpu(info->num_clusters);
+ qcom_socinfo->info.ncluster_array_offset = __le32_to_cpu(info->ncluster_array_offset);
+ qcom_socinfo->info.num_defective_parts = __le32_to_cpu(info->num_defective_parts);
+ qcom_socinfo->info.ndefective_parts_array_offset = __le32_to_cpu(info->ndefective_parts_array_offset);
+
+ debugfs_create_u32("num_clusters", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_clusters);
+ debugfs_create_u32("ncluster_array_offset", 0400,
qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.ncluster_array_offset);
+ debugfs_create_u32("num_defective_parts", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_defective_parts);
+ debugfs_create_u32("ndefective_parts_array_offset", 0400,
qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.ndefective_parts_array_offset);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 13):
+ qcom_socinfo->info.nproduct_id = __le32_to_cpu(info->nproduct_id);
+
+ debugfs_create_u32("nproduct_id", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nproduct_id);
+ DEBUGFS_ADD(info, chip_id);
+ /* Fall through */
case SOCINFO_VERSION(0, 12):
qcom_socinfo->info.chip_family =
__le32_to_cpu(info->chip_family);
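The socinfo switch relies on the SMEM format being strictly additive: each newer version only appends fields, so the cases fall through from the newest recognised version down to the oldest and expose exactly what the firmware provides. The shape of that cascade, stripped of the driver specifics (the example_* names and the field comments are placeholders, not the driver's code):

#include <linux/types.h>

#define EXAMPLE_VERSION(maj, min) ((((maj) & 0xffff) << 16) | ((min) & 0xffff))

static void example_parse_socinfo(u32 fmt)
{
	switch (fmt) {
	case EXAMPLE_VERSION(0, 15):
		/* copy the v15-only field (nmodem_supported) */
		/* fall through */
	case EXAMPLE_VERSION(0, 14):
		/* copy the v14-only cluster/defective-part fields */
		/* fall through */
	case EXAMPLE_VERSION(0, 13):
		/* copy the v13-only fields (nproduct_id, chip_id) */
		/* fall through */
	default:
		/* older formats: only the base fields are present */
		break;
	}
}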
diff --combined drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index c52540b734fd,e198779db6fc..a47d32a2166f
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@@ -9,6 -9,22 +9,6 @@@
* <jkmaline(a)cc.hut.fi>
* Copyright (c) 2002-2003, Jouni Malinen <jkmaline(a)cc.hut.fi>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* James P. Ketrenos <ipw2100-admin(a)linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@@ -249,7 -265,7 +249,7 @@@ static int open_debug_level(struct inod
static const struct proc_ops debug_level_proc_ops = {
.proc_open = open_debug_level,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_write = write_debug_level,
.proc_release = single_release,
diff --combined drivers/usb/chipidea/debug.c
index da5d18cf6840,d734b97a7327..31a6fdcb7620
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@@ -18,7 -18,7 +18,7 @@@
#include "bits.h"
#include "otg.h"
-/**
+/*
* ci_device_show: prints information about device capabilities and status
*/
static int ci_device_show(struct seq_file *s, void *data)
@@@ -47,7 -47,7 +47,7 @@@
}
DEFINE_SHOW_ATTRIBUTE(ci_device);
-/**
+/*
* ci_port_test_show: reads port test mode
*/
static int ci_port_test_show(struct seq_file *s, void *data)
@@@ -67,7 -67,7 +67,7 @@@
return 0;
}
-/**
+/*
* ci_port_test_write: writes port test mode
*/
static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
@@@ -110,12 -110,12 +110,12 @@@ static int ci_port_test_open(struct ino
static const struct file_operations ci_port_test_fops = {
.open = ci_port_test_open,
.write = ci_port_test_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
-/**
+/*
* ci_qheads_show: DMA contents of all queue heads
*/
static int ci_qheads_show(struct seq_file *s, void *data)
@@@ -147,7 -147,7 +147,7 @@@
}
DEFINE_SHOW_ATTRIBUTE(ci_qheads);
-/**
+/*
* ci_requests_show: DMA contents of all requests currently queued (all endpts)
*/
static int ci_requests_show(struct seq_file *s, void *data)
@@@ -296,7 -296,7 +296,7 @@@ static int ci_role_open(struct inode *i
static const struct file_operations ci_role_fops = {
.open = ci_role_open,
.write = ci_role_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined drivers/usb/dwc2/debugfs.c
index aaafd463d72a,19a083e4f396..5a9586a6432f
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@@ -37,15 -37,15 +37,15 @@@ static ssize_t testmode_write(struct fi
return -EFAULT;
if (!strncmp(buf, "test_j", 6))
- testmode = TEST_J;
+ testmode = USB_TEST_J;
else if (!strncmp(buf, "test_k", 6))
- testmode = TEST_K;
+ testmode = USB_TEST_K;
else if (!strncmp(buf, "test_se0_nak", 12))
- testmode = TEST_SE0_NAK;
+ testmode = USB_TEST_SE0_NAK;
else if (!strncmp(buf, "test_packet", 11))
- testmode = TEST_PACKET;
+ testmode = USB_TEST_PACKET;
else if (!strncmp(buf, "test_force_enable", 17))
- testmode = TEST_FORCE_EN;
+ testmode = USB_TEST_FORCE_ENABLE;
else
testmode = 0;
@@@ -78,19 -78,19 +78,19 @@@ static int testmode_show(struct seq_fil
case 0:
seq_puts(s, "no test\n");
break;
- case TEST_J:
+ case USB_TEST_J:
seq_puts(s, "test_j\n");
break;
- case TEST_K:
+ case USB_TEST_K:
seq_puts(s, "test_k\n");
break;
- case TEST_SE0_NAK:
+ case USB_TEST_SE0_NAK:
seq_puts(s, "test_se0_nak\n");
break;
- case TEST_PACKET:
+ case USB_TEST_PACKET:
seq_puts(s, "test_packet\n");
break;
- case TEST_FORCE_EN:
+ case USB_TEST_FORCE_ENABLE:
seq_puts(s, "test_force_enable\n");
break;
default:
@@@ -109,7 -109,7 +109,7 @@@ static const struct file_operations tes
.owner = THIS_MODULE,
.open = testmode_open,
.write = testmode_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined drivers/usb/dwc3/debugfs.c
index 2c7b6dd79cdf,14c075ad29f9..83e183c90ca7
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@@ -2,7 -2,7 +2,7 @@@
/**
* debugfs.c - DesignWare USB3 DRD Controller DebugFS file
*
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Felipe Balbi <balbi(a)ti.com>,
* Sebastian Andrzej Siewior <bigeasy(a)linutronix.de>
@@@ -380,7 -380,7 +380,7 @@@ static ssize_t dwc3_lsp_write(struct fi
static const struct file_operations dwc3_lsp_fops = {
.open = dwc3_lsp_open,
.write = dwc3_lsp_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -445,7 -445,7 +445,7 @@@ static ssize_t dwc3_mode_write(struct f
static const struct file_operations dwc3_mode_fops = {
.open = dwc3_mode_open,
.write = dwc3_mode_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -466,19 -466,19 +466,19 @@@ static int dwc3_testmode_show(struct se
case 0:
seq_printf(s, "no test\n");
break;
- case TEST_J:
+ case USB_TEST_J:
seq_printf(s, "test_j\n");
break;
- case TEST_K:
+ case USB_TEST_K:
seq_printf(s, "test_k\n");
break;
- case TEST_SE0_NAK:
+ case USB_TEST_SE0_NAK:
seq_printf(s, "test_se0_nak\n");
break;
- case TEST_PACKET:
+ case USB_TEST_PACKET:
seq_printf(s, "test_packet\n");
break;
- case TEST_FORCE_EN:
+ case USB_TEST_FORCE_ENABLE:
seq_printf(s, "test_force_enable\n");
break;
default:
@@@ -506,15 -506,15 +506,15 @@@ static ssize_t dwc3_testmode_write(stru
return -EFAULT;
if (!strncmp(buf, "test_j", 6))
- testmode = TEST_J;
+ testmode = USB_TEST_J;
else if (!strncmp(buf, "test_k", 6))
- testmode = TEST_K;
+ testmode = USB_TEST_K;
else if (!strncmp(buf, "test_se0_nak", 12))
- testmode = TEST_SE0_NAK;
+ testmode = USB_TEST_SE0_NAK;
else if (!strncmp(buf, "test_packet", 11))
- testmode = TEST_PACKET;
+ testmode = USB_TEST_PACKET;
else if (!strncmp(buf, "test_force_enable", 17))
- testmode = TEST_FORCE_EN;
+ testmode = USB_TEST_FORCE_ENABLE;
else
testmode = 0;
@@@ -528,7 -528,7 +528,7 @@@
static const struct file_operations dwc3_testmode_fops = {
.open = dwc3_testmode_open,
.write = dwc3_testmode_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -620,7 -620,7 +620,7 @@@ static ssize_t dwc3_link_state_write(st
static const struct file_operations dwc3_link_state_fops = {
.open = dwc3_link_state_open,
.write = dwc3_link_state_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
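Both dwc2 and dwc3 now use the generic USB_TEST_* selectors from the ch9 UAPI header instead of per-driver TEST_* defines. The string parsing the two debugfs writers duplicate boils down to the following standalone helper (an illustration of the mapping, not code either driver actually gained):

#include <linux/string.h>
#include <linux/usb/ch9.h>

/* Map a debugfs command string onto a USB 2.0 test mode selector. */
static u8 example_parse_testmode(const char *buf)
{
	if (!strncmp(buf, "test_j", 6))
		return USB_TEST_J;
	if (!strncmp(buf, "test_k", 6))
		return USB_TEST_K;
	if (!strncmp(buf, "test_se0_nak", 12))
		return USB_TEST_SE0_NAK;
	if (!strncmp(buf, "test_packet", 11))
		return USB_TEST_PACKET;
	if (!strncmp(buf, "test_force_enable", 17))
		return USB_TEST_FORCE_ENABLE;
	return 0;	/* no test mode selected */
}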
diff --combined drivers/usb/gadget/udc/lpc32xx_udc.c
index e8a4637a9a17,bac74bb67290..e79f5f286cbb
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@@ -532,7 -532,7 +532,7 @@@ static int proc_udc_open(struct inode *
static const struct file_operations proc_ops = {
.owner = THIS_MODULE,
.open = proc_udc_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -1926,7 -1926,7 +1926,7 @@@ static const struct usb_ep_ops lpc32xx_
};
/* Send a ZLP on a non-0 IN EP */
-void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
+static void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
/* Clear EP status */
udc_clearep_getsts(udc, ep->hwep_num);
@@@ -1940,7 -1940,7 +1940,7 @@@
* This function will only be called when a delayed ZLP needs to be sent out
* after a DMA transfer has filled both buffers.
*/
-void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
+static void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
u32 epstatus;
struct lpc32xx_request *req;
@@@ -2986,7 -2986,7 +2986,7 @@@ static void lpc32xx_rmwkup_chg(int remo
/* Enable or disable USB remote wakeup */
}
-struct lpc32xx_usbd_cfg lpc32xx_usbddata = {
+static struct lpc32xx_usbd_cfg lpc32xx_usbddata = {
.vbus_drv_pol = 0,
.conn_chgb = &lpc32xx_usbd_conn_chg,
.susp_chgb = &lpc32xx_usbd_susp_chg,
diff --combined drivers/usb/host/xhci-debugfs.c
index 92e25a62fdb5,4a640031d944..8b6747b53434
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@@ -110,7 -110,6 +110,7 @@@ static void xhci_debugfs_free_regset(st
kfree(regset);
}
+__printf(6, 7)
static void xhci_debugfs_regset(struct xhci_hcd *xhci, u32 base,
const struct debugfs_reg32 *regs,
size_t nregs, struct dentry *parent,
@@@ -249,7 -248,7 +249,7 @@@ static int xhci_ring_open(struct inode
static const struct file_operations xhci_ring_fops = {
.open = xhci_ring_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -330,7 -329,7 +330,7 @@@ static int xhci_context_open(struct ino
static const struct file_operations xhci_context_fops = {
.open = xhci_context_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -391,7 -390,7 +391,7 @@@ static ssize_t xhci_port_write(struct f
static const struct file_operations port_fops = {
.open = xhci_port_open,
.write = xhci_port_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
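The xhci change also adds a __printf(6, 7) annotation so the compiler can type-check the format string and variadic arguments passed to xhci_debugfs_regset(). In general the two numbers name the position of the format string and of the first variadic argument; a small sketch of the idiom (example_log is invented):

#include <linux/kernel.h>
#include <stdarg.h>

static __printf(1, 2) void example_log(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
}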
diff --combined fs/affs/file.c
index a26a0f96c119,7d51cc2e3dab..87d23705135b
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@@ -14,7 -14,6 +14,7 @@@
*/
#include <linux/uio.h>
+#include <linux/blkdev.h>
#include "affs.h"
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
@@@ -976,7 -975,6 +976,6 @@@ const struct file_operations affs_file_
.open = affs_file_open,
.release = affs_file_release,
.fsync = affs_file_fsync,
- .splice_read = generic_file_splice_read,
};
const struct inode_operations affs_file_inode_operations = {
diff --combined fs/block_dev.c
index 8ae833e00443,0aa66a6075eb..5b07f2665e41
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@@ -105,7 -105,16 +105,7 @@@ EXPORT_SYMBOL(invalidate_bdev)
static void set_init_blocksize(struct block_device *bdev)
{
- unsigned bsize = bdev_logical_block_size(bdev);
- loff_t size = i_size_read(bdev->bd_inode);
-
- while (bsize < PAGE_SIZE) {
- if (size & bsize)
- break;
- bsize <<= 1;
- }
- bdev->bd_block_size = bsize;
- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+ bdev->bd_inode->i_blkbits = blksize_bits(bdev_logical_block_size(bdev));
}
int set_blocksize(struct block_device *bdev, int size)
@@@ -119,8 -128,9 +119,8 @@@
return -EINVAL;
/* Don't change the size if it is same as current */
- if (bdev->bd_block_size != size) {
+ if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
sync_blockdev(bdev);
- bdev->bd_block_size = size;
bdev->bd_inode->i_blkbits = blksize_bits(size);
kill_bdev(bdev);
}
@@@ -693,12 -703,12 +693,12 @@@ int bdev_read_page(struct block_device
if (!ops->rw_page || bdev_get_integrity(bdev))
return result;
- result = blk_queue_enter(bdev->bd_queue, 0);
+ result = blk_queue_enter(bdev->bd_disk->queue, 0);
if (result)
return result;
result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
REQ_OP_READ);
- blk_queue_exit(bdev->bd_queue);
+ blk_queue_exit(bdev->bd_disk->queue);
return result;
}
@@@ -729,7 -739,7 +729,7 @@@ int bdev_write_page(struct block_devic
if (!ops->rw_page || bdev_get_integrity(bdev))
return -EOPNOTSUPP;
- result = blk_queue_enter(bdev->bd_queue, 0);
+ result = blk_queue_enter(bdev->bd_disk->queue, 0);
if (result)
return result;
@@@ -742,7 -752,7 +742,7 @@@
clean_page_buffers(page);
unlock_page(page);
}
- blk_queue_exit(bdev->bd_queue);
+ blk_queue_exit(bdev->bd_disk->queue);
return result;
}
@@@ -773,6 -783,7 +773,6 @@@ static void init_once(void *foo
memset(bdev, 0, sizeof(*bdev));
mutex_init(&bdev->bd_mutex);
- INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
@@@ -788,6 -799,9 +788,6 @@@ static void bdev_evict_inode(struct ino
truncate_inode_pages_final(&inode->i_data);
invalidate_inode_buffers(inode); /* is it needed here? */
clear_inode(inode);
- spin_lock(&bdev_lock);
- list_del_init(&bdev->bd_list);
- spin_unlock(&bdev_lock);
/* Detach inode from wb early as bdi_put() may free bdi->wb */
inode_detach_wb(inode);
if (bdev->bd_bdi != &noop_backing_dev_info) {
@@@ -862,6 -876,8 +862,6 @@@ static int bdev_set(struct inode *inode
return 0;
}
-static LIST_HEAD(all_bdevs);
-
struct block_device *bdget(dev_t dev)
{
struct block_device *bdev;
@@@ -879,6 -895,7 +879,6 @@@
bdev->bd_contains = NULL;
bdev->bd_super = NULL;
bdev->bd_inode = inode;
- bdev->bd_block_size = i_blocksize(inode);
bdev->bd_part_count = 0;
bdev->bd_invalidated = 0;
inode->i_mode = S_IFBLK;
@@@ -886,6 -903,9 +886,6 @@@
inode->i_bdev = bdev;
inode->i_data.a_ops = &def_blk_aops;
mapping_set_gfp_mask(&inode->i_data, GFP_USER);
- spin_lock(&bdev_lock);
- list_add(&bdev->bd_list, &all_bdevs);
- spin_unlock(&bdev_lock);
unlock_new_inode(inode);
}
return bdev;
@@@ -906,14 -926,13 +906,14 @@@ EXPORT_SYMBOL(bdgrab)
long nr_blockdev_pages(void)
{
- struct block_device *bdev;
+ struct inode *inode;
long ret = 0;
- spin_lock(&bdev_lock);
- list_for_each_entry(bdev, &all_bdevs, bd_list) {
- ret += bdev->bd_inode->i_mapping->nrpages;
- }
- spin_unlock(&bdev_lock);
+
+ spin_lock(&blockdev_superblock->s_inode_list_lock);
+ list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
+ ret += inode->i_mapping->nrpages;
+ spin_unlock(&blockdev_superblock->s_inode_list_lock);
+
return ret;
}
@@@ -1015,28 -1034,30 +1015,28 @@@ static bool bd_may_claim(struct block_d
}
/**
- * bd_prepare_to_claim - prepare to claim a block device
+ * bd_prepare_to_claim - claim a block device
* @bdev: block device of interest
* @whole: the whole device containing @bdev, may equal @bdev
* @holder: holder trying to claim @bdev
*
- * Prepare to claim @bdev. This function fails if @bdev is already
- * claimed by another holder and waits if another claiming is in
- * progress. This function doesn't actually claim. On successful
- * return, the caller has ownership of bd_claiming and bd_holder[s].
- *
- * CONTEXT:
- * spin_lock(&bdev_lock). Might release bdev_lock, sleep and regrab
- * it multiple times.
+ * Claim @bdev. This function fails if @bdev is already claimed by another
+ * holder and waits if another claiming is in progress. return, the caller
+ * has ownership of bd_claiming and bd_holder[s].
*
* RETURNS:
* 0 if @bdev can be claimed, -EBUSY otherwise.
*/
-static int bd_prepare_to_claim(struct block_device *bdev,
- struct block_device *whole, void *holder)
+int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
+ void *holder)
{
retry:
+ spin_lock(&bdev_lock);
/* if someone else claimed, fail */
- if (!bd_may_claim(bdev, whole, holder))
+ if (!bd_may_claim(bdev, whole, holder)) {
+ spin_unlock(&bdev_lock);
return -EBUSY;
+ }
/* if claiming is already in progress, wait for it to finish */
if (whole->bd_claiming) {
@@@ -1047,15 -1068,13 +1047,15 @@@
spin_unlock(&bdev_lock);
schedule();
finish_wait(wq, &wait);
- spin_lock(&bdev_lock);
goto retry;
}
/* yay, all mine */
+ whole->bd_claiming = holder;
+ spin_unlock(&bdev_lock);
return 0;
}
+EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
{
@@@ -1078,6 -1097,78 +1078,6 @@@
return disk;
}
-/**
- * bd_start_claiming - start claiming a block device
- * @bdev: block device of interest
- * @holder: holder trying to claim @bdev
- *
- * @bdev is about to be opened exclusively. Check @bdev can be opened
- * exclusively and mark that an exclusive open is in progress. Each
- * successful call to this function must be matched with a call to
- * either bd_finish_claiming() or bd_abort_claiming() (which do not
- * fail).
- *
- * This function is used to gain exclusive access to the block device
- * without actually causing other exclusive open attempts to fail. It
- * should be used when the open sequence itself requires exclusive
- * access but may subsequently fail.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * Pointer to the block device containing @bdev on success, ERR_PTR()
- * value on failure.
- */
-struct block_device *bd_start_claiming(struct block_device *bdev, void *holder)
-{
- struct gendisk *disk;
- struct block_device *whole;
- int partno, err;
-
- might_sleep();
-
- /*
- * @bdev might not have been initialized properly yet, look up
- * and grab the outer block device the hard way.
- */
- disk = bdev_get_gendisk(bdev, &partno);
- if (!disk)
- return ERR_PTR(-ENXIO);
-
- /*
- * Normally, @bdev should equal what's returned from bdget_disk()
- * if partno is 0; however, some drivers (floppy) use multiple
- * bdev's for the same physical device and @bdev may be one of the
- * aliases. Keep @bdev if partno is 0. This means claimer
- * tracking is broken for those devices but it has always been that
- * way.
- */
- if (partno)
- whole = bdget_disk(disk, 0);
- else
- whole = bdgrab(bdev);
-
- put_disk_and_module(disk);
- if (!whole)
- return ERR_PTR(-ENOMEM);
-
- /* prepare to claim, if successful, mark claiming in progress */
- spin_lock(&bdev_lock);
-
- err = bd_prepare_to_claim(bdev, whole, holder);
- if (err == 0) {
- whole->bd_claiming = holder;
- spin_unlock(&bdev_lock);
- return whole;
- } else {
- spin_unlock(&bdev_lock);
- bdput(whole);
- return ERR_PTR(err);
- }
-}
-EXPORT_SYMBOL(bd_start_claiming);
-
static void bd_clear_claiming(struct block_device *whole, void *holder)
{
lockdep_assert_held(&bdev_lock);
@@@ -1090,14 -1181,14 +1090,14 @@@
/**
* bd_finish_claiming - finish claiming of a block device
* @bdev: block device of interest
- * @whole: whole block device (returned from bd_start_claiming())
+ * @whole: whole block device
* @holder: holder that has claimed @bdev
*
* Finish exclusive open of a block device. Mark the device as exlusively
* open by the holder and wake up all waiters for exclusive open to finish.
*/
-void bd_finish_claiming(struct block_device *bdev, struct block_device *whole,
- void *holder)
+static void bd_finish_claiming(struct block_device *bdev,
+ struct block_device *whole, void *holder)
{
spin_lock(&bdev_lock);
BUG_ON(!bd_may_claim(bdev, whole, holder));
@@@ -1112,11 -1203,12 +1112,11 @@@
bd_clear_claiming(whole, holder);
spin_unlock(&bdev_lock);
}
-EXPORT_SYMBOL(bd_finish_claiming);
/**
* bd_abort_claiming - abort claiming of a block device
* @bdev: block device of interest
- * @whole: whole block device (returned from bd_start_claiming())
+ * @whole: whole block device
* @holder: holder that has claimed @bdev
*
* Abort claiming of a block device when the exclusive open failed. This can be
@@@ -1275,6 -1367,26 +1275,6 @@@ void bd_unlink_disk_holder(struct block
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
#endif
-/**
- * flush_disk - invalidates all buffer-cache entries on a disk
- *
- * @bdev: struct block device to be flushed
- * @kill_dirty: flag to guide handling of dirty inodes
- *
- * Invalidates all buffer-cache entries on a disk. It should be called
- * when a disk has been changed -- either by a media change or online
- * resize.
- */
-static void flush_disk(struct block_device *bdev, bool kill_dirty)
-{
- if (__invalidate_device(bdev, kill_dirty)) {
- printk(KERN_WARNING "VFS: busy inodes on changed media or "
- "resized disk %s\n",
- bdev->bd_disk ? bdev->bd_disk->disk_name : "");
- }
- bdev->bd_invalidated = 1;
-}
-
/**
* check_disk_size_change - checks for disk size change and adjusts bdev size.
* @disk: struct gendisk to check
@@@ -1299,9 -1411,8 +1299,9 @@@ static void check_disk_size_change(stru
disk->disk_name, bdev_size, disk_size);
}
i_size_write(bdev->bd_inode, disk_size);
- if (bdev_size > disk_size)
- flush_disk(bdev, false);
+ if (bdev_size > disk_size && __invalidate_device(bdev, false))
+ pr_warn("VFS: busy inodes on resized disk %s\n",
+ disk->disk_name);
}
bdev->bd_invalidated = 0;
}
@@@ -1360,10 -1471,7 +1360,10 @@@ int check_disk_change(struct block_devi
if (!(events & DISK_EVENT_MEDIA_CHANGE))
return 0;
- flush_disk(bdev, true);
+ if (__invalidate_device(bdev, true))
+ pr_warn("VFS: busy inodes on changed media %s\n",
+ disk->disk_name);
+ bdev->bd_invalidated = 1;
if (bdops->revalidate_disk)
bdops->revalidate_disk(bdev->bd_disk);
return 1;
@@@ -1439,15 -1547,13 +1439,15 @@@ EXPORT_SYMBOL_GPL(bdev_disk_changed)
* mutex_lock_nested(whole->bd_mutex, 1)
*/
-static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+static int __blkdev_get(struct block_device *bdev, fmode_t mode, void *holder,
+ int for_part)
{
+ struct block_device *whole = NULL, *claiming = NULL;
struct gendisk *disk;
int ret;
int partno;
int perm = 0;
- bool first_open = false;
+ bool first_open = false, unblock_events = true, need_restart;
if (mode & FMODE_READ)
perm |= MAY_READ;
@@@ -1463,36 -1569,18 +1463,36 @@@
}
restart:
-
+ need_restart = false;
ret = -ENXIO;
disk = bdev_get_gendisk(bdev, &partno);
if (!disk)
goto out;
+ if (partno) {
+ whole = bdget_disk(disk, 0);
+ if (!whole) {
+ ret = -ENOMEM;
+ goto out_put_disk;
+ }
+ }
+
+ if (!for_part && (mode & FMODE_EXCL)) {
+ WARN_ON_ONCE(!holder);
+ if (whole)
+ claiming = whole;
+ else
+ claiming = bdev;
+ ret = bd_prepare_to_claim(bdev, claiming, holder);
+ if (ret)
+ goto out_put_whole;
+ }
+
disk_block_events(disk);
mutex_lock_nested(&bdev->bd_mutex, for_part);
if (!bdev->bd_openers) {
first_open = true;
bdev->bd_disk = disk;
- bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
bdev->bd_partno = partno;
@@@ -1505,12 -1593,20 +1505,12 @@@
ret = 0;
if (disk->fops->open) {
ret = disk->fops->open(bdev, mode);
- if (ret == -ERESTARTSYS) {
- /* Lost a race with 'disk' being
- * deleted, try again.
- * See md.c
- */
- disk_put_part(bdev->bd_part);
- bdev->bd_part = NULL;
- bdev->bd_disk = NULL;
- bdev->bd_queue = NULL;
- mutex_unlock(&bdev->bd_mutex);
- disk_unblock_events(disk);
- put_disk_and_module(disk);
- goto restart;
- }
+ /*
+ * If we lost a race with 'disk' being deleted,
+ * try again. See md.c
+ */
+ if (ret == -ERESTARTSYS)
+ need_restart = true;
}
if (!ret) {
@@@ -1531,11 -1627,18 +1531,11 @@@
if (ret)
goto out_clear;
} else {
- struct block_device *whole;
- whole = bdget_disk(disk, 0);
- ret = -ENOMEM;
- if (!whole)
- goto out_clear;
BUG_ON(for_part);
- ret = __blkdev_get(whole, mode, 1);
- if (ret) {
- bdput(whole);
+ ret = __blkdev_get(whole, mode, NULL, 1);
+ if (ret)
goto out_clear;
- }
- bdev->bd_contains = whole;
+ bdev->bd_contains = bdgrab(whole);
bdev->bd_part = disk_get_part(disk, partno);
if (!(disk->flags & GENHD_FL_UP) ||
!bdev->bd_part || !bdev->bd_part->nr_sects) {
@@@ -1564,52 -1667,27 +1564,52 @@@
bdev->bd_openers++;
if (for_part)
bdev->bd_part_count++;
+ if (claiming)
+ bd_finish_claiming(bdev, claiming, holder);
+
+ /*
+ * Block event polling for write claims if requested. Any write holder
+ * makes the write_holder state stick until all are released. This is
+ * good enough and tracking individual writeable reference is too
+ * fragile given the way @mode is used in blkdev_get/put().
+ */
+ if (claiming && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
+ (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
+ bdev->bd_write_holder = true;
+ unblock_events = false;
+ }
mutex_unlock(&bdev->bd_mutex);
- disk_unblock_events(disk);
+
+ if (unblock_events)
+ disk_unblock_events(disk);
+
/* only one opener holds refs to the module and disk */
if (!first_open)
put_disk_and_module(disk);
+ if (whole)
+ bdput(whole);
return 0;
out_clear:
disk_put_part(bdev->bd_part);
bdev->bd_disk = NULL;
bdev->bd_part = NULL;
- bdev->bd_queue = NULL;
if (bdev != bdev->bd_contains)
__blkdev_put(bdev->bd_contains, mode, 1);
bdev->bd_contains = NULL;
out_unlock_bdev:
+ if (claiming)
+ bd_abort_claiming(bdev, claiming, holder);
mutex_unlock(&bdev->bd_mutex);
disk_unblock_events(disk);
+ out_put_whole:
+ if (whole)
+ bdput(whole);
+ out_put_disk:
put_disk_and_module(disk);
+ if (need_restart)
+ goto restart;
out:
-
return ret;
}
@@@ -1634,11 -1712,50 +1634,11 @@@
*/
int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
{
- struct block_device *whole = NULL;
int res;
- WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);
-
- if ((mode & FMODE_EXCL) && holder) {
- whole = bd_start_claiming(bdev, holder);
- if (IS_ERR(whole)) {
- bdput(bdev);
- return PTR_ERR(whole);
- }
- }
-
- res = __blkdev_get(bdev, mode, 0);
-
- if (whole) {
- struct gendisk *disk = whole->bd_disk;
-
- /* finish claiming */
- mutex_lock(&bdev->bd_mutex);
- if (!res)
- bd_finish_claiming(bdev, whole, holder);
- else
- bd_abort_claiming(bdev, whole, holder);
- /*
- * Block event polling for write claims if requested. Any
- * write holder makes the write_holder state stick until
- * all are released. This is good enough and tracking
- * individual writeable reference is too fragile given the
- * way @mode is used in blkdev_get/put().
- */
- if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
- (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
- bdev->bd_write_holder = true;
- disk_block_events(disk);
- }
-
- mutex_unlock(&bdev->bd_mutex);
- bdput(whole);
- }
-
+ res =__blkdev_get(bdev, mode, holder, 0);
if (res)
bdput(bdev);
-
return res;
}
EXPORT_SYMBOL(blkdev_get);
@@@ -1734,7 -1851,7 +1734,7 @@@ static int blkdev_open(struct inode * i
*/
filp->f_flags |= O_LARGEFILE;
- filp->f_mode |= FMODE_NOWAIT;
+ filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
if (filp->f_flags & O_NDELAY)
filp->f_mode |= FMODE_NDELAY;
@@@ -2043,7 -2160,6 +2043,6 @@@ const struct file_operations def_blk_fo
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_blkdev_ioctl,
#endif
- .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = blkdev_fallocate,
};
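With bd_start_claiming() folded into __blkdev_get(), exclusive opens now call bd_prepare_to_claim() directly and then settle the claim with bd_finish_claiming() or bd_abort_claiming() once the open has succeeded or failed. As a hedged sketch of that lifecycle, written as if it sat inside fs/block_dev.c next to those helpers (the wrapper itself is hypothetical):

/* Take a claim, run an operation, then finish or abort the claim. */
static int with_exclusive_claim(struct block_device *bdev,
				struct block_device *whole, void *holder,
				int (*op)(struct block_device *))
{
	int ret = bd_prepare_to_claim(bdev, whole, holder);

	if (ret)
		return ret;		/* -EBUSY: claimed by someone else */

	ret = op(bdev);
	if (ret)
		bd_abort_claiming(bdev, whole, holder);
	else
		bd_finish_claiming(bdev, whole, holder);
	return ret;
}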
diff --combined fs/btrfs/file.c
index bb824c7cb7c7,f3c81478b04e..5869ee3a504d
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@@ -500,18 -500,18 +500,18 @@@ next
* this also makes the decision about creating an inline extent vs
* doing real data extents, marking pages dirty and delalloc as required.
*/
-int btrfs_dirty_pages(struct inode *inode, struct page **pages,
+int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
size_t num_pages, loff_t pos, size_t write_bytes,
struct extent_state **cached)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
int err = 0;
int i;
u64 num_bytes;
u64 start_pos;
u64 end_of_last_block;
u64 end_pos = pos + write_bytes;
- loff_t isize = i_size_read(inode);
+ loff_t isize = i_size_read(&inode->vfs_inode);
unsigned int extra_bits = 0;
start_pos = pos & ~((u64) fs_info->sectorsize - 1);
@@@ -524,13 -524,13 +524,13 @@@
* The pages may have already been dirty, clear out old accounting so
* we can set things up properly
*/
- clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block,
+ clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, cached);
- if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
+ if (!btrfs_is_free_space_inode(inode)) {
if (start_pos >= isize &&
- !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
+ !(inode->flags & BTRFS_INODE_PREALLOC)) {
/*
* There can't be any extents following eof in this case
* so just set the delalloc new bit for the range
@@@ -538,7 -538,8 +538,7 @@@
*/
extra_bits |= EXTENT_DELALLOC_NEW;
} else {
- err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
- start_pos,
+ err = btrfs_find_new_delalloc_bytes(inode, start_pos,
num_bytes, cached);
if (err)
return err;
@@@ -563,7 -564,7 +563,7 @@@
* at this time.
*/
if (end_pos > isize)
- i_size_write(inode, end_pos);
+ i_size_write(&inode->vfs_inode, end_pos);
return 0;
}
@@@ -730,7 -731,7 +730,7 @@@ next
* is deleted from the tree.
*/
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode,
+ struct btrfs_root *root, struct btrfs_inode *inode,
struct btrfs_path *path, u64 start, u64 end,
u64 *drop_end, int drop_cache,
int replace_extent,
@@@ -743,8 -744,7 +743,8 @@@
struct btrfs_ref ref = { 0 };
struct btrfs_key key;
struct btrfs_key new_key;
- u64 ino = btrfs_ino(BTRFS_I(inode));
+ struct inode *vfs_inode = &inode->vfs_inode;
+ u64 ino = btrfs_ino(inode);
u64 search_start = start;
u64 disk_bytenr = 0;
u64 num_bytes = 0;
@@@ -762,9 -762,9 +762,9 @@@
int leafs_visited = 0;
if (drop_cache)
- btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);
+ btrfs_drop_extent_cache(inode, start, end - 1, 0);
- if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
+ if (start >= inode->disk_i_size && !replace_extent)
modify_tree = 0;
update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
@@@ -935,7 -935,7 +935,7 @@@ next_slot
extent_end - end);
btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0)
- inode_sub_bytes(inode, end - key.offset);
+ inode_sub_bytes(vfs_inode, end - key.offset);
break;
}
@@@ -955,7 -955,7 +955,7 @@@
start - key.offset);
btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0)
- inode_sub_bytes(inode, extent_end - start);
+ inode_sub_bytes(vfs_inode, extent_end - start);
if (end == extent_end)
break;
@@@ -979,7 -979,7 +979,7 @@@ delete_extent_item
if (update_refs &&
extent_type == BTRFS_FILE_EXTENT_INLINE) {
- inode_sub_bytes(inode,
+ inode_sub_bytes(vfs_inode,
extent_end - key.offset);
extent_end = ALIGN(extent_end,
fs_info->sectorsize);
@@@ -993,7 -993,7 +993,7 @@@
key.offset - extent_offset);
ret = btrfs_free_extent(trans, &ref);
BUG_ON(ret); /* -ENOMEM */
- inode_sub_bytes(inode,
+ inode_sub_bytes(vfs_inode,
extent_end - key.offset);
}
@@@ -1082,8 -1082,8 +1082,8 @@@ int btrfs_drop_extents(struct btrfs_tra
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
- drop_cache, 0, 0, NULL);
+ ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path, start,
+ end, NULL, drop_cache, 0, 0, NULL);
btrfs_free_path(path);
return ret;
}
@@@ -1532,8 -1532,8 +1532,8 @@@ lock_and_cleanup_extent_if_need(struct
return ret;
}
-static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
- size_t *write_bytes, bool nowait)
+static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
+ size_t *write_bytes, bool nowait)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root;
@@@ -1541,9 -1541,6 +1541,9 @@@
u64 num_bytes;
int ret;
+ if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
+ return 0;
+
if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
return -EAGAIN;
@@@ -1586,42 -1583,6 +1586,42 @@@ out_unlock
return ret;
}
+static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
+ size_t *write_bytes)
+{
+ return check_can_nocow(inode, pos, write_bytes, true);
+}
+
+/*
+ * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
+ *
+ * @pos: File offset
+ * @write_bytes: The length to write, will be updated to the nocow writeable
+ * range
+ *
+ * This function will flush ordered extents in the range to ensure proper
+ * nocow checks.
+ *
+ * Return:
+ * >0 and update @write_bytes if we can do nocow write
+ * 0 if we can't do nocow write
+ * -EAGAIN if we can't get the needed lock or there are ordered extents
+ * for * (nowait == true) case
+ * <0 if other error happened
+ *
+ * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
+ */
+int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
+ size_t *write_bytes)
+{
+ return check_can_nocow(inode, pos, write_bytes, false);
+}
+
+void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
+{
+ btrfs_drew_write_unlock(&inode->root->snapshot_lock);
+}
+
static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
struct iov_iter *i)
{
@@@ -1629,6 -1590,7 +1629,6 @@@
loff_t pos = iocb->ki_pos;
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL;
struct extent_changeset *data_reserved = NULL;
u64 release_bytes = 0;
@@@ -1681,12 -1643,13 +1681,12 @@@
fs_info->sectorsize);
extent_changeset_release(data_reserved);
- ret = btrfs_check_data_free_space(inode, &data_reserved, pos,
+ ret = btrfs_check_data_free_space(BTRFS_I(inode),
+ &data_reserved, pos,
write_bytes);
if (ret < 0) {
- if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
- BTRFS_INODE_PREALLOC)) &&
- check_can_nocow(BTRFS_I(inode), pos,
- &write_bytes, false) > 0) {
+ if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
+ &write_bytes) > 0) {
/*
* For nodata cow case, no need to reserve
* data space.
@@@ -1711,11 -1674,11 +1711,11 @@@
reserve_bytes);
if (ret) {
if (!only_release_metadata)
- btrfs_free_reserved_data_space(inode,
+ btrfs_free_reserved_data_space(BTRFS_I(inode),
data_reserved, pos,
write_bytes);
else
- btrfs_drew_write_unlock(&root->snapshot_lock);
+ btrfs_check_nocow_unlock(BTRFS_I(inode));
break;
}
@@@ -1785,7 -1748,7 +1785,7 @@@ again
__pos = round_down(pos,
fs_info->sectorsize) +
(dirty_pages << PAGE_SHIFT);
- btrfs_delalloc_release_space(inode,
+ btrfs_delalloc_release_space(BTRFS_I(inode),
data_reserved, __pos,
release_bytes, true);
}
@@@ -1795,9 -1758,8 +1795,9 @@@
fs_info->sectorsize);
if (copied > 0)
- ret = btrfs_dirty_pages(inode, pages, dirty_pages,
- pos, copied, &cached_state);
+ ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
+ dirty_pages, pos, copied,
+ &cached_state);
/*
* If we have not locked the extent range, because the range's
@@@ -1820,7 -1782,7 +1820,7 @@@
release_bytes = 0;
if (only_release_metadata)
- btrfs_drew_write_unlock(&root->snapshot_lock);
+ btrfs_check_nocow_unlock(BTRFS_I(inode));
if (only_release_metadata && copied > 0) {
lockstart = round_down(pos,
@@@ -1838,6 -1800,8 +1838,6 @@@
cond_resched();
balance_dirty_pages_ratelimited(inode->i_mapping);
- if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
- btrfs_btree_balance_dirty(fs_info);
pos += copied;
num_written += copied;
@@@ -1847,12 -1811,11 +1847,12 @@@
if (release_bytes) {
if (only_release_metadata) {
- btrfs_drew_write_unlock(&root->snapshot_lock);
+ btrfs_check_nocow_unlock(BTRFS_I(inode));
btrfs_delalloc_release_metadata(BTRFS_I(inode),
release_bytes, true);
} else {
- btrfs_delalloc_release_space(inode, data_reserved,
+ btrfs_delalloc_release_space(BTRFS_I(inode),
+ data_reserved,
round_down(pos, fs_info->sectorsize),
release_bytes, true);
}
@@@ -1963,8 -1926,10 +1963,8 @@@ static ssize_t btrfs_file_write_iter(st
* We will allocate space in case nodatacow is not set,
* so bail
*/
- if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
- BTRFS_INODE_PREALLOC)) ||
- check_can_nocow(BTRFS_I(inode), pos, &nocow_bytes,
- true) <= 0) {
+ if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes)
+ <= 0) {
inode_unlock(inode);
return -EAGAIN;
}
@@@ -2633,7 -2598,7 +2633,7 @@@ int btrfs_punch_hole_range(struct inod
cur_offset = start;
while (cur_offset < end) {
- ret = __btrfs_drop_extents(trans, root, inode, path,
+ ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path,
cur_offset, end + 1, &drop_end,
1, 0, 0, NULL);
if (ret != -ENOSPC) {
@@@ -3211,14 -3176,14 +3211,14 @@@ reserve_space
if (ret < 0)
goto out;
space_reserved = true;
- ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
- alloc_start, bytes_to_reserve);
- if (ret)
- goto out;
ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
&cached_state);
if (ret)
goto out;
+ ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
+ alloc_start, bytes_to_reserve);
+ if (ret)
+ goto out;
ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
alloc_end - alloc_start,
i_blocksize(inode),
@@@ -3234,7 -3199,7 +3234,7 @@@
ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
out:
if (ret && space_reserved)
- btrfs_free_reserved_data_space(inode, data_reserved,
+ btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
alloc_start, bytes_to_reserve);
extent_changeset_free(data_reserved);
@@@ -3385,9 -3350,8 +3385,9 @@@ static long btrfs_fallocate(struct fil
free_extent_map(em);
break;
}
- ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
- cur_offset, last_byte - cur_offset);
+ ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
+ &data_reserved, cur_offset,
+ last_byte - cur_offset);
if (ret < 0) {
cur_offset = last_byte;
free_extent_map(em);
@@@ -3399,9 -3363,8 +3399,9 @@@
* range, free reserved data space first, otherwise
* it'll result in false ENOSPC error.
*/
- btrfs_free_reserved_data_space(inode, data_reserved,
- cur_offset, last_byte - cur_offset);
+ btrfs_free_reserved_data_space(BTRFS_I(inode),
+ data_reserved, cur_offset,
+ last_byte - cur_offset);
}
free_extent_map(em);
cur_offset = last_byte;
@@@ -3418,7 -3381,7 +3418,7 @@@
range->len, i_blocksize(inode),
offset + len, &alloc_hint);
else
- btrfs_free_reserved_data_space(inode,
+ btrfs_free_reserved_data_space(BTRFS_I(inode),
data_reserved, range->start,
range->len);
list_del(&range->list);
@@@ -3439,7 -3402,7 +3439,7 @@@ out
inode_unlock(inode);
/* Let go of our reservation. */
if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
- btrfs_free_reserved_data_space(inode, data_reserved,
+ btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
cur_offset, alloc_end - cur_offset);
extent_changeset_free(data_reserved);
return ret;
@@@ -3537,14 -3500,13 +3537,13 @@@ static loff_t btrfs_file_llseek(struct
static int btrfs_file_open(struct inode *inode, struct file *filp)
{
- filp->f_mode |= FMODE_NOWAIT;
+ filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
return generic_file_open(inode, filp);
}
const struct file_operations btrfs_file_operations = {
.llseek = btrfs_file_llseek,
.read_iter = generic_file_read_iter,
- .splice_read = generic_file_splice_read,
.write_iter = btrfs_file_write_iter,
.splice_write = iter_file_splice_write,
.mmap = btrfs_file_mmap,
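The new btrfs_check_nocow_lock()/btrfs_check_nocow_unlock() pair wraps the old check_can_nocow() logic behind a lock/unlock style interface, with the return semantics spelled out in the comment block above (>0 means the nocow write may proceed and *write_bytes is clamped to the writable range). The caller pattern used by the buffered write path reduces to roughly this (a sketch assuming fs/btrfs context, with the actual write elided):

static bool example_try_nocow(struct btrfs_inode *inode, loff_t pos,
			      size_t *write_bytes)
{
	if (btrfs_check_nocow_lock(inode, pos, write_bytes) <= 0)
		return false;	/* reserve data space and CoW instead */

	/* ... perform the nocow write of up to *write_bytes bytes ... */

	btrfs_check_nocow_unlock(inode);
	return true;
}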
diff --combined fs/cifs/dfs_cache.c
index a44f58bbf7ab,a4fe155fc92a..768130d5ac0b
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@@ -29,7 -29,6 +29,7 @@@
struct cache_dfs_tgt {
char *name;
+ int path_consumed;
struct list_head list;
};
@@@ -215,7 -214,7 +215,7 @@@ static int dfscache_proc_open(struct in
const struct proc_ops dfscache_proc_ops = {
.proc_open = dfscache_proc_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = dfscache_proc_write,
@@@ -351,7 -350,7 +351,7 @@@ static inline struct timespec64 get_exp
}
/* Allocate a new DFS target */
-static struct cache_dfs_tgt *alloc_target(const char *name)
+static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
struct cache_dfs_tgt *t;
@@@ -363,7 -362,6 +363,7 @@@
kfree(t);
return ERR_PTR(-ENOMEM);
}
+ t->path_consumed = path_consumed;
INIT_LIST_HEAD(&t->list);
return t;
}
@@@ -386,7 -384,7 +386,7 @@@ static int copy_ref_data(const struct d
for (i = 0; i < numrefs; i++) {
struct cache_dfs_tgt *t;
- t = alloc_target(refs[i].node_name);
+ t = alloc_target(refs[i].node_name, refs[i].path_consumed);
if (IS_ERR(t)) {
free_tgts(ce);
return PTR_ERR(t);
@@@ -492,7 -490,16 +492,7 @@@ static int add_cache_entry(const char *
return 0;
}
-/*
- * Find a DFS cache entry in hash table and optionally check prefix path against
- * @path.
- * Use whole path components in the match.
- * Must be called with htable_rw_lock held.
- *
- * Return ERR_PTR(-ENOENT) if the entry is not found.
- */
-static struct cache_entry *lookup_cache_entry(const char *path,
- unsigned int *hash)
+static struct cache_entry *__lookup_cache_entry(const char *path)
{
struct cache_entry *ce;
unsigned int h;
@@@ -510,75 -517,9 +510,75 @@@
if (!found)
ce = ERR_PTR(-ENOENT);
+ return ce;
+}
+
+/*
+ * Find a DFS cache entry in hash table and optionally check prefix path against
+ * @path.
+ * Use whole path components in the match.
+ * Must be called with htable_rw_lock held.
+ *
+ * Return ERR_PTR(-ENOENT) if the entry is not found.
+ */
+static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
+{
+ struct cache_entry *ce = ERR_PTR(-ENOENT);
+ unsigned int h;
+ int cnt = 0;
+ char *npath;
+ char *s, *e;
+ char sep;
+
+ npath = kstrndup(path, strlen(path), GFP_KERNEL);
+ if (!npath)
+ return ERR_PTR(-ENOMEM);
+
+ s = npath;
+ sep = *npath;
+ while ((s = strchr(s, sep)) && ++cnt < 3)
+ s++;
+
+ if (cnt < 3) {
+ h = cache_entry_hash(path, strlen(path));
+ ce = __lookup_cache_entry(path);
+ goto out;
+ }
+ /*
+ * Handle paths that have more than two path components and are a complete prefix of the DFS
+ * referral request path (@path).
+ *
+ * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
+ */
+ h = cache_entry_hash(npath, strlen(npath));
+ e = npath + strlen(npath) - 1;
+ while (e > s) {
+ char tmp;
+
+ /* skip separators */
+ while (e > s && *e == sep)
+ e--;
+ if (e == s)
+ goto out;
+
+ tmp = *(e+1);
+ *(e+1) = 0;
+
+ ce = __lookup_cache_entry(npath);
+ if (!IS_ERR(ce)) {
+ h = cache_entry_hash(npath, strlen(npath));
+ break;
+ }
+
+ *(e+1) = tmp;
+ /* backward until separator */
+ while (e > s && *e != sep)
+ e--;
+ }
+out:
if (hash)
*hash = h;
-
+ kfree(npath);
return ce;
}
@@@ -832,7 -773,6 +832,7 @@@ static int get_targets(struct cache_ent
rc = -ENOMEM;
goto err_free_it;
}
+ it->it_path_consumed = t->path_consumed;
if (ce->tgthint == t)
list_add(&it->it_list, head);
@@@ -1323,26 -1263,23 +1323,26 @@@ void dfs_cache_del_vol(const char *full
/**
* dfs_cache_get_tgt_share - parse a DFS target
*
+ * @path: DFS full path
* @it: DFS target iterator.
* @share: tree name.
- * @share_len: length of tree name.
* @prefix: prefix path.
- * @prefix_len: length of prefix path.
*
* Return zero if target was parsed correctly, otherwise non-zero.
*/
-int dfs_cache_get_tgt_share(const struct dfs_cache_tgt_iterator *it,
- const char **share, size_t *share_len,
- const char **prefix, size_t *prefix_len)
+int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
+ char **share, char **prefix)
{
- char *s, sep;
+ char *s, sep, *p;
+ size_t len;
+ size_t plen1, plen2;
- if (!it || !share || !share_len || !prefix || !prefix_len)
+ if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
return -EINVAL;
+ *share = NULL;
+ *prefix = NULL;
+
sep = it->it_name[0];
if (sep != '\\' && sep != '/')
return -EINVAL;
@@@ -1351,38 -1288,13 +1351,38 @@@
if (!s)
return -EINVAL;
+ /* point to prefix in target node */
s = strchrnul(s + 1, sep);
- *share = it->it_name;
- *share_len = s - it->it_name;
- *prefix = *s ? s + 1 : s;
- *prefix_len = &it->it_name[strlen(it->it_name)] - *prefix;
+ /* extract target share */
+ *share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
+ if (!*share)
+ return -ENOMEM;
+ /* skip separator */
+ if (*s)
+ s++;
+ /* point to prefix in DFS path */
+ p = path + it->it_path_consumed;
+ if (*p == sep)
+ p++;
+
+ /* merge prefix paths from DFS path and target node */
+ plen1 = it->it_name + strlen(it->it_name) - s;
+ plen2 = path + strlen(path) - p;
+ if (plen1 || plen2) {
+ len = plen1 + plen2 + 2;
+ *prefix = kmalloc(len, GFP_KERNEL);
+ if (!*prefix) {
+ kfree(*share);
+ *share = NULL;
+ return -ENOMEM;
+ }
+ if (plen1)
+ scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p);
+ else
+ strscpy(*prefix, p, len);
+ }
return 0;
}
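The reworked lookup_cache_entry() performs a longest-prefix match: when the full referral path is not cached it keeps stripping the trailing path component (and any repeated separators) and retries, per MS-DFSC 3.2.5.5. The core idea, reduced to a standalone helper with the separator hardcoded and the corner cases of the real code omitted (match() stands in for __lookup_cache_entry()):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static char *longest_cached_prefix(const char *path,
				   bool (*match)(const char *))
{
	char *p = kstrdup(path, GFP_KERNEL);
	char *e;

	if (!p)
		return NULL;

	while (!match(p)) {
		e = strrchr(p, '\\');
		if (!e || e == p) {		/* no components left */
			kfree(p);
			return NULL;
		}
		*e = '\0';			/* drop trailing component */
	}
	return p;
}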
diff --combined fs/debugfs/file.c
index b167d2d02148,dcd7bdaf6741..e2e07eec50f1
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@@ -231,6 -231,10 +231,10 @@@ FULL_PROXY_FUNC(read, ssize_t, filp
loff_t *ppos),
ARGS(filp, buf, size, ppos));
+ FULL_PROXY_FUNC(read_iter, ssize_t, iocb->ki_filp,
+ PROTO(struct kiocb *iocb, struct iov_iter *iter),
+ ARGS(iocb, iter));
+
FULL_PROXY_FUNC(write, ssize_t, filp,
PROTO(struct file *filp, const char __user *buf, size_t size,
loff_t *ppos),
@@@ -273,7 -277,7 +277,7 @@@ static int full_proxy_release(struct in
r = real_fops->release(inode, filp);
replace_fops(filp, d_inode(dentry)->i_fop);
- kfree((void *)proxy_fops);
+ kfree(proxy_fops);
fops_put(real_fops);
return r;
}
@@@ -286,6 -290,8 +290,8 @@@ static void __full_proxy_fops_init(stru
proxy_fops->llseek = full_proxy_llseek;
if (real_fops->read)
proxy_fops->read = full_proxy_read;
+ if (real_fops->read_iter)
+ proxy_fops->read_iter = full_proxy_read_iter;
if (real_fops->write)
proxy_fops->write = full_proxy_write;
if (real_fops->poll)
@@@ -918,6 -924,11 +924,6 @@@ struct dentry *debugfs_create_blob(cons
}
EXPORT_SYMBOL_GPL(debugfs_create_blob);
-struct array_data {
- void *array;
- u32 elements;
-};
-
static size_t u32_format_array(char *buf, size_t bufsize,
u32 *array, int array_size)
{
@@@ -938,8 -949,8 +944,8 @@@
static int u32_array_open(struct inode *inode, struct file *file)
{
- struct array_data *data = inode->i_private;
- int size, elements = data->elements;
+ struct debugfs_u32_array *data = inode->i_private;
+ int size, elements = data->n_elements;
char *buf;
/*
@@@ -954,7 -965,7 +960,7 @@@
buf[size] = 0;
file->private_data = buf;
- u32_format_array(buf, size, data->array, data->elements);
+ u32_format_array(buf, size, data->array, data->n_elements);
return nonseekable_open(inode, file);
}
@@@ -991,7 -1002,8 +997,7 @@@ static const struct file_operations u32
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
- * @array: u32 array that provides data.
- * @elements: total number of elements in the array.
+ * @array: wrapper struct containing data pointer and size of the array.
*
* This function creates a file in debugfs with the given name that exports
* @array as data. If the @mode variable is so set it can be read from.
@@@ -999,10 -1011,17 +1005,10 @@@
* Once array is created its size can not be changed.
*/
void debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent, u32 *array, u32 elements)
+ struct dentry *parent,
+ struct debugfs_u32_array *array)
{
- struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
-
- if (data == NULL)
- return;
-
- data->array = array;
- data->elements = elements;
-
- debugfs_create_file_unsafe(name, mode, parent, data, &u32_array_fops);
+ debugfs_create_file_unsafe(name, mode, parent, array, &u32_array_fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_u32_array);
@@@ -1067,7 -1086,7 +1073,7 @@@ static int debugfs_open_regset32(struc
static const struct file_operations fops_regset32 = {
.open = debugfs_open_regset32,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -1113,7 -1132,7 +1119,7 @@@ static const struct file_operations deb
.owner = THIS_MODULE,
.open = debugfs_devm_entry_open,
.release = single_release,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek
};
diff --combined fs/exfat/file.c
index 6bdabfd4b134,8fe5df8a9ccb..f71b599f9bfe
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@@ -154,7 -154,6 +154,7 @@@ int __exfat_truncate(struct inode *inod
struct timespec64 ts;
struct exfat_dentry *ep, *ep2;
struct exfat_entry_set_cache *es;
+ int err;
es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
ES_ALL_ENTRIES);
@@@ -177,7 -176,7 +177,7 @@@
ep2->dentry.stream.size = 0;
} else {
ep2->dentry.stream.valid_size = cpu_to_le64(new_size);
- ep2->dentry.stream.size = ep->dentry.stream.valid_size;
+ ep2->dentry.stream.size = ep2->dentry.stream.valid_size;
}
if (new_size == 0) {
@@@ -189,9 -188,7 +189,9 @@@
}
exfat_update_dir_chksum_with_entry_set(es);
- exfat_free_dentry_set(es, inode_needs_sync(inode));
+ err = exfat_free_dentry_set(es, inode_needs_sync(inode));
+ if (err)
+ return err;
}
/* cut off from the FAT chain */
@@@ -372,7 -369,6 +372,6 @@@ const struct file_operations exfat_file
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.fsync = exfat_file_fsync,
- .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
};
diff --combined fs/ext4/file.c
index 129cc1dd6b79,f5dc9a4e0937..5a3b61130501
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@@ -544,8 -544,6 +544,8 @@@ static ssize_t ext4_dio_write_iter(stru
iomap_ops = &ext4_iomap_overwrite_ops;
ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
is_sync_kiocb(iocb) || unaligned_io || extend);
+ if (ret == -ENOTBLK)
+ ret = 0;
if (extend)
ret = ext4_handle_inode_extension(inode, offset, ret, count);
@@@ -898,7 -896,6 +898,6 @@@ const struct file_operations ext4_file_
.release = ext4_release_file,
.fsync = ext4_sync_file,
.get_unmapped_area = thp_get_unmapped_area,
- .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = ext4_fallocate,
};
diff --combined fs/f2fs/file.c
index cc7f5670390f,6b34caf13b56..a4da2c356097
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@@ -21,7 -21,6 +21,7 @@@
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
+#include <linux/sched/signal.h>
#include "f2fs.h"
#include "node.h"
@@@ -106,11 -105,11 +106,11 @@@ static vm_fault_t f2fs_vm_page_mkwrite(
if (need_alloc) {
/* block allocation */
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = f2fs_get_block(&dn, page->index);
f2fs_put_dnode(&dn);
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@@ -1374,6 -1373,8 +1374,6 @@@ static int f2fs_collapse_range(struct i
truncate_pagecache(inode, offset);
new_size = i_size_read(inode) - len;
- truncate_pagecache(inode, new_size);
-
ret = f2fs_truncate_blocks(inode, new_size, true);
up_write(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
@@@ -1659,7 -1660,7 +1659,7 @@@ next_alloc
map.m_seg_type = CURSEG_COLD_DATA_PINNED;
f2fs_lock_op(sbi);
- f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
+ f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA);
f2fs_unlock_op(sbi);
err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
@@@ -2526,11 -2527,6 +2526,11 @@@ do_more
}
ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
+ if (ret) {
+ if (ret == -EBUSY)
+ ret = -EAGAIN;
+ goto out;
+ }
range.start += BLKS_PER_SEC(sbi);
if (range.start <= end)
goto do_more;
@@@ -3363,7 -3359,7 +3363,7 @@@ static int f2fs_ioc_measure_verity(stru
return fsverity_ioctl_measure(filp, (void __user *)arg);
}
-static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
+static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@@ -3389,7 -3385,7 +3389,7 @@@
return err;
}
-static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
+static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@@ -3760,193 -3756,6 +3760,193 @@@ out
return ret;
}
+static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
+ pgoff_t off, block_t block, block_t len, u32 flags)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ sector_t sector = SECTOR_FROM_BLOCK(block);
+ sector_t nr_sects = SECTOR_FROM_BLOCK(len);
+ int ret = 0;
+
+ if (!q)
+ return -ENXIO;
+
+ if (flags & F2FS_TRIM_FILE_DISCARD)
+ ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
+ blk_queue_secure_erase(q) ?
+ BLKDEV_DISCARD_SECURE : 0);
+
+ if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
+ if (IS_ENCRYPTED(inode))
+ ret = fscrypt_zeroout_range(inode, off, block, len);
+ else
+ ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
+ GFP_NOFS, 0);
+ }
+
+ return ret;
+}
+
+static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct address_space *mapping = inode->i_mapping;
+ struct block_device *prev_bdev = NULL;
+ struct f2fs_sectrim_range range;
+ pgoff_t index, pg_end, prev_index = 0;
+ block_t prev_block = 0, len = 0;
+ loff_t end_addr;
+ bool to_end = false;
+ int ret = 0;
+
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
+ sizeof(range)))
+ return -EFAULT;
+
+ if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
+ !S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
+ !f2fs_hw_support_discard(sbi)) ||
+ ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
+ IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
+ return -EOPNOTSUPP;
+
+ file_start_write(filp);
+ inode_lock(inode);
+
+ if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
+ range.start >= inode->i_size) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (range.len == 0)
+ goto err;
+
+ if (inode->i_size - range.start > range.len) {
+ end_addr = range.start + range.len;
+ } else {
+ end_addr = range.len == (u64)-1 ?
+ sbi->sb->s_maxbytes : inode->i_size;
+ to_end = true;
+ }
+
+ if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
+ (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ index = F2FS_BYTES_TO_BLK(range.start);
+ pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
+
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ goto err;
+
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+
+ ret = filemap_write_and_wait_range(mapping, range.start,
+ to_end ? LLONG_MAX : end_addr - 1);
+ if (ret)
+ goto out;
+
+ truncate_inode_pages_range(mapping, range.start,
+ to_end ? -1 : end_addr - 1);
+
+ while (index < pg_end) {
+ struct dnode_of_data dn;
+ pgoff_t end_offset, count;
+ int i;
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ if (ret) {
+ if (ret == -ENOENT) {
+ index = f2fs_get_next_page_offset(&dn, index);
+ continue;
+ }
+ goto out;
+ }
+
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ count = min(end_offset - dn.ofs_in_node, pg_end - index);
+ for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
+ struct block_device *cur_bdev;
+ block_t blkaddr = f2fs_data_blkaddr(&dn);
+
+ if (!__is_valid_data_blkaddr(blkaddr))
+ continue;
+
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
+ DATA_GENERIC_ENHANCE)) {
+ ret = -EFSCORRUPTED;
+ f2fs_put_dnode(&dn);
+ goto out;
+ }
+
+ cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
+ if (f2fs_is_multi_device(sbi)) {
+ int di = f2fs_target_device_index(sbi, blkaddr);
+
+ blkaddr -= FDEV(di).start_blk;
+ }
+
+ if (len) {
+ if (prev_bdev == cur_bdev &&
+ index == prev_index + len &&
+ blkaddr == prev_block + len) {
+ len++;
+ } else {
+ ret = f2fs_secure_erase(prev_bdev,
+ inode, prev_index, prev_block,
+ len, range.flags);
+ if (ret) {
+ f2fs_put_dnode(&dn);
+ goto out;
+ }
+
+ len = 0;
+ }
+ }
+
+ if (!len) {
+ prev_bdev = cur_bdev;
+ prev_index = index;
+ prev_block = blkaddr;
+ len = 1;
+ }
+ }
+
+ f2fs_put_dnode(&dn);
+
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+ cond_resched();
+ }
+
+ if (len)
+ ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
+ prev_block, len, range.flags);
+out:
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+err:
+ inode_unlock(inode);
+ file_end_write(filp);
+
+ return ret;
+}
+
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
@@@ -3955,11 -3764,11 +3955,11 @@@
return -ENOSPC;
switch (cmd) {
- case F2FS_IOC_GETFLAGS:
+ case FS_IOC_GETFLAGS:
return f2fs_ioc_getflags(filp, arg);
- case F2FS_IOC_SETFLAGS:
+ case FS_IOC_SETFLAGS:
return f2fs_ioc_setflags(filp, arg);
- case F2FS_IOC_GETVERSION:
+ case FS_IOC_GETVERSION:
return f2fs_ioc_getversion(filp, arg);
case F2FS_IOC_START_ATOMIC_WRITE:
return f2fs_ioc_start_atomic_write(filp);
@@@ -3975,11 -3784,11 +3975,11 @@@
return f2fs_ioc_shutdown(filp, arg);
case FITRIM:
return f2fs_ioc_fitrim(filp, arg);
- case F2FS_IOC_SET_ENCRYPTION_POLICY:
+ case FS_IOC_SET_ENCRYPTION_POLICY:
return f2fs_ioc_set_encryption_policy(filp, arg);
- case F2FS_IOC_GET_ENCRYPTION_POLICY:
+ case FS_IOC_GET_ENCRYPTION_POLICY:
return f2fs_ioc_get_encryption_policy(filp, arg);
- case F2FS_IOC_GET_ENCRYPTION_PWSALT:
+ case FS_IOC_GET_ENCRYPTION_PWSALT:
return f2fs_ioc_get_encryption_pwsalt(filp, arg);
case FS_IOC_GET_ENCRYPTION_POLICY_EX:
return f2fs_ioc_get_encryption_policy_ex(filp, arg);
@@@ -4007,9 -3816,9 +4007,9 @@@
return f2fs_ioc_flush_device(filp, arg);
case F2FS_IOC_GET_FEATURES:
return f2fs_ioc_get_features(filp, arg);
- case F2FS_IOC_FSGETXATTR:
+ case FS_IOC_FSGETXATTR:
return f2fs_ioc_fsgetxattr(filp, arg);
- case F2FS_IOC_FSSETXATTR:
+ case FS_IOC_FSSETXATTR:
return f2fs_ioc_fssetxattr(filp, arg);
case F2FS_IOC_GET_PIN_FILE:
return f2fs_ioc_get_pin_file(filp, arg);
@@@ -4023,18 -3832,16 +4023,18 @@@
return f2fs_ioc_enable_verity(filp, arg);
case FS_IOC_MEASURE_VERITY:
return f2fs_ioc_measure_verity(filp, arg);
- case F2FS_IOC_GET_VOLUME_NAME:
- return f2fs_get_volume_name(filp, arg);
- case F2FS_IOC_SET_VOLUME_NAME:
- return f2fs_set_volume_name(filp, arg);
+ case FS_IOC_GETFSLABEL:
+ return f2fs_ioc_getfslabel(filp, arg);
+ case FS_IOC_SETFSLABEL:
+ return f2fs_ioc_setfslabel(filp, arg);
case F2FS_IOC_GET_COMPRESS_BLOCKS:
return f2fs_get_compress_blocks(filp, arg);
case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
return f2fs_release_compress_blocks(filp, arg);
case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
return f2fs_reserve_compress_blocks(filp, arg);
+ case F2FS_IOC_SEC_TRIM_FILE:
+ return f2fs_sec_trim_file(filp, arg);
default:
return -ENOTTY;
}
@@@ -4159,14 -3966,14 +4159,14 @@@ out
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
- case F2FS_IOC32_GETFLAGS:
- cmd = F2FS_IOC_GETFLAGS;
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
break;
- case F2FS_IOC32_SETFLAGS:
- cmd = F2FS_IOC_SETFLAGS;
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
break;
- case F2FS_IOC32_GETVERSION:
- cmd = F2FS_IOC_GETVERSION;
+ case FS_IOC32_GETVERSION:
+ cmd = FS_IOC_GETVERSION;
break;
case F2FS_IOC_START_ATOMIC_WRITE:
case F2FS_IOC_COMMIT_ATOMIC_WRITE:
@@@ -4175,9 -3982,9 +4175,9 @@@
case F2FS_IOC_ABORT_VOLATILE_WRITE:
case F2FS_IOC_SHUTDOWN:
case FITRIM:
- case F2FS_IOC_SET_ENCRYPTION_POLICY:
- case F2FS_IOC_GET_ENCRYPTION_PWSALT:
- case F2FS_IOC_GET_ENCRYPTION_POLICY:
+ case FS_IOC_SET_ENCRYPTION_POLICY:
+ case FS_IOC_GET_ENCRYPTION_PWSALT:
+ case FS_IOC_GET_ENCRYPTION_POLICY:
case FS_IOC_GET_ENCRYPTION_POLICY_EX:
case FS_IOC_ADD_ENCRYPTION_KEY:
case FS_IOC_REMOVE_ENCRYPTION_KEY:
@@@ -4191,20 -3998,19 +4191,20 @@@
case F2FS_IOC_MOVE_RANGE:
case F2FS_IOC_FLUSH_DEVICE:
case F2FS_IOC_GET_FEATURES:
- case F2FS_IOC_FSGETXATTR:
- case F2FS_IOC_FSSETXATTR:
+ case FS_IOC_FSGETXATTR:
+ case FS_IOC_FSSETXATTR:
case F2FS_IOC_GET_PIN_FILE:
case F2FS_IOC_SET_PIN_FILE:
case F2FS_IOC_PRECACHE_EXTENTS:
case F2FS_IOC_RESIZE_FS:
case FS_IOC_ENABLE_VERITY:
case FS_IOC_MEASURE_VERITY:
- case F2FS_IOC_GET_VOLUME_NAME:
- case F2FS_IOC_SET_VOLUME_NAME:
+ case FS_IOC_GETFSLABEL:
+ case FS_IOC_SETFSLABEL:
case F2FS_IOC_GET_COMPRESS_BLOCKS:
case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
+ case F2FS_IOC_SEC_TRIM_FILE:
break;
default:
return -ENOIOCTLCMD;
@@@ -4227,6 -4033,5 +4227,5 @@@ const struct file_operations f2fs_file_
#ifdef CONFIG_COMPAT
.compat_ioctl = f2fs_compat_ioctl,
#endif
- .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
};
diff --combined fs/fuse/file.c
index 6611ef3269a8,a404e147bb2c..e727562b9461
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@@ -18,7 -18,6 +18,7 @@@
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
+#include <linux/fs.h>
static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags,
struct fuse_page_desc **desc)
@@@ -1587,6 -1586,7 +1587,6 @@@ static void fuse_writepage_finish(struc
struct backing_dev_info *bdi = inode_to_bdi(inode);
int i;
- rb_erase(&wpa->writepages_entry, &fi->writepages);
for (i = 0; i < ap->num_pages; i++) {
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
@@@ -1637,7 -1637,6 +1637,7 @@@ __acquires(fi->lock
out_free:
fi->writectr--;
+ rb_erase(&wpa->writepages_entry, &fi->writepages);
fuse_writepage_finish(fc, wpa);
spin_unlock(&fi->lock);
@@@ -1675,8 -1674,7 +1675,8 @@@ __acquires(fi->lock
}
}
-static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
+static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
+ struct fuse_writepage_args *wpa)
{
pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
@@@ -1699,17 -1697,11 +1699,17 @@@
else if (idx_to < curr_index)
p = &(*p)->rb_left;
else
- return (void) WARN_ON(true);
+ return curr;
}
rb_link_node(&wpa->writepages_entry, parent, p);
rb_insert_color(&wpa->writepages_entry, root);
+ return NULL;
+}
+
+static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
+{
+ WARN_ON(fuse_insert_writeback(root, wpa));
}
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args,
@@@ -1722,7 -1714,6 +1722,7 @@@
mapping_set_error(inode->i_mapping, error);
spin_lock(&fi->lock);
+ rb_erase(&wpa->writepages_entry, &fi->writepages);
while (wpa->next) {
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_write_in *inarg = &wpa->ia.write.in;
@@@ -1961,14 -1952,14 +1961,14 @@@ static void fuse_writepages_send(struc
}
/*
- * First recheck under fi->lock if the offending offset is still under
- * writeback. If yes, then iterate auxiliary write requests, to see if there's
+ * Check under fi->lock if the page is under writeback, and insert it onto the
+ * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's
* one already added for a page at this offset. If there's none, then insert
* this new request onto the auxiliary list, otherwise reuse the existing one by
- * copying the new page contents over to the old temporary page.
+ * swapping the new temp page with the old one.
*/
-static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa,
- struct page *page)
+static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
+ struct page *page)
{
struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
struct fuse_writepage_args *tmp;
@@@ -1976,15 -1967,17 +1976,15 @@@
struct fuse_args_pages *new_ap = &new_wpa->ia.ap;
WARN_ON(new_ap->num_pages != 0);
+ new_ap->num_pages = 1;
spin_lock(&fi->lock);
- rb_erase(&new_wpa->writepages_entry, &fi->writepages);
- old_wpa = fuse_find_writeback(fi, page->index, page->index);
+ old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
if (!old_wpa) {
- tree_insert(&fi->writepages, new_wpa);
spin_unlock(&fi->lock);
- return false;
+ return true;
}
- new_ap->num_pages = 1;
for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
pgoff_t curr_index;
@@@ -2013,41 -2006,7 +2013,41 @@@
fuse_writepage_free(new_wpa);
}
- return true;
+ return false;
+}
+
+static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
+ struct fuse_args_pages *ap,
+ struct fuse_fill_wb_data *data)
+{
+ WARN_ON(!ap->num_pages);
+
+ /*
+ * Being under writeback is unlikely but possible. For example direct
+ * read to an mmaped fuse file will set the page dirty twice; once when
+ * the pages are faulted with get_user_pages(), and then after the read
+ * completed.
+ */
+ if (fuse_page_is_writeback(data->inode, page->index))
+ return true;
+
+ /* Reached max pages */
+ if (ap->num_pages == fc->max_pages)
+ return true;
+
+ /* Reached max write bytes */
+ if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
+ return true;
+
+ /* Discontinuity */
+ if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
+ return true;
+
+ /* Need to grow the pages array? If so, did the expansion fail? */
+ if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
+ return true;
+
+ return false;
}
static int fuse_writepages_fill(struct page *page,
@@@ -2060,6 -2019,7 +2060,6 @@@
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
struct page *tmp_page;
- bool is_writeback;
int err;
if (!data->ff) {
@@@ -2069,9 -2029,25 +2069,9 @@@
goto out_unlock;
}
- /*
- * Being under writeback is unlikely but possible. For example direct
- * read to an mmaped fuse file will set the page dirty twice; once when
- * the pages are faulted with get_user_pages(), and then after the read
- * completed.
- */
- is_writeback = fuse_page_is_writeback(inode, page->index);
-
- if (wpa && ap->num_pages &&
- (is_writeback || ap->num_pages == fc->max_pages ||
- (ap->num_pages + 1) * PAGE_SIZE > fc->max_write ||
- data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)) {
+ if (wpa && fuse_writepage_need_send(fc, page, ap, data)) {
fuse_writepages_send(data);
data->wpa = NULL;
- } else if (wpa && ap->num_pages == data->max_pages) {
- if (!fuse_pages_realloc(data)) {
- fuse_writepages_send(data);
- data->wpa = NULL;
- }
}
err = -ENOMEM;
@@@ -2109,6 -2085,12 +2109,6 @@@
ap->args.end = fuse_writepage_end;
ap->num_pages = 0;
wpa->inode = inode;
-
- spin_lock(&fi->lock);
- tree_insert(&fi->writepages, wpa);
- spin_unlock(&fi->lock);
-
- data->wpa = wpa;
}
set_page_writeback(page);
@@@ -2116,25 -2098,26 +2116,25 @@@
ap->pages[ap->num_pages] = tmp_page;
ap->descs[ap->num_pages].offset = 0;
ap->descs[ap->num_pages].length = PAGE_SIZE;
+ data->orig_pages[ap->num_pages] = page;
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
err = 0;
- if (is_writeback && fuse_writepage_in_flight(wpa, page)) {
+ if (data->wpa) {
+ /*
+ * Protected by fi->lock against concurrent access by
+ * fuse_page_is_writeback().
+ */
+ spin_lock(&fi->lock);
+ ap->num_pages++;
+ spin_unlock(&fi->lock);
+ } else if (fuse_writepage_add(wpa, page)) {
+ data->wpa = wpa;
+ } else {
end_page_writeback(page);
- data->wpa = NULL;
- goto out_unlock;
}
- data->orig_pages[ap->num_pages] = page;
-
- /*
- * Protected by fi->lock against concurrent access by
- * fuse_page_is_writeback().
- */
- spin_lock(&fi->lock);
- ap->num_pages++;
- spin_unlock(&fi->lock);
-
out_unlock:
unlock_page(page);
@@@ -2166,8 -2149,10 +2166,8 @@@ static int fuse_writepages(struct addre
err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
if (data.wpa) {
- /* Ignore errors if we can write at least one page */
WARN_ON(!data.wpa->ia.ap.num_pages);
fuse_writepages_send(&data);
- err = 0;
}
if (data.ff)
fuse_file_put(data.ff, false, false);
@@@ -2776,16 -2761,7 +2776,16 @@@ long fuse_do_ioctl(struct file *file, u
struct iovec *iov = iov_page;
iov->iov_base = (void __user *)arg;
- iov->iov_len = _IOC_SIZE(cmd);
+
+ switch (cmd) {
+ case FS_IOC_GETFLAGS:
+ case FS_IOC_SETFLAGS:
+ iov->iov_len = sizeof(int);
+ break;
+ default:
+ iov->iov_len = _IOC_SIZE(cmd);
+ break;
+ }
if (_IOC_DIR(cmd) & _IOC_WRITE) {
in_iov = iov;
@@@ -2987,7 -2963,7 +2987,7 @@@ static void fuse_register_polled_file(s
{
spin_lock(&fc->lock);
if (RB_EMPTY_NODE(&ff->polled_node)) {
- struct rb_node **link, *uninitialized_var(parent);
+ struct rb_node **link, *parent;
link = fuse_find_polled_node(fc, ff->kh, &parent);
BUG_ON(*link);
@@@ -3406,7 -3382,6 +3406,6 @@@ static const struct file_operations fus
.fsync = fuse_fsync,
.lock = fuse_file_lock,
.flock = fuse_file_flock,
- .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.unlocked_ioctl = fuse_file_ioctl,
.compat_ioctl = fuse_file_compat_ioctl,
diff --combined fs/gfs2/file.c
index b39b339feddc,3e0f0435c96a..b7ac74272479
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@@ -781,39 -781,39 +781,39 @@@ static int gfs2_fsync(struct file *file
return ret ? ret : ret1;
}
-static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
+static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
+ struct gfs2_holder *gh)
{
struct file *file = iocb->ki_filp;
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
size_t count = iov_iter_count(to);
- struct gfs2_holder gh;
ssize_t ret;
if (!count)
return 0; /* skip atime */
- gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
- ret = gfs2_glock_nq(&gh);
+ gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
+ ret = gfs2_glock_nq(gh);
if (ret)
goto out_uninit;
ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
is_sync_kiocb(iocb));
- gfs2_glock_dq(&gh);
+ gfs2_glock_dq(gh);
out_uninit:
- gfs2_holder_uninit(&gh);
+ gfs2_holder_uninit(gh);
return ret;
}
-static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
+ struct gfs2_holder *gh)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
size_t len = iov_iter_count(from);
loff_t offset = iocb->ki_pos;
- struct gfs2_holder gh;
ssize_t ret;
/*
@@@ -824,8 -824,8 +824,8 @@@
* unfortunately, have the option of only flushing a range like the
* VFS does.
*/
- gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
- ret = gfs2_glock_nq(&gh);
+ gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
+ ret = gfs2_glock_nq(gh);
if (ret)
goto out_uninit;
@@@ -835,12 -835,11 +835,12 @@@
ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
is_sync_kiocb(iocb));
-
+ if (ret == -ENOTBLK)
+ ret = 0;
out:
- gfs2_glock_dq(&gh);
+ gfs2_glock_dq(gh);
out_uninit:
- gfs2_holder_uninit(&gh);
+ gfs2_holder_uninit(gh);
return ret;
}
@@@ -852,7 -851,7 +852,7 @@@ static ssize_t gfs2_file_read_iter(stru
ssize_t ret;
if (iocb->ki_flags & IOCB_DIRECT) {
- ret = gfs2_file_direct_read(iocb, to);
+ ret = gfs2_file_direct_read(iocb, to, &gh);
if (likely(ret != -ENOTBLK))
return ret;
iocb->ki_flags &= ~IOCB_DIRECT;
@@@ -901,12 -900,13 +901,12 @@@ static ssize_t gfs2_file_write_iter(str
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_holder gh;
ssize_t ret;
gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
if (iocb->ki_flags & IOCB_APPEND) {
- struct gfs2_holder gh;
-
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
if (ret)
return ret;
@@@ -930,7 -930,7 +930,7 @@@
struct address_space *mapping = file->f_mapping;
ssize_t buffered, ret2;
- ret = gfs2_file_direct_write(iocb, from);
+ ret = gfs2_file_direct_write(iocb, from, &gh);
if (ret < 0 || !iov_iter_count(from))
goto out_unlock;
@@@ -1371,7 -1371,6 +1371,6 @@@ const struct file_operations gfs2_file_
.fsync = gfs2_fsync,
.lock = gfs2_lock,
.flock = gfs2_flock,
- .splice_read = generic_file_splice_read,
.splice_write = gfs2_file_splice_write,
.setlease = simple_nosetlease,
.fallocate = gfs2_fallocate,
@@@ -1402,7 -1401,6 +1401,6 @@@ const struct file_operations gfs2_file_
.open = gfs2_open,
.release = gfs2_release,
.fsync = gfs2_fsync,
- .splice_read = generic_file_splice_read,
.splice_write = gfs2_file_splice_write,
.setlease = generic_setlease,
.fallocate = gfs2_fallocate,
diff --combined fs/gfs2/glock.c
index 57134d326cfa,ffc9e4273a23..63a6c3e391e9
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@@ -2106,12 -2106,6 +2106,12 @@@ static const char *gflags2str(char *buf
*p++ = 'o';
if (test_bit(GLF_BLOCKING, gflags))
*p++ = 'b';
+ if (test_bit(GLF_INODE_CREATING, gflags))
+ *p++ = 'c';
+ if (test_bit(GLF_PENDING_DELETE, gflags))
+ *p++ = 'P';
+ if (test_bit(GLF_FREEING, gflags))
+ *p++ = 'x';
*p = 0;
return buf;
}
@@@ -2479,7 -2473,7 +2479,7 @@@ static int gfs2_sbstats_open(struct ino
static const struct file_operations gfs2_glocks_fops = {
.owner = THIS_MODULE,
.open = gfs2_glocks_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = gfs2_glocks_release,
};
@@@ -2487,7 -2481,7 +2487,7 @@@
static const struct file_operations gfs2_glstats_fops = {
.owner = THIS_MODULE,
.open = gfs2_glstats_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = gfs2_glocks_release,
};
@@@ -2495,7 -2489,7 +2495,7 @@@
static const struct file_operations gfs2_sbstats_fops = {
.owner = THIS_MODULE,
.open = gfs2_sbstats_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
diff --combined fs/hfs/inode.c
index f35a37c65e5f,6181d3818e17..8d78da646f74
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@@ -17,7 -17,6 +17,7 @@@
#include <linux/cred.h>
#include <linux/uio.h>
#include <linux/xattr.h>
+#include <linux/blkdev.h>
#include "hfs_fs.h"
#include "btree.h"
@@@ -683,7 -682,6 +683,6 @@@ static const struct file_operations hfs
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
- .splice_read = generic_file_splice_read,
.fsync = hfs_file_fsync,
.open = hfs_file_open,
.release = hfs_file_release,
diff --combined fs/nfs/file.c
index f96367a2463e,8ba06f6c3ec5..0922833c90e6
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@@ -83,6 -83,7 +83,6 @@@ nfs_file_release(struct inode *inode, s
dprintk("NFS: release(%pD2)\n", filp);
nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
- inode_dio_wait(inode);
nfs_file_clear_open_context(filp);
return 0;
}
@@@ -850,7 -851,6 +850,6 @@@ const struct file_operations nfs_file_o
.fsync = nfs_file_fsync,
.lock = nfs_lock,
.flock = nfs_flock,
- .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.check_flags = nfs_check_flags,
.setlease = simple_nosetlease,
diff --combined fs/nfsd/nfs4state.c
index c9056316a0b3,58d80a699e06..e809fe330518
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@@ -507,17 -507,6 +507,17 @@@ find_any_file(struct nfs4_file *f
return ret;
}
+static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
+{
+ struct nfsd_file *ret = NULL;
+
+ spin_lock(&f->fi_lock);
+ if (f->fi_deleg_file)
+ ret = nfsd_file_get(f->fi_deleg_file);
+ spin_unlock(&f->fi_lock);
+ return ret;
+}
+
static atomic_long_t num_delegations;
unsigned long max_delegations;
@@@ -2375,7 -2364,7 +2375,7 @@@ static int client_info_open(struct inod
static const struct file_operations client_info_fops = {
.open = client_info_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -2455,8 -2444,6 +2455,8 @@@ static int nfs4_show_open(struct seq_fi
oo = ols->st_stateowner;
nf = st->sc_file;
file = find_any_file(nf);
+ if (!file)
+ return 0;
seq_printf(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
@@@ -2494,8 -2481,6 +2494,8 @@@ static int nfs4_show_lock(struct seq_fi
oo = ols->st_stateowner;
nf = st->sc_file;
file = find_any_file(nf);
+ if (!file)
+ return 0;
seq_printf(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
@@@ -2528,9 -2513,7 +2528,9 @@@ static int nfs4_show_deleg(struct seq_f
ds = delegstateid(st);
nf = st->sc_file;
- file = nf->fi_deleg_file;
+ file = find_deleg_file(nf);
+ if (!file)
+ return 0;
seq_printf(s, "- ");
nfs4_show_stateid(s, &st->sc_stateid);
@@@ -2546,7 -2529,6 +2546,7 @@@
seq_printf(s, ", ");
nfs4_show_fname(s, file);
seq_printf(s, " }\n");
+ nfsd_file_put(file);
return 0;
}
@@@ -2629,7 -2611,7 +2629,7 @@@ static int client_opens_release(struct
static const struct file_operations client_states_fops = {
.open = client_states_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = client_opens_release,
};
diff --combined fs/nfsd/nfsctl.c
index 7ae236113040,c6f2faa759d1..b36911385f5d
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@@ -159,7 -159,7 +159,7 @@@ static int exports_proc_open(struct ino
static const struct proc_ops exports_proc_ops = {
.proc_open = exports_proc_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = seq_release,
};
@@@ -171,7 -171,7 +171,7 @@@ static int exports_nfsd_open(struct ino
static const struct file_operations exports_nfsd_operations = {
.open = exports_nfsd_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
@@@ -189,7 -189,7 +189,7 @@@ static int export_features_open(struct
static const struct file_operations export_features_operations = {
.open = export_features_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -208,7 -208,7 +208,7 @@@ static int supported_enctypes_open(stru
static const struct file_operations supported_enctypes_ops = {
.open = supported_enctypes_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -216,14 -216,14 +216,14 @@@
static const struct file_operations pool_stats_operations = {
.open = nfsd_pool_stats_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = nfsd_pool_stats_release,
};
static const struct file_operations reply_cache_stats_operations = {
.open = nfsd_reply_cache_stats_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -351,7 -351,7 +351,7 @@@ static ssize_t write_unlock_fs(struct f
static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
{
char *dname, *path;
- int uninitialized_var(maxsize);
+ int maxsize;
char *mesg = buf;
int len;
struct auth_domain *dom;
diff --combined fs/omfs/file.c
index 2c7b70ee1388,0dddc6c644c1..cae3f4dcae54
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@@ -220,7 -220,7 +220,7 @@@ static int omfs_get_block(struct inode
struct buffer_head *bh;
sector_t next, offset;
int ret;
- u64 uninitialized_var(new_block);
+ u64 new_block;
u32 max_extents;
int extent_count;
struct omfs_extent *oe;
@@@ -340,7 -340,6 +340,6 @@@ const struct file_operations omfs_file_
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.fsync = generic_file_fsync,
- .splice_read = generic_file_splice_read,
};
static int omfs_setattr(struct dentry *dentry, struct iattr *attr)
diff --combined fs/proc/array.c
index 65ec2029fa80,49456efa31e3..5f6c8a8cb701
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@@ -341,8 -341,6 +341,8 @@@ static inline void task_seccomp(struct
seq_put_decimal_ull(m, "NoNewPrivs:\t", task_no_new_privs(p));
#ifdef CONFIG_SECCOMP
seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
+ seq_put_decimal_ull(m, "\nSeccomp_filters:\t",
+ atomic_read(&p->seccomp.filter_count));
#endif
seq_puts(m, "\nSpeculation_Store_Bypass:\t");
switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
@@@ -768,7 -766,7 +768,7 @@@ static int children_seq_open(struct ino
const struct file_operations proc_tid_children_operations = {
.open = children_seq_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
diff --combined fs/proc/base.c
index a333caeca291,6f2e921c471e..358ca390349e
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@@ -538,7 -538,7 +538,7 @@@ static ssize_t lstats_write(struct fil
static const struct file_operations proc_lstats_operations = {
.open = lstats_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = lstats_write,
.llseek = seq_lseek,
.release = single_release,
@@@ -776,7 -776,7 +776,7 @@@ static int proc_single_open(struct inod
static const struct file_operations proc_single_file_operations = {
.open = proc_single_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
@@@ -1460,7 -1460,7 +1460,7 @@@ static int sched_open(struct inode *ino
static const struct file_operations proc_pid_sched_operations = {
.open = sched_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = sched_write,
.llseek = seq_lseek,
.release = single_release,
@@@ -1535,7 -1535,7 +1535,7 @@@ static int sched_autogroup_open(struct
static const struct file_operations proc_pid_sched_autogroup_operations = {
.open = sched_autogroup_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = sched_autogroup_write,
.llseek = seq_lseek,
.release = single_release,
@@@ -1638,7 -1638,7 +1638,7 @@@ static int timens_offsets_open(struct i
static const struct file_operations proc_timens_offsets_operations = {
.open = timens_offsets_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = timens_offsets_write,
.llseek = seq_lseek,
.release = single_release,
@@@ -1695,7 -1695,7 +1695,7 @@@ static int comm_open(struct inode *inod
static const struct file_operations proc_pid_set_comm_operations = {
.open = comm_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = comm_write,
.llseek = seq_lseek,
.release = single_release,
@@@ -2189,16 -2189,16 +2189,16 @@@ struct map_files_info
};
/*
- * Only allow CAP_SYS_ADMIN to follow the links, due to concerns about how the
- * symlinks may be used to bypass permissions on ancestor directories in the
- * path to the file in question.
+ * Only allow CAP_SYS_ADMIN and CAP_CHECKPOINT_RESTORE to follow the links, due
+ * to concerns about how the symlinks may be used to bypass permissions on
+ * ancestor directories in the path to the file in question.
*/
static const char *
proc_map_files_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- if (!capable(CAP_SYS_ADMIN))
+ if (!checkpoint_restore_ns_capable(&init_user_ns))
return ERR_PTR(-EPERM);
return proc_pid_get_link(dentry, inode, done);
@@@ -2484,7 -2484,7 +2484,7 @@@ static int proc_timers_open(struct inod
static const struct file_operations proc_timers_operations = {
.open = proc_timers_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private,
};
@@@ -2576,7 -2576,7 +2576,7 @@@ static int timerslack_ns_open(struct in
static const struct file_operations proc_pid_set_timerslack_ns_operations = {
.open = timerslack_ns_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = timerslack_ns_write,
.llseek = seq_lseek,
.release = single_release,
@@@ -3028,7 -3028,7 +3028,7 @@@ static int proc_projid_map_open(struct
static const struct file_operations proc_uid_map_operations = {
.open = proc_uid_map_open,
.write = proc_uid_map_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = proc_id_map_release,
};
@@@ -3036,7 -3036,7 +3036,7 @@@
static const struct file_operations proc_gid_map_operations = {
.open = proc_gid_map_open,
.write = proc_gid_map_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = proc_id_map_release,
};
@@@ -3044,7 -3044,7 +3044,7 @@@
static const struct file_operations proc_projid_map_operations = {
.open = proc_projid_map_open,
.write = proc_projid_map_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = proc_id_map_release,
};
@@@ -3095,7 -3095,7 +3095,7 @@@ static int proc_setgroups_release(struc
static const struct file_operations proc_setgroups_operations = {
.open = proc_setgroups_open,
.write = proc_setgroups_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = proc_setgroups_release,
};
diff --combined fs/proc/proc_net.c
index ed8a6306990c,8274f4fc4d43..fb0cd45fcc26
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@@ -92,13 -92,13 +92,13 @@@ static int seq_release_net(struct inod
static const struct proc_ops proc_net_seq_ops = {
.proc_open = seq_open_net,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_write = proc_simple_write,
.proc_lseek = seq_lseek,
.proc_release = seq_release_net,
};
-int bpf_iter_init_seq_net(void *priv_data)
+int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux)
{
#ifdef CONFIG_NET_NS
struct seq_net_private *p = priv_data;
@@@ -204,7 -204,7 +204,7 @@@ static int single_release_net(struct in
static const struct proc_ops proc_net_single_ops = {
.proc_open = single_open_net,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_write = proc_simple_write,
.proc_lseek = seq_lseek,
.proc_release = single_release_net,
diff --combined fs/ubifs/file.c
index b77d1637bbbc,a3af46b39508..fb41c77cae1e
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@@ -222,7 -222,7 +222,7 @@@ static int write_begin_slow(struct addr
struct ubifs_info *c = inode->i_sb->s_fs_info;
pgoff_t index = pos >> PAGE_SHIFT;
struct ubifs_budget_req req = { .new_page = 1 };
- int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
+ int err, appending = !!(pos + len > inode->i_size);
struct page *page;
dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
@@@ -426,7 -426,7 +426,7 @@@ static int ubifs_write_begin(struct fil
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_inode *ui = ubifs_inode(inode);
pgoff_t index = pos >> PAGE_SHIFT;
- int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
+ int err, appending = !!(pos + len > inode->i_size);
int skipped_read = 0;
struct page *page;
@@@ -1668,7 -1668,6 +1668,6 @@@ const struct file_operations ubifs_file
.mmap = ubifs_file_mmap,
.fsync = ubifs_fsync,
.unlocked_ioctl = ubifs_ioctl,
- .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.open = fscrypt_file_open,
#ifdef CONFIG_COMPAT
diff --combined fs/xfs/xfs_file.c
index c31cd3be9fb2,964bc733e765..0dcec0c2d5e8
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@@ -94,7 -94,6 +94,7 @@@ xfs_file_fsync
{
struct inode *inode = file->f_mapping->host;
struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_inode_log_item *iip = ip->i_itemp;
struct xfs_mount *mp = ip->i_mount;
int error = 0;
int log_flushed = 0;
@@@ -138,15 -137,13 +138,15 @@@
xfs_ilock(ip, XFS_ILOCK_SHARED);
if (xfs_ipincount(ip)) {
if (!datasync ||
- (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
- lsn = ip->i_itemp->ili_last_lsn;
+ (iip->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
+ lsn = iip->ili_last_lsn;
}
if (lsn) {
error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
- ip->i_itemp->ili_fsync_fields = 0;
+ spin_lock(&iip->ili_lock);
+ iip->ili_fsync_fields = 0;
+ spin_unlock(&iip->ili_lock);
}
xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@@ -508,7 -505,7 +508,7 @@@ xfs_file_dio_aio_write
*/
if (xfs_is_cow_inode(ip)) {
trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
- return -EREMCHG;
+ return -ENOTBLK;
}
iolock = XFS_IOLOCK_EXCL;
} else {
@@@ -556,8 -553,8 +556,8 @@@ out
xfs_iunlock(ip, iolock);
/*
- * No fallback to buffered IO on errors for XFS, direct IO will either
- * complete fully or fail.
+ * No fallback to buffered IO after short writes for XFS, direct I/O
+ * will either complete fully or return an error.
*/
ASSERT(ret < 0 || ret == count);
return ret;
@@@ -717,7 -714,7 +717,7 @@@ xfs_file_write_iter
* allow an operation to fall back to buffered mode.
*/
ret = xfs_file_dio_aio_write(iocb, from);
- if (ret != -EREMCHG)
+ if (ret != -ENOTBLK)
return ret;
}
@@@ -1038,7 -1035,7 +1038,7 @@@ xfs_file_remap_range
/* Prepare and then clone file data. */
ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
&len, remap_flags);
- if (ret < 0 || len == 0)
+ if (ret || len == 0)
return ret;
trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
@@@ -1068,7 -1065,7 +1068,7 @@@
if (mp->m_flags & XFS_MOUNT_WSYNC)
xfs_log_force_inode(dest);
out_unlock:
- xfs_reflink_remap_unlock(file_in, file_out);
+ xfs_iunlock2_io_mmap(src, dest);
if (ret)
trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
return remapped > 0 ? remapped : ret;
@@@ -1083,7 -1080,7 +1083,7 @@@ xfs_file_open
return -EFBIG;
if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
return -EIO;
- file->f_mode |= FMODE_NOWAIT;
+ file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
return 0;
}
@@@ -1266,23 -1263,10 +1266,23 @@@ xfs_filemap_pfn_mkwrite
return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}
+static void
+xfs_filemap_map_pages(
+ struct vm_fault *vmf,
+ pgoff_t start_pgoff,
+ pgoff_t end_pgoff)
+{
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+
+ xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+ filemap_map_pages(vmf, start_pgoff, end_pgoff);
+ xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+}
+
static const struct vm_operations_struct xfs_file_vm_ops = {
.fault = xfs_filemap_fault,
.huge_fault = xfs_filemap_huge_fault,
- .map_pages = filemap_map_pages,
+ .map_pages = xfs_filemap_map_pages,
.page_mkwrite = xfs_filemap_page_mkwrite,
.pfn_mkwrite = xfs_filemap_pfn_mkwrite,
};
@@@ -1313,7 -1297,6 +1313,6 @@@ const struct file_operations xfs_file_o
.llseek = xfs_file_llseek,
.read_iter = xfs_file_read_iter,
.write_iter = xfs_file_write_iter,
- .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.iopoll = iomap_dio_iopoll,
.unlocked_ioctl = xfs_file_ioctl,
diff --combined fs/zonefs/super.c
index 8ec7c8f109d7,d9f5fbeb5506..b0f602da8ec3
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@@ -335,7 -335,7 +335,7 @@@ static void zonefs_io_error(struct inod
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
unsigned int noio_flag;
unsigned int nr_zones =
- zi->i_max_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
+ zi->i_zone_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
struct zonefs_ioerr_data err = {
.inode = inode,
.write = write,
@@@ -398,7 -398,7 +398,7 @@@ static int zonefs_file_truncate(struct
goto unlock;
ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
- zi->i_max_size >> SECTOR_SHIFT, GFP_NOFS);
+ zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
if (ret) {
zonefs_err(inode->i_sb,
"Zone management operation at %llu failed %d",
@@@ -607,14 -607,14 +607,14 @@@ static ssize_t zonefs_file_dio_append(s
int nr_pages;
ssize_t ret;
- nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
- if (!nr_pages)
- return 0;
-
max = queue_max_zone_append_sectors(bdev_get_queue(bdev));
max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
iov_iter_truncate(from, max);
+ nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
+ if (!nr_pages)
+ return 0;
+
bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set);
if (!bio)
return -ENOMEM;
@@@ -786,11 -786,8 +786,11 @@@ static ssize_t zonefs_file_write_iter(s
if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
return -EFBIG;
- if (iocb->ki_flags & IOCB_DIRECT)
- return zonefs_file_dio_write(iocb, from);
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ ssize_t ret = zonefs_file_dio_write(iocb, from);
+ if (ret != -ENOTBLK)
+ return ret;
+ }
return zonefs_file_buffered_write(iocb, from);
}
@@@ -872,7 -869,6 +872,6 @@@ static const struct file_operations zon
.llseek = zonefs_file_llseek,
.read_iter = zonefs_file_read_iter,
.write_iter = zonefs_file_write_iter,
- .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.iopoll = iomap_dio_iopoll,
};
@@@ -1053,16 -1049,14 +1052,16 @@@ static void zonefs_init_file_inode(stru
zi->i_ztype = type;
zi->i_zsector = zone->start;
+ zi->i_zone_size = zone->len << SECTOR_SHIFT;
+
zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
- zone->len << SECTOR_SHIFT);
+ zone->capacity << SECTOR_SHIFT);
zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true);
inode->i_uid = sbi->s_uid;
inode->i_gid = sbi->s_gid;
inode->i_size = zi->i_wpoffset;
- inode->i_blocks = zone->len;
+ inode->i_blocks = zi->i_max_size >> SECTOR_SHIFT;
inode->i_op = &zonefs_file_inode_operations;
inode->i_fop = &zonefs_file_operations;
@@@ -1124,7 -1118,7 +1123,7 @@@ static int zonefs_create_zgroup(struct
char *file_name;
struct dentry *dir;
unsigned int n = 0;
- int ret = -ENOMEM;
+ int ret;
/* If the group is empty, there is nothing to do */
if (!zd->nr_zones[type])
@@@ -1140,10 -1134,8 +1139,10 @@@
zgroup_name = "seq";
dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
- if (!dir)
+ if (!dir) {
+ ret = -ENOMEM;
goto free;
+ }
/*
* The first zone contains the super block: skip it.
@@@ -1169,28 -1161,20 +1168,28 @@@
if (zonefs_zone_type(next) != type)
break;
zone->len += next->len;
+ zone->capacity += next->capacity;
if (next->cond == BLK_ZONE_COND_READONLY &&
zone->cond != BLK_ZONE_COND_OFFLINE)
zone->cond = BLK_ZONE_COND_READONLY;
else if (next->cond == BLK_ZONE_COND_OFFLINE)
zone->cond = BLK_ZONE_COND_OFFLINE;
}
+ if (zone->capacity != zone->len) {
+ zonefs_err(sb, "Invalid conventional zone capacity\n");
+ ret = -EINVAL;
+ goto free;
+ }
}
/*
* Use the file number within its group as file name.
*/
snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
- if (!zonefs_create_inode(dir, file_name, zone, type))
+ if (!zonefs_create_inode(dir, file_name, zone, type)) {
+ ret = -ENOMEM;
goto free;
+ }
n++;
}
diff --combined include/linux/bpf-cgroup.h
index 64f367044e25,f81d3b3752f9..82b26a1386d8
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@@ -46,8 -46,7 +46,8 @@@ struct bpf_cgroup_storage
};
struct bpf_cgroup_storage_map *map;
struct bpf_cgroup_storage_key key;
- struct list_head list;
+ struct list_head list_map;
+ struct list_head list_cg;
struct rb_node node;
struct rcu_head rcu;
};
@@@ -79,9 -78,6 +79,9 @@@ struct cgroup_bpf
struct list_head progs[MAX_BPF_ATTACH_TYPE];
u32 flags[MAX_BPF_ATTACH_TYPE];
+ /* list of cgroup shared storages */
+ struct list_head storages;
+
/* temp storage for effective prog array used by prog_attach/detach */
struct bpf_prog_array *inactive;
@@@ -136,7 -132,7 +136,7 @@@ int __cgroup_bpf_check_dev_permission(s
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
struct ctl_table *table, int write,
- void **buf, size_t *pcount, loff_t *ppos,
+ char **buf, size_t *pcount, loff_t *ppos,
enum bpf_attach_type type);
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
@@@ -165,9 -161,6 +165,9 @@@ static inline void bpf_cgroup_storage_s
this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}
+struct bpf_cgroup_storage *
+cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
+ void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
@@@ -176,6 -169,7 +176,6 @@@ void bpf_cgroup_storage_link(struct bpf
enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
-void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
@@@ -216,9 -210,6 +216,9 @@@
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)
+
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
@@@ -389,6 -380,8 +389,6 @@@ static inline void bpf_cgroup_storage_s
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
-static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
- struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
@@@ -408,7 -401,6 +408,7 @@@ static inline int bpf_percpu_cgroup_sto
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
diff --combined include/linux/fs.h
index 8bfdf17f14a5,7680752cff24..e60db6ee065b
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@@ -68,7 -68,6 +68,7 @@@ struct fsverity_info
struct fsverity_operations;
struct fs_context;
struct fs_parameter_spec;
+struct fsinfo_context;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@@ -176,9 -175,6 +176,9 @@@ typedef int (dio_iodone_t)(struct kioc
/* File does not contribute to nr_files count */
#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000)
+/* File supports async buffered reads */
+#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000)
+
/*
* Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
* that indicates that they should check the contents of the iovec are
@@@ -319,8 -315,6 +319,8 @@@ enum rw_hint
#define IOCB_SYNC (1 << 5)
#define IOCB_WRITE (1 << 6)
#define IOCB_NOWAIT (1 << 7)
+/* iocb->ki_waitq is valid */
+#define IOCB_WAITQ (1 << 8)
#define IOCB_NOIO (1 << 9)
struct kiocb {
@@@ -335,10 -329,7 +335,10 @@@
int ki_flags;
u16 ki_hint;
u16 ki_ioprio; /* See linux/ioprio.h */
- unsigned int ki_cookie; /* for ->iopoll */
+ union {
+ unsigned int ki_cookie; /* for ->iopoll */
+ struct wait_page_queue *ki_waitq; /* for async buffered IO */
+ };
randomized_struct_fields_end
};
@@@ -480,6 -471,45 +480,6 @@@ struct address_space
* must be enforced here for CRIS, to let the least significant bit
* of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
*/
-struct request_queue;
-
-struct block_device {
- dev_t bd_dev; /* not a kdev_t - it's a search key */
- int bd_openers;
- struct inode * bd_inode; /* will die */
- struct super_block * bd_super;
- struct mutex bd_mutex; /* open/close mutex */
- void * bd_claiming;
- void * bd_holder;
- int bd_holders;
- bool bd_write_holder;
-#ifdef CONFIG_SYSFS
- struct list_head bd_holder_disks;
-#endif
- struct block_device * bd_contains;
- unsigned bd_block_size;
- u8 bd_partno;
- struct hd_struct * bd_part;
- /* number of times partitions within this device have been opened. */
- unsigned bd_part_count;
- int bd_invalidated;
- struct gendisk * bd_disk;
- struct request_queue * bd_queue;
- struct backing_dev_info *bd_bdi;
- struct list_head bd_list;
- /*
- * Private data. You must have bd_claim'ed the block_device
- * to use this. NOTE: bd_claim allows an owner to claim
- * the same device multiple times, the owner must take special
- * care to not mess up bd_private for that case.
- */
- unsigned long bd_private;
-
- /* The counter of freeze processes */
- int bd_fsfreeze_count;
- /* Mutex for freeze */
- struct mutex bd_fsfreeze_mutex;
-} __randomize_layout;
/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
#define PAGECACHE_TAG_DIRTY XA_MARK_0
@@@ -878,6 -908,8 +878,6 @@@ static inline unsigned imajor(const str
return MAJOR(inode->i_rdev);
}
-extern struct block_device *I_BDEV(struct inode *inode);
-
struct fown_struct {
rwlock_t lock; /* protects pid, uid, euid fields */
struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
@@@ -1349,7 -1381,6 +1349,7 @@@ extern int send_sigurg(struct fown_stru
#define SB_NODIRATIME 2048 /* Do not update directory access times */
#define SB_SILENT 32768
#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */
+#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */
#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */
#define SB_I_VERSION (1<<23) /* Update inode I_version field */
#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
@@@ -1533,9 -1564,6 +1533,9 @@@ struct super_block
spinlock_t s_inode_wblist_lock;
struct list_head s_inodes_wb; /* writeback inodes */
+
+ /* Superblock information */
+ u64 s_unique_id;
} __randomize_layout;
/* Helper functions so that in most cases filesystems will
@@@ -1747,6 -1775,14 +1747,6 @@@ struct dir_context
loff_t pos;
};
-struct block_device_operations;
-
-/* These macros are for out of kernel modules to test that
- * the kernel supports the unlocked_ioctl and compat_ioctl
- * fields in struct file_operations. */
-#define HAVE_COMPAT_IOCTL 1
-#define HAVE_UNLOCKED_IOCTL 1
-
/*
* These flags let !MMU mmap() govern direct device mapping vs immediate
* copying more easily for MAP_PRIVATE, especially for ROM filesystems.
@@@ -1884,8 -1920,6 +1884,6 @@@ ssize_t rw_copy_check_uvector(int type
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
- extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, rwf_t);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
@@@ -1924,9 -1958,6 +1922,9 @@@ struct super_operations
int (*thaw_super) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
+#ifdef CONFIG_FSINFO
+ int (*fsinfo)(struct path *, struct fsinfo_context *);
+#endif
int (*remount_fs) (struct super_block *, int *, char *);
void (*umount_begin) (struct super_block *);
@@@ -1949,27 -1980,27 +1947,27 @@@
/*
* Inode flags - they have no relation to superblock flags now
*/
-#define S_SYNC 1 /* Writes are synced at once */
-#define S_NOATIME 2 /* Do not update access times */
-#define S_APPEND 4 /* Append-only file */
-#define S_IMMUTABLE 8 /* Immutable file */
-#define S_DEAD 16 /* removed, but still open directory */
-#define S_NOQUOTA 32 /* Inode is not counted to quota */
-#define S_DIRSYNC 64 /* Directory modifications are synchronous */
-#define S_NOCMTIME 128 /* Do not update file c/mtime */
-#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
-#define S_PRIVATE 512 /* Inode is fs-internal */
-#define S_IMA 1024 /* Inode has an associated IMA struct */
-#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
-#define S_NOSEC 4096 /* no suid or xattr security attributes */
+#define S_SYNC (1 << 0) /* Writes are synced at once */
+#define S_NOATIME (1 << 1) /* Do not update access times */
+#define S_APPEND (1 << 2) /* Append-only file */
+#define S_IMMUTABLE (1 << 3) /* Immutable file */
+#define S_DEAD (1 << 4) /* removed, but still open directory */
+#define S_NOQUOTA (1 << 5) /* Inode is not counted to quota */
+#define S_DIRSYNC (1 << 6) /* Directory modifications are synchronous */
+#define S_NOCMTIME (1 << 7) /* Do not update file c/mtime */
+#define S_SWAPFILE (1 << 8) /* Do not truncate: swapon got its bmaps */
+#define S_PRIVATE (1 << 9) /* Inode is fs-internal */
+#define S_IMA (1 << 10) /* Inode has an associated IMA struct */
+#define S_AUTOMOUNT (1 << 11) /* Automount/referral quasi-directory */
+#define S_NOSEC (1 << 12) /* no suid or xattr security attributes */
#ifdef CONFIG_FS_DAX
-#define S_DAX 8192 /* Direct Access, avoiding the page cache */
+#define S_DAX (1 << 13) /* Direct Access, avoiding the page cache */
#else
-#define S_DAX 0 /* Make all the DAX code disappear */
+#define S_DAX 0 /* Make all the DAX code disappear */
#endif
-#define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */
-#define S_CASEFOLD 32768 /* Casefolded file */
-#define S_VERITY 65536 /* Verity file (using fs/verity/) */
+#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */
+#define S_CASEFOLD (1 << 15) /* Casefolded file */
+#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@@ -2231,9 -2262,18 +2229,9 @@@ struct file_system_type
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-#ifdef CONFIG_BLOCK
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
-#else
-static inline struct dentry *mount_bdev(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int))
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
extern struct dentry *mount_single(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
@@@ -2242,7 -2282,14 +2240,7 @@@ extern struct dentry *mount_nodev(struc
int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
void generic_shutdown_super(struct super_block *sb);
-#ifdef CONFIG_BLOCK
void kill_block_super(struct super_block *sb);
-#else
-static inline void kill_block_super(struct super_block *sb)
-{
- BUG();
-}
-#endif
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
@@@ -2532,16 -2579,93 +2530,16 @@@ extern struct kmem_cache *names_cachep
#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
-#ifdef CONFIG_BLOCK
-extern int register_blkdev(unsigned int, const char *);
-extern void unregister_blkdev(unsigned int, const char *);
-extern struct block_device *bdget(dev_t);
-extern struct block_device *bdgrab(struct block_device *bdev);
-extern void bd_set_size(struct block_device *, loff_t size);
-extern void bd_forget(struct inode *inode);
-extern void bdput(struct block_device *);
-extern void invalidate_bdev(struct block_device *);
-extern void iterate_bdevs(void (*)(struct block_device *, void *), void *);
-extern int sync_blockdev(struct block_device *bdev);
-extern struct super_block *freeze_bdev(struct block_device *);
-extern void emergency_thaw_all(void);
-extern void emergency_thaw_bdev(struct super_block *sb);
-extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
-extern int fsync_bdev(struct block_device *);
-
extern struct super_block *blockdev_superblock;
-
static inline bool sb_is_blkdev_sb(struct super_block *sb)
{
- return sb == blockdev_superblock;
-}
-#else
-static inline void bd_forget(struct inode *inode) {}
-static inline int sync_blockdev(struct block_device *bdev) { return 0; }
-static inline void invalidate_bdev(struct block_device *bdev) {}
-
-static inline struct super_block *freeze_bdev(struct block_device *sb)
-{
- return NULL;
+ return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock;
}
-static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
- return 0;
-}
-
-static inline int emergency_thaw_bdev(struct super_block *sb)
-{
- return 0;
-}
-
-static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
-{
-}
-
-static inline bool sb_is_blkdev_sb(struct super_block *sb)
-{
- return false;
-}
-#endif
+void emergency_thaw_all(void);
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
-#ifdef CONFIG_BLOCK
-extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
-extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
-extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
- void *holder);
-extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
- void *holder);
-extern struct block_device *bd_start_claiming(struct block_device *bdev,
- void *holder);
-extern void bd_finish_claiming(struct block_device *bdev,
- struct block_device *whole, void *holder);
-extern void bd_abort_claiming(struct block_device *bdev,
- struct block_device *whole, void *holder);
-extern void blkdev_put(struct block_device *bdev, fmode_t mode);
-
-#ifdef CONFIG_SYSFS
-extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
-extern void bd_unlink_disk_holder(struct block_device *bdev,
- struct gendisk *disk);
-#else
-static inline int bd_link_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
- return 0;
-}
-static inline void bd_unlink_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
-}
-#endif
-#endif
/* fs/char_dev.c */
#define CHRDEV_MAJOR_MAX 512
@@@ -2572,12 -2696,31 +2570,12 @@@ static inline void unregister_chrdev(un
__unregister_chrdev(major, 0, 256, name);
}
-/* fs/block_dev.c */
-#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
-#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
-
-#ifdef CONFIG_BLOCK
-#define BLKDEV_MAJOR_MAX 512
-extern const char *bdevname(struct block_device *bdev, char *buffer);
-extern struct block_device *lookup_bdev(const char *);
-extern void blkdev_show(struct seq_file *,off_t);
-
-#else
-#define BLKDEV_MAJOR_MAX 0
-#endif
-
extern void init_special_inode(struct inode *, umode_t, dev_t);
/* Invalid inode operations -- fs/bad_inode.c */
extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
-#ifdef CONFIG_BLOCK
-extern int revalidate_disk(struct gendisk *);
-extern int check_disk_change(struct block_device *);
-extern int __invalidate_device(struct block_device *, bool);
-#endif
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end);
@@@ -2682,7 -2825,7 +2680,7 @@@ static inline errseq_t filemap_sample_w
/**
* file_sample_sb_err - sample the current errseq_t to test for later errors
- * @mapping: mapping to be sampled
+ * @file: file pointer to be sampled
*
* Grab the most current superblock-level errseq_t value for the given
* struct file.
@@@ -2978,6 -3121,10 +2976,6 @@@ static inline void remove_inode_hash(st
extern void inode_sb_list_add(struct inode *inode);
-#ifdef CONFIG_BLOCK
-extern int bdev_read_only(struct block_device *);
-#endif
-extern int set_blocksize(struct block_device *, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
diff --combined include/linux/proc_fs.h
index 2df965cd0974,97b3f5f06db9..270cab43ca3d
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@@ -30,6 -30,7 +30,7 @@@ struct proc_ops
unsigned int proc_flags;
int (*proc_open)(struct inode *, struct file *);
ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *);
+ ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *);
ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *);
loff_t (*proc_lseek)(struct file *, loff_t, int);
int (*proc_release)(struct inode *, struct file *);
@@@ -133,8 -134,7 +134,8 @@@ struct proc_dir_entry *proc_create_net_
void *data);
extern struct pid *tgid_pidfd_to_pid(const struct file *file);
-extern int bpf_iter_init_seq_net(void *priv_data);
+struct bpf_iter_aux_info;
+extern int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux);
extern void bpf_iter_fini_seq_net(void *priv_data);
#ifdef CONFIG_PROC_PID_ARCH_STATUS
diff --combined kernel/bpf/cgroup.c
index 957cce1d5168,81dcf15990eb..e180bfcdb468
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@@ -37,34 -37,17 +37,34 @@@ static void bpf_cgroup_storages_free(st
}
static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
- struct bpf_prog *prog)
+ struct bpf_cgroup_storage *new_storages[],
+ enum bpf_attach_type type,
+ struct bpf_prog *prog,
+ struct cgroup *cgrp)
{
enum bpf_cgroup_storage_type stype;
+ struct bpf_cgroup_storage_key key;
+ struct bpf_map *map;
+
+ key.cgroup_inode_id = cgroup_id(cgrp);
+ key.attach_type = type;
for_each_cgroup_storage_type(stype) {
+ map = prog->aux->cgroup_storage[stype];
+ if (!map)
+ continue;
+
+ storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
+ if (storages[stype])
+ continue;
+
storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
if (IS_ERR(storages[stype])) {
- storages[stype] = NULL;
- bpf_cgroup_storages_free(storages);
+ bpf_cgroup_storages_free(new_storages);
return -ENOMEM;
}
+
+ new_storages[stype] = storages[stype];
}
return 0;
@@@ -80,7 -63,7 +80,7 @@@ static void bpf_cgroup_storages_assign(
}
static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
- struct cgroup* cgrp,
+ struct cgroup *cgrp,
enum bpf_attach_type attach_type)
{
enum bpf_cgroup_storage_type stype;
@@@ -89,6 -72,14 +89,6 @@@
bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}
-static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[])
-{
- enum bpf_cgroup_storage_type stype;
-
- for_each_cgroup_storage_type(stype)
- bpf_cgroup_storage_unlink(storages[stype]);
-}
-
/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
* It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
* doesn't free link memory, which will eventually be done by bpf_link's
@@@ -110,23 -101,22 +110,23 @@@ static void cgroup_bpf_release(struct w
struct cgroup *p, *cgrp = container_of(work, struct cgroup,
bpf.release_work);
struct bpf_prog_array *old_array;
+ struct list_head *storages = &cgrp->bpf.storages;
+ struct bpf_cgroup_storage *storage, *stmp;
+
unsigned int type;
mutex_lock(&cgroup_mutex);
for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
struct list_head *progs = &cgrp->bpf.progs[type];
- struct bpf_prog_list *pl, *tmp;
+ struct bpf_prog_list *pl, *pltmp;
- list_for_each_entry_safe(pl, tmp, progs, node) {
+ list_for_each_entry_safe(pl, pltmp, progs, node) {
list_del(&pl->node);
if (pl->prog)
bpf_prog_put(pl->prog);
if (pl->link)
bpf_cgroup_link_auto_detach(pl->link);
- bpf_cgroup_storages_unlink(pl->storage);
- bpf_cgroup_storages_free(pl->storage);
kfree(pl);
static_branch_dec(&cgroup_bpf_enabled_key);
}
@@@ -136,11 -126,6 +136,11 @@@
bpf_prog_array_free(old_array);
}
+ list_for_each_entry_safe(storage, stmp, storages, list_cg) {
+ bpf_cgroup_storage_unlink(storage);
+ bpf_cgroup_storage_free(storage);
+ }
+
mutex_unlock(&cgroup_mutex);
for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
@@@ -305,8 -290,6 +305,8 @@@ int cgroup_bpf_inherit(struct cgroup *c
for (i = 0; i < NR; i++)
INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
+ INIT_LIST_HEAD(&cgrp->bpf.storages);
+
for (i = 0; i < NR; i++)
if (compute_effective_progs(cgrp, i, &arrays[i]))
goto cleanup;
@@@ -439,7 -422,7 +439,7 @@@ int __cgroup_bpf_attach(struct cgroup *
struct list_head *progs = &cgrp->bpf.progs[type];
struct bpf_prog *old_prog = NULL;
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
- struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
+ struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
struct bpf_prog_list *pl;
int err;
@@@ -472,16 -455,17 +472,16 @@@
if (IS_ERR(pl))
return PTR_ERR(pl);
- if (bpf_cgroup_storages_alloc(storage, prog ? : link->link.prog))
+ if (bpf_cgroup_storages_alloc(storage, new_storage, type,
+ prog ? : link->link.prog, cgrp))
return -ENOMEM;
if (pl) {
old_prog = pl->prog;
- bpf_cgroup_storages_unlink(pl->storage);
- bpf_cgroup_storages_assign(old_storage, pl->storage);
} else {
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
if (!pl) {
- bpf_cgroup_storages_free(storage);
+ bpf_cgroup_storages_free(new_storage);
return -ENOMEM;
}
list_add_tail(&pl->node, progs);
@@@ -496,11 -480,12 +496,11 @@@
if (err)
goto cleanup;
- bpf_cgroup_storages_free(old_storage);
if (old_prog)
bpf_prog_put(old_prog);
else
static_branch_inc(&cgroup_bpf_enabled_key);
- bpf_cgroup_storages_link(pl->storage, cgrp, type);
+ bpf_cgroup_storages_link(new_storage, cgrp, type);
return 0;
cleanup:
@@@ -508,7 -493,9 +508,7 @@@
pl->prog = old_prog;
pl->link = NULL;
}
- bpf_cgroup_storages_free(pl->storage);
- bpf_cgroup_storages_assign(pl->storage, old_storage);
- bpf_cgroup_storages_link(pl->storage, cgrp, type);
+ bpf_cgroup_storages_free(new_storage);
if (!old_prog) {
list_del(&pl->node);
kfree(pl);
@@@ -692,6 -679,8 +692,6 @@@ int __cgroup_bpf_detach(struct cgroup *
/* now can actually delete it from this cgroup list */
list_del(&pl->node);
- bpf_cgroup_storages_unlink(pl->storage);
- bpf_cgroup_storages_free(pl->storage);
kfree(pl);
if (list_empty(progs))
/* last program was detached, reset flags to zero */
@@@ -1213,7 -1202,7 +1213,7 @@@ const struct bpf_verifier_ops cg_dev_ve
*/
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
struct ctl_table *table, int write,
- void **buf, size_t *pcount, loff_t *ppos,
+ char **buf, size_t *pcount, loff_t *ppos,
enum bpf_attach_type type)
{
struct bpf_sysctl_kern ctx = {
diff --combined kernel/irq/debugfs.c
index b95ff5d5f4bd,bac13f73cb52..3b0959deeb7a
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@@ -112,7 -112,6 +112,7 @@@ static const struct irq_bit_descr irqda
BIT_MASK_DESCR(IRQD_AFFINITY_SET),
BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
+ BIT_MASK_DESCR(IRQD_AFFINITY_ON_ACTIVATE),
BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
BIT_MASK_DESCR(IRQD_CAN_RESERVE),
BIT_MASK_DESCR(IRQD_MSI_NOMASK_QUIRK),
@@@ -121,10 -120,6 +121,10 @@@
BIT_MASK_DESCR(IRQD_WAKEUP_STATE),
BIT_MASK_DESCR(IRQD_WAKEUP_ARMED),
+
+ BIT_MASK_DESCR(IRQD_DEFAULT_TRIGGER_SET),
+
+ BIT_MASK_DESCR(IRQD_HANDLE_ENFORCE_IRQCTX),
};
static const struct irq_bit_descr irqdesc_states[] = {
@@@ -206,7 -201,7 +206,7 @@@ static ssize_t irq_debug_write(struct f
static const struct file_operations dfs_irq_ops = {
.open = irq_debug_open,
.write = irq_debug_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined kernel/kallsyms.c
index 95cb74f73292,1c46a1f2e9f9..ad0646efe554
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@@ -24,7 -24,6 +24,7 @@@
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <linux/compiler.h>
/*
@@@ -438,7 -437,6 +438,7 @@@ struct kallsym_iter
loff_t pos_arch_end;
loff_t pos_mod_end;
loff_t pos_ftrace_mod_end;
+ loff_t pos_bpf_end;
unsigned long value;
unsigned int nameoff; /* If iterating in core kernel symbols. */
char type;
@@@ -482,11 -480,6 +482,11 @@@ static int get_ksymbol_mod(struct kalls
return 1;
}
+/*
+ * ftrace_mod_get_kallsym() may also get symbols for pages allocated for ftrace
+ * purposes. In that case "__builtin__ftrace" is used as a module name, even
+ * though "__builtin__ftrace" is not a module.
+ */
static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
{
int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
@@@ -503,33 -496,11 +503,33 @@@
static int get_ksymbol_bpf(struct kallsym_iter *iter)
{
+ int ret;
+
strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
iter->exported = 0;
- return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
- &iter->value, &iter->type,
- iter->name) < 0 ? 0 : 1;
+ ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
+ &iter->value, &iter->type,
+ iter->name);
+ if (ret < 0) {
+ iter->pos_bpf_end = iter->pos;
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * This uses "__builtin__kprobes" as a module name for symbols for pages
+ * allocated for kprobes' purposes, even though "__builtin__kprobes" is not a
+ * module.
+ */
+static int get_ksymbol_kprobe(struct kallsym_iter *iter)
+{
+ strlcpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN);
+ iter->exported = 0;
+ return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end,
+ &iter->value, &iter->type,
+ iter->name) < 0 ? 0 : 1;
}
/* Returns space to next name. */
@@@ -556,7 -527,6 +556,7 @@@ static void reset_iter(struct kallsym_i
iter->pos_arch_end = 0;
iter->pos_mod_end = 0;
iter->pos_ftrace_mod_end = 0;
+ iter->pos_bpf_end = 0;
}
}
@@@ -581,11 -551,7 +581,11 @@@ static int update_iter_mod(struct kalls
get_ksymbol_ftrace_mod(iter))
return 1;
- return get_ksymbol_bpf(iter);
+ if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) &&
+ get_ksymbol_bpf(iter))
+ return 1;
+
+ return get_ksymbol_kprobe(iter);
}
/* Returns false if pos at or past end of file. */
@@@ -738,7 -704,7 +738,7 @@@ const char *kdb_walk_kallsyms(loff_t *p
static const struct proc_ops kallsyms_proc_ops = {
.proc_open = kallsyms_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = seq_release_private,
};
diff --combined kernel/locking/lockdep_proc.c
index 02ef87f50df2,60c725f53c26..dc4dec091486
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@@ -423,7 -423,7 +423,7 @@@ static void seq_lock_time(struct seq_fi
seq_time(m, lt->min);
seq_time(m, lt->max);
seq_time(m, lt->total);
- seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
+ seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
}
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
@@@ -669,7 -669,7 +669,7 @@@ static int lock_stat_release(struct ino
static const struct proc_ops lock_stat_proc_ops = {
.proc_open = lock_stat_open,
.proc_write = lock_stat_write,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = lock_stat_release,
};
diff --combined kernel/module.c
index e7b4ff7e4fd0,f558214d6086..9abfc0cec012
--- a/kernel/module.c
+++ b/kernel/module.c
@@@ -3237,7 -3237,7 +3237,7 @@@ static int find_module_sections(struct
if (section_addr(info, "__obsparm"))
pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
- info->debug = section_objs(info, "__verbose",
+ info->debug = section_objs(info, "__dyndbg",
sizeof(*info->debug), &info->num_debug);
return 0;
@@@ -4391,7 -4391,7 +4391,7 @@@ static int modules_open(struct inode *i
static const struct proc_ops modules_proc_ops = {
.proc_flags = PROC_ENTRY_PERMANENT,
.proc_open = modules_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = seq_release,
};
diff --combined kernel/sched/psi.c
index 967732c0766c,6795170140a0..4c084d66746a
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@@ -190,6 -190,7 +190,6 @@@ static void group_init(struct psi_grou
INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
mutex_init(&group->avgs_lock);
/* Init trigger-related members */
- atomic_set(&group->poll_scheduled, 0);
mutex_init(&group->trigger_lock);
INIT_LIST_HEAD(&group->triggers);
memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
@@@ -198,7 -199,7 +198,7 @@@
memset(group->polling_total, 0, sizeof(group->polling_total));
group->polling_next_update = ULLONG_MAX;
group->polling_until = 0;
- rcu_assign_pointer(group->poll_kworker, NULL);
+ rcu_assign_pointer(group->poll_task, NULL);
}
void __init psi_init(void)
@@@ -546,38 -547,47 +546,38 @@@ static u64 update_triggers(struct psi_g
return now + group->poll_min_period;
}
-/*
- * Schedule polling if it's not already scheduled. It's safe to call even from
- * hotpath because even though kthread_queue_delayed_work takes worker->lock
- * spinlock that spinlock is never contended due to poll_scheduled atomic
- * preventing such competition.
- */
+/* Schedule polling if it's not already scheduled. */
static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
{
- struct kthread_worker *kworker;
+ struct task_struct *task;
- /* Do not reschedule if already scheduled */
- if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0)
+ /*
+ * Do not reschedule if already scheduled.
+ * Possible race with a timer scheduled after this check but before
+ * mod_timer below can be tolerated because group->polling_next_update
+ * will keep updates on schedule.
+ */
+ if (timer_pending(&group->poll_timer))
return;
rcu_read_lock();
- kworker = rcu_dereference(group->poll_kworker);
+ task = rcu_dereference(group->poll_task);
/*
* kworker might be NULL in case psi_trigger_destroy races with
* psi_task_change (hotpath) which can't use locks
*/
- if (likely(kworker))
- kthread_queue_delayed_work(kworker, &group->poll_work, delay);
- else
- atomic_set(&group->poll_scheduled, 0);
+ if (likely(task))
+ mod_timer(&group->poll_timer, jiffies + delay);
rcu_read_unlock();
}
-static void psi_poll_work(struct kthread_work *work)
+static void psi_poll_work(struct psi_group *group)
{
- struct kthread_delayed_work *dwork;
- struct psi_group *group;
u32 changed_states;
u64 now;
- dwork = container_of(work, struct kthread_delayed_work, work);
- group = container_of(dwork, struct psi_group, poll_work);
-
- atomic_set(&group->poll_scheduled, 0);
-
mutex_lock(&group->trigger_lock);
now = sched_clock();
@@@ -613,32 -623,6 +613,32 @@@ out
mutex_unlock(&group->trigger_lock);
}
+static int psi_poll_worker(void *data)
+{
+ struct psi_group *group = (struct psi_group *)data;
+
+ sched_set_fifo_low(current);
+
+ while (true) {
+ wait_event_interruptible(group->poll_wait,
+ atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ psi_poll_work(group);
+ }
+ return 0;
+}
+
+static void poll_timer_fn(struct timer_list *t)
+{
+ struct psi_group *group = from_timer(group, t, poll_timer);
+
+ atomic_set(&group->poll_wakeup, 1);
+ wake_up_interruptible(&group->poll_wait);
+}
+
static void record_times(struct psi_group_cpu *groupc, int cpu,
bool memstall_tick)
{
@@@ -1115,20 -1099,22 +1115,20 @@@ struct psi_trigger *psi_trigger_create(
mutex_lock(&group->trigger_lock);
- if (!rcu_access_pointer(group->poll_kworker)) {
- struct sched_param param = {
- .sched_priority = 1,
- };
- struct kthread_worker *kworker;
+ if (!rcu_access_pointer(group->poll_task)) {
+ struct task_struct *task;
- kworker = kthread_create_worker(0, "psimon");
- if (IS_ERR(kworker)) {
+ task = kthread_create(psi_poll_worker, group, "psimon");
+ if (IS_ERR(task)) {
kfree(t);
mutex_unlock(&group->trigger_lock);
- return ERR_CAST(kworker);
+ return ERR_CAST(task);
}
- sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, ¶m);
- kthread_init_delayed_work(&group->poll_work,
- psi_poll_work);
- rcu_assign_pointer(group->poll_kworker, kworker);
+ atomic_set(&group->poll_wakeup, 0);
+ init_waitqueue_head(&group->poll_wait);
+ wake_up_process(task);
+ timer_setup(&group->poll_timer, poll_timer_fn, 0);
+ rcu_assign_pointer(group->poll_task, task);
}
list_add(&t->node, &group->triggers);
@@@ -1146,7 -1132,7 +1146,7 @@@ static void psi_trigger_destroy(struct
{
struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
struct psi_group *group = t->group;
- struct kthread_worker *kworker_to_destroy = NULL;
+ struct task_struct *task_to_destroy = NULL;
if (static_branch_likely(&psi_disabled))
return;
@@@ -1172,13 -1158,13 +1172,13 @@@
period = min(period, div_u64(tmp->win.size,
UPDATES_PER_WINDOW));
group->poll_min_period = period;
- /* Destroy poll_kworker when the last trigger is destroyed */
+ /* Destroy poll_task when the last trigger is destroyed */
if (group->poll_states == 0) {
group->polling_until = 0;
- kworker_to_destroy = rcu_dereference_protected(
- group->poll_kworker,
+ task_to_destroy = rcu_dereference_protected(
+ group->poll_task,
lockdep_is_held(&group->trigger_lock));
- rcu_assign_pointer(group->poll_kworker, NULL);
+ rcu_assign_pointer(group->poll_task, NULL);
}
}
@@@ -1186,23 -1172,25 +1186,23 @@@
/*
* Wait for both *trigger_ptr from psi_trigger_replace and
- * poll_kworker RCUs to complete their read-side critical sections
- * before destroying the trigger and optionally the poll_kworker
+ * poll_task RCUs to complete their read-side critical sections
+ * before destroying the trigger and optionally the poll_task
*/
synchronize_rcu();
/*
* Destroy the kworker after releasing trigger_lock to prevent a
* deadlock while waiting for psi_poll_work to acquire trigger_lock
*/
- if (kworker_to_destroy) {
+ if (task_to_destroy) {
/*
* After the RCU grace period has expired, the worker
- * can no longer be found through group->poll_kworker.
+ * can no longer be found through group->poll_task.
* But it might have been already scheduled before
* that - deschedule it cleanly before destroying it.
*/
- kthread_cancel_delayed_work_sync(&group->poll_work);
- atomic_set(&group->poll_scheduled, 0);
-
- kthread_destroy_worker(kworker_to_destroy);
+ del_timer_sync(&group->poll_timer);
+ kthread_stop(task_to_destroy);
}
kfree(t);
}
@@@ -1317,7 -1305,7 +1317,7 @@@ static int psi_fop_release(struct inod
static const struct proc_ops psi_io_proc_ops = {
.proc_open = psi_io_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_write = psi_io_write,
.proc_poll = psi_fop_poll,
@@@ -1326,7 -1314,7 +1326,7 @@@
static const struct proc_ops psi_memory_proc_ops = {
.proc_open = psi_memory_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_write = psi_memory_write,
.proc_poll = psi_fop_poll,
@@@ -1335,7 -1323,7 +1335,7 @@@
static const struct proc_ops psi_cpu_proc_ops = {
.proc_open = psi_cpu_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_write = psi_cpu_write,
.proc_poll = psi_fop_poll,
diff --combined kernel/trace/ftrace.c
index 62e9b93d5fee,6a645f2ebf47..a3093a84bae3
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@@ -2388,14 -2388,6 +2388,14 @@@ struct ftrace_ops direct_ops =
.flags = FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE
| FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
| FTRACE_OPS_FL_PERMANENT,
+ /*
+ * By declaring the main trampoline as this trampoline
+ * it will never have one allocated for it. Allocated
+ * trampolines should not call direct functions.
+ * The direct_ops should only be called by the builtin
+ * ftrace_regs_caller trampoline.
+ */
+ .trampoline = FTRACE_REGS_ADDR,
};
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
@@@ -2772,50 -2764,6 +2772,50 @@@ void __weak arch_ftrace_trampoline_free
{
}
+/* List of trace_ops that have allocated trampolines */
+static LIST_HEAD(ftrace_ops_trampoline_list);
+
+static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
+{
+ lockdep_assert_held(&ftrace_lock);
+ list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
+}
+
+static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
+{
+ lockdep_assert_held(&ftrace_lock);
+ list_del_rcu(&ops->list);
+}
+
+/*
+ * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
+ * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
+ * not a module.
+ */
+#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
+#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
+
+static void ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+ if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
+ ops->trampoline) {
+ /*
+ * Record the text poke event before the ksymbol unregister
+ * event.
+ */
+ perf_event_text_poke((void *)ops->trampoline,
+ (void *)ops->trampoline,
+ ops->trampoline_size, NULL, 0);
+ perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
+ ops->trampoline, ops->trampoline_size,
+ true, FTRACE_TRAMPOLINE_SYM);
+ /* Remove from kallsyms after the perf events */
+ ftrace_remove_trampoline_from_kallsyms(ops);
+ }
+
+ arch_ftrace_trampoline_free(ops);
+}
+
static void ftrace_startup_enable(int command)
{
if (saved_ftrace_func != ftrace_trace_function) {
@@@ -2986,7 -2934,7 +2986,7 @@@ int ftrace_shutdown(struct ftrace_ops *
synchronize_rcu_tasks();
free_ops:
- arch_ftrace_trampoline_free(ops);
+ ftrace_trampoline_free(ops);
}
return 0;
@@@ -5613,21 -5561,21 +5613,21 @@@ int ftrace_regex_release(struct inode *
static const struct file_operations ftrace_avail_fops = {
.open = ftrace_avail_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private,
};
static const struct file_operations ftrace_enabled_fops = {
.open = ftrace_enabled_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private,
};
static const struct file_operations ftrace_filter_fops = {
.open = ftrace_filter_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = ftrace_filter_write,
.llseek = tracing_lseek,
.release = ftrace_regex_release,
@@@ -5635,7 -5583,7 +5635,7 @@@
static const struct file_operations ftrace_notrace_fops = {
.open = ftrace_notrace_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = ftrace_notrace_write,
.llseek = tracing_lseek,
.release = ftrace_regex_release,
@@@ -6033,7 -5981,7 +6033,7 @@@ ftrace_graph_write(struct file *file, c
static const struct file_operations ftrace_graph_fops = {
.open = ftrace_graph_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = ftrace_graph_write,
.llseek = tracing_lseek,
.release = ftrace_graph_release,
@@@ -6041,7 -5989,7 +6041,7 @@@
static const struct file_operations ftrace_graph_notrace_fops = {
.open = ftrace_graph_notrace_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = ftrace_graph_write,
.llseek = tracing_lseek,
.release = ftrace_graph_release,
@@@ -6230,27 -6178,6 +6230,27 @@@ struct ftrace_mod_map
unsigned int num_funcs;
};
+static int ftrace_get_trampoline_kallsym(unsigned int symnum,
+ unsigned long *value, char *type,
+ char *name, char *module_name,
+ int *exported)
+{
+ struct ftrace_ops *op;
+
+ list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
+ if (!op->trampoline || symnum--)
+ continue;
+ *value = op->trampoline;
+ *type = 't';
+ strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
+ strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
+ *exported = 0;
+ return 0;
+ }
+
+ return -ERANGE;
+}
+
#ifdef CONFIG_MODULES
#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
@@@ -6587,7 -6514,6 +6587,7 @@@ int ftrace_mod_get_kallsym(unsigned in
{
struct ftrace_mod_map *mod_map;
struct ftrace_mod_func *mod_func;
+ int ret;
preempt_disable();
list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
@@@ -6614,10 -6540,8 +6614,10 @@@
WARN_ON(1);
break;
}
+ ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
+ module_name, exported);
preempt_enable();
- return -ERANGE;
+ return ret;
}
#else
@@@ -6629,18 -6553,6 +6629,18 @@@ allocate_ftrace_mod_map(struct module *
{
return NULL;
}
+int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name, char *module_name,
+ int *exported)
+{
+ int ret;
+
+ preempt_disable();
+ ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
+ module_name, exported);
+ preempt_enable();
+ return ret;
+}
#endif /* CONFIG_MODULES */
struct ftrace_init_func {
@@@ -6821,24 -6733,7 +6821,24 @@@ void __weak arch_ftrace_update_trampoli
static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
+ unsigned long trampoline = ops->trampoline;
+
arch_ftrace_update_trampoline(ops);
+ if (ops->trampoline && ops->trampoline != trampoline &&
+ (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
+ /* Add to kallsyms before the perf events */
+ ftrace_add_trampoline_to_kallsyms(ops);
+ perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
+ ops->trampoline, ops->trampoline_size, false,
+ FTRACE_TRAMPOLINE_SYM);
+ /*
+ * Record the perf text poke event after the ksymbol register
+ * event.
+ */
+ perf_event_text_poke((void *)ops->trampoline, NULL, 0,
+ (void *)ops->trampoline,
+ ops->trampoline_size);
+ }
}
void ftrace_init_trace_array(struct trace_array *tr)
@@@ -7411,7 -7306,7 +7411,7 @@@ ftrace_pid_release(struct inode *inode
static const struct file_operations ftrace_pid_fops = {
.open = ftrace_pid_open,
.write = ftrace_pid_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = tracing_lseek,
.release = ftrace_pid_release,
};
@@@ -7419,7 -7314,7 +7419,7 @@@
static const struct file_operations ftrace_no_pid_fops = {
.open = ftrace_no_pid_open,
.write = ftrace_no_pid_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = tracing_lseek,
.release = ftrace_pid_release,
};
diff --combined kernel/trace/trace.c
index 716956d751b2,36dbb58680fc..6da6cb3a0c46
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@@ -1543,7 -1543,8 +1543,7 @@@ static void latency_fsnotify_workfn(str
{
struct trace_array *tr = container_of(work, struct trace_array,
fsnotify_work);
- fsnotify(tr->d_max_latency->d_inode, FS_MODIFY,
- tr->d_max_latency->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}
static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
@@@ -2002,6 -2003,7 +2002,6 @@@ static void tracing_reset_cpu(struct ar
void tracing_reset_online_cpus(struct array_buffer *buf)
{
struct trace_buffer *buffer = buf->buffer;
- int cpu;
if (!buffer)
return;
@@@ -2013,7 -2015,8 +2013,7 @@@
buf->time_start = buffer_ftrace_now(buf, buf->cpu);
- for_each_online_cpu(cpu)
- ring_buffer_reset_cpu(buffer, cpu);
+ ring_buffer_reset_online_cpus(buffer);
ring_buffer_record_enable(buffer);
}
@@@ -3343,16 -3346,12 +3343,16 @@@ int trace_array_printk(struct trace_arr
int ret;
va_list ap;
- if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
- return 0;
-
if (!tr)
return -ENOENT;
+ /* This is only allowed for created instances */
+ if (tr == &global_trace)
+ return 0;
+
+ if (!(tr->trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
va_start(ap, fmt);
ret = trace_array_vprintk(tr, ip, fmt, ap);
va_end(ap);
@@@ -4612,7 -4611,7 +4612,7 @@@ loff_t tracing_lseek(struct file *file
static const struct file_operations tracing_fops = {
.open = tracing_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = tracing_write_stub,
.llseek = tracing_lseek,
.release = tracing_release,
@@@ -4620,7 -4619,7 +4620,7 @@@
static const struct file_operations show_traces_fops = {
.open = show_traces_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = show_traces_release,
};
@@@ -4958,7 -4957,7 +4958,7 @@@ static int tracing_trace_options_open(s
static const struct file_operations tracing_iter_fops = {
.open = tracing_trace_options_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = tracing_single_release_tr,
.write = tracing_trace_options_write,
@@@ -5298,7 -5297,7 +5298,7 @@@ static int tracing_saved_tgids_open(str
static const struct file_operations tracing_saved_tgids_fops = {
.open = tracing_saved_tgids_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
@@@ -5377,7 -5376,7 +5377,7 @@@ static int tracing_saved_cmdlines_open(
static const struct file_operations tracing_saved_cmdlines_fops = {
.open = tracing_saved_cmdlines_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
@@@ -5543,7 -5542,7 +5543,7 @@@ static int tracing_eval_map_open(struc
static const struct file_operations tracing_eval_map_fops = {
.open = tracing_eval_map_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release,
};
@@@ -5888,7 -5887,7 +5888,7 @@@ int tracing_set_tracer(struct trace_arr
}
/* If trace pipe files are being read, we can't change the tracer */
- if (tr->current_trace->ref) {
+ if (tr->trace_ref) {
ret = -EBUSY;
goto out;
}
@@@ -6104,7 -6103,7 +6104,7 @@@ static int tracing_open_pipe(struct ino
nonseekable_open(inode, filp);
- tr->current_trace->ref++;
+ tr->trace_ref++;
out:
mutex_unlock(&trace_types_lock);
return ret;
@@@ -6123,7 -6122,7 +6123,7 @@@ static int tracing_release_pipe(struct
mutex_lock(&trace_types_lock);
- tr->current_trace->ref--;
+ tr->trace_ref--;
if (iter->trace->pipe_close)
iter->trace->pipe_close(iter);
@@@ -7120,7 -7119,7 +7120,7 @@@ static const struct file_operations tra
static const struct file_operations trace_clock_fops = {
.open = tracing_clock_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = tracing_single_release_tr,
.write = tracing_clock_write,
@@@ -7128,7 -7127,7 +7128,7 @@@
static const struct file_operations trace_time_stamp_mode_fops = {
.open = tracing_time_stamp_mode_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = tracing_single_release_tr,
};
@@@ -7136,7 -7135,7 +7136,7 @@@
#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
.open = tracing_snapshot_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = tracing_snapshot_write,
.llseek = tracing_lseek,
.release = tracing_snapshot_release,
@@@ -7392,7 -7391,7 +7392,7 @@@ static int tracing_err_log_release(stru
static const struct file_operations tracing_err_log_fops = {
.open = tracing_err_log_open,
.write = tracing_err_log_write,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = tracing_err_log_release,
};
@@@ -7425,7 -7424,7 +7425,7 @@@ static int tracing_buffers_open(struct
filp->private_data = info;
- tr->current_trace->ref++;
+ tr->trace_ref++;
mutex_unlock(&trace_types_lock);
@@@ -7526,7 -7525,7 +7526,7 @@@ static int tracing_buffers_release(stru
mutex_lock(&trace_types_lock);
- iter->tr->current_trace->ref--;
+ iter->tr->trace_ref--;
__trace_array_put(iter->tr);
@@@ -8734,7 -8733,7 +8734,7 @@@ static int __remove_instance(struct tra
int i;
/* Reference counter for a newly created trace array = 1. */
- if (tr->ref > 1 || (tr->current_trace && tr->current_trace->ref))
+ if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
return -EBUSY;
list_del(&tr->list);
@@@ -8946,7 -8945,9 +8946,7 @@@ struct dentry *tracing_init_dentry(void
if (tr->dir)
return NULL;
- if (WARN_ON(!tracefs_initialized()) ||
- (IS_ENABLED(CONFIG_DEBUG_FS) &&
- WARN_ON(!debugfs_initialized())))
+ if (WARN_ON(!tracefs_initialized()))
return ERR_PTR(-ENODEV);
/*
diff --combined lib/dynamic_debug.c
index 1d012e597cc3,2317d29e16de..fdf6a1805d26
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@@ -11,7 -11,7 +11,7 @@@
* Copyright (C) 2013 Du, Changbin <changbin.du(a)gmail.com>
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+#define pr_fmt(fmt) "dyndbg: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@@ -39,8 -39,8 +39,8 @@@
#include <rdma/ib_verbs.h>
-extern struct _ddebug __start___verbose[];
-extern struct _ddebug __stop___verbose[];
+extern struct _ddebug __start___dyndbg[];
+extern struct _ddebug __stop___dyndbg[];
struct ddebug_table {
struct list_head link;
@@@ -62,11 -62,6 +62,11 @@@ struct ddebug_iter
unsigned int idx;
};
+struct flag_settings {
+ unsigned int flags;
+ unsigned int mask;
+};
+
static DEFINE_MUTEX(ddebug_lock);
static LIST_HEAD(ddebug_tables);
static int verbose;
@@@ -92,33 -87,30 +92,33 @@@ static struct { unsigned flag:8; char o
{ _DPRINTK_FLAGS_NONE, '_' },
};
+struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; };
+
/* format a string into buf[] which describes the _ddebug's flags */
-static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
- size_t maxlen)
+static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb)
{
- char *p = buf;
+ char *p = fb->buf;
int i;
- BUG_ON(maxlen < 6);
for (i = 0; i < ARRAY_SIZE(opt_array); ++i)
- if (dp->flags & opt_array[i].flag)
+ if (flags & opt_array[i].flag)
*p++ = opt_array[i].opt_char;
- if (p == buf)
+ if (p == fb->buf)
*p++ = '_';
*p = '\0';
- return buf;
+ return fb->buf;
}
-#define vpr_info(fmt, ...) \
+#define vnpr_info(lvl, fmt, ...) \
do { \
- if (verbose) \
+ if (verbose >= lvl) \
pr_info(fmt, ##__VA_ARGS__); \
} while (0)
+#define vpr_info(fmt, ...) vnpr_info(1, fmt, ##__VA_ARGS__)
+#define v2pr_info(fmt, ...) vnpr_info(2, fmt, ##__VA_ARGS__)
+
static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
{
/* trim any trailing newlines */
@@@ -132,10 -124,10 +132,10 @@@
vpr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\"
format=\"%.*s\" lineno=%u-%u\n",
msg,
- query->function ? query->function : "",
- query->filename ? query->filename : "",
- query->module ? query->module : "",
- fmtlen, query->format ? query->format : "",
+ query->function ?: "",
+ query->filename ?: "",
+ query->module ?: "",
+ fmtlen, query->format ?: "",
query->first_lineno, query->last_lineno);
}
@@@ -146,13 -138,13 +146,13 @@@
* logs the changes. Takes ddebug_lock.
*/
static int ddebug_change(const struct ddebug_query *query,
- unsigned int flags, unsigned int mask)
+ struct flag_settings *modifiers)
{
int i;
struct ddebug_table *dt;
unsigned int newflags;
unsigned int nfound = 0;
- char flagbuf[10];
+ struct flagsbuf fbuf;
/* search for matching ddebugs */
mutex_lock(&ddebug_lock);
@@@ -181,16 -173,9 +181,16 @@@
continue;
/* match against the format */
- if (query->format &&
- !strstr(dp->format, query->format))
- continue;
+ if (query->format) {
+ if (*query->format == '^') {
+ char *p;
+ /* anchored search. match must be at beginning */
+ p = strstr(dp->format, query->format+1);
+ if (p != dp->format)
+ continue;
+ } else if (!strstr(dp->format, query->format))
+ continue;
+ }
/* match against the line number range */
if (query->first_lineno &&
@@@ -202,21 -187,22 +202,21 @@@
nfound++;
- newflags = (dp->flags & mask) | flags;
+ newflags = (dp->flags & modifiers->mask) | modifiers->flags;
if (newflags == dp->flags)
continue;
#ifdef CONFIG_JUMP_LABEL
if (dp->flags & _DPRINTK_FLAGS_PRINT) {
- if (!(flags & _DPRINTK_FLAGS_PRINT))
+ if (!(modifiers->flags & _DPRINTK_FLAGS_PRINT))
static_branch_disable(&dp->key.dd_key_true);
- } else if (flags & _DPRINTK_FLAGS_PRINT)
+ } else if (modifiers->flags & _DPRINTK_FLAGS_PRINT)
static_branch_enable(&dp->key.dd_key_true);
#endif
dp->flags = newflags;
- vpr_info("changed %s:%d [%s]%s =%s\n",
+ v2pr_info("changed %s:%d [%s]%s =%s\n",
trim_prefix(dp->filename), dp->lineno,
dt->mod_name, dp->function,
- ddebug_describe_flags(dp, flagbuf,
- sizeof(flagbuf)));
+ ddebug_describe_flags(dp->flags, &fbuf));
}
}
mutex_unlock(&ddebug_lock);
@@@ -303,41 -289,6 +303,41 @@@ static inline int parse_lineno(const ch
return 0;
}
+static int parse_linerange(struct ddebug_query *query, const char *first)
+{
+ char *last = strchr(first, '-');
+
+ if (query->first_lineno || query->last_lineno) {
+ pr_err("match-spec: line used 2x\n");
+ return -EINVAL;
+ }
+ if (last)
+ *last++ = '\0';
+ if (parse_lineno(first, &query->first_lineno) < 0)
+ return -EINVAL;
+ if (last) {
+ /* range <first>-<last> */
+ if (parse_lineno(last, &query->last_lineno) < 0)
+ return -EINVAL;
+
+ /* special case for last lineno not specified */
+ if (query->last_lineno == 0)
+ query->last_lineno = UINT_MAX;
+
+ if (query->last_lineno < query->first_lineno) {
+ pr_err("last-line:%d < 1st-line:%d\n",
+ query->last_lineno,
+ query->first_lineno);
+ return -EINVAL;
+ }
+ } else {
+ query->last_lineno = query->first_lineno;
+ }
+ vpr_info("parsed line %d-%d\n", query->first_lineno,
+ query->last_lineno);
+ return 0;
+}
+
static int check_set(const char **dest, char *src, char *name)
{
int rc = 0;
@@@ -353,8 -304,7 +353,8 @@@
/*
* Parse words[] as a ddebug query specification, which is a series
- * of (keyword, value) pairs chosen from these possibilities:
+ * of (keyword, value) pairs or combined keyword=value terms,
+ * chosen from these possibilities:
*
* func <function-name>
* file <full-pathname>
@@@ -372,62 -322,61 +372,62 @@@ static int ddebug_parse_query(char *wor
{
unsigned int i;
int rc = 0;
-
- /* check we have an even number of words */
- if (nwords % 2 != 0) {
- pr_err("expecting pairs of match-spec <value>\n");
- return -EINVAL;
- }
- memset(query, 0, sizeof(*query));
+ char *fline;
+ char *keyword, *arg;
if (modname)
/* support $modname.dyndbg=<multiple queries> */
query->module = modname;
- for (i = 0; i < nwords; i += 2) {
- if (!strcmp(words[i], "func")) {
- rc = check_set(&query->function, words[i+1], "func");
- } else if (!strcmp(words[i], "file")) {
- rc = check_set(&query->filename, words[i+1], "file");
- } else if (!strcmp(words[i], "module")) {
- rc = check_set(&query->module, words[i+1], "module");
- } else if (!strcmp(words[i], "format")) {
- string_unescape_inplace(words[i+1], UNESCAPE_SPACE |
- UNESCAPE_OCTAL |
- UNESCAPE_SPECIAL);
- rc = check_set(&query->format, words[i+1], "format");
- } else if (!strcmp(words[i], "line")) {
- char *first = words[i+1];
- char *last = strchr(first, '-');
- if (query->first_lineno || query->last_lineno) {
- pr_err("match-spec: line used 2x\n");
+ for (i = 0; i < nwords; i++) {
+ /* accept keyword=arg */
+ vpr_info("%d w:%s\n", i, words[i]);
+
+ keyword = words[i];
+ arg = strchr(keyword, '=');
+ if (arg) {
+ *arg++ = '\0';
+ } else {
+ i++; /* next word is arg */
+ if (!(i < nwords)) {
+ pr_err("missing arg to keyword: %s\n", keyword);
return -EINVAL;
}
- if (last)
- *last++ = '\0';
- if (parse_lineno(first, &query->first_lineno) < 0)
- return -EINVAL;
- if (last) {
- /* range <first>-<last> */
- if (parse_lineno(last, &query->last_lineno) < 0)
- return -EINVAL;
+ arg = words[i];
+ }
+ vpr_info("%d key:%s arg:%s\n", i, keyword, arg);
- /* special case for last lineno not specified */
- if (query->last_lineno == 0)
- query->last_lineno = UINT_MAX;
+ if (!strcmp(keyword, "func")) {
+ rc = check_set(&query->function, arg, "func");
+ } else if (!strcmp(keyword, "file")) {
+ if (check_set(&query->filename, arg, "file"))
+ return -EINVAL;
- if (query->last_lineno < query->first_lineno) {
- pr_err("last-line:%d < 1st-line:%d\n",
- query->last_lineno,
- query->first_lineno);
+ /* tail :$info is function or line-range */
+ fline = strchr(query->filename, ':');
+ if (!fline)
+ break;
+ *fline++ = '\0';
+ if (isalpha(*fline) || *fline == '*' || *fline == '?') {
+ /* take as function name */
+ if (check_set(&query->function, fline, "func"))
return -EINVAL;
- }
} else {
- query->last_lineno = query->first_lineno;
+ if (parse_linerange(query, fline))
+ return -EINVAL;
}
+ } else if (!strcmp(keyword, "module")) {
+ rc = check_set(&query->module, arg, "module");
+ } else if (!strcmp(keyword, "format")) {
+ string_unescape_inplace(arg, UNESCAPE_SPACE |
+ UNESCAPE_OCTAL |
+ UNESCAPE_SPECIAL);
+ rc = check_set(&query->format, arg, "format");
+ } else if (!strcmp(keyword, "line")) {
+ if (parse_linerange(query, arg))
+ return -EINVAL;
} else {
- pr_err("unknown keyword \"%s\"\n", words[i]);
+ pr_err("unknown keyword \"%s\"\n", keyword);
return -EINVAL;
}
if (rc)
@@@ -443,9 -392,11 +443,9 @@@
* flags fields of matched _ddebug's. Returns 0 on success
* or <0 on error.
*/
-static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
- unsigned int *maskp)
+static int ddebug_parse_flags(const char *str, struct flag_settings *modifiers)
{
- unsigned flags = 0;
- int op = '=', i;
+ int op, i;
switch (*str) {
case '+':
@@@ -462,40 -413,40 +462,40 @@@
for (; *str ; ++str) {
for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
if (*str == opt_array[i].opt_char) {
- flags |= opt_array[i].flag;
+ modifiers->flags |= opt_array[i].flag;
break;
}
}
if (i < 0) {
- pr_err("unknown flag '%c' in \"%s\"\n", *str, str);
+ pr_err("unknown flag '%c'\n", *str);
return -EINVAL;
}
}
- vpr_info("flags=0x%x\n", flags);
+ vpr_info("flags=0x%x\n", modifiers->flags);
- /* calculate final *flagsp, *maskp according to mask and op */
+ /* calculate final flags, mask based upon op */
switch (op) {
case '=':
- *maskp = 0;
- *flagsp = flags;
+ /* modifiers->flags already set */
+ modifiers->mask = 0;
break;
case '+':
- *maskp = ~0U;
- *flagsp = flags;
+ modifiers->mask = ~0U;
break;
case '-':
- *maskp = ~flags;
- *flagsp = 0;
+ modifiers->mask = ~modifiers->flags;
+ modifiers->flags = 0;
break;
}
- vpr_info("*flagsp=0x%x *maskp=0x%x\n", *flagsp, *maskp);
+ vpr_info("*flagsp=0x%x *maskp=0x%x\n", modifiers->flags,
modifiers->mask);
+
return 0;
}
static int ddebug_exec_query(char *query_string, const char *modname)
{
- unsigned int flags = 0, mask = 0;
- struct ddebug_query query;
+ struct flag_settings modifiers = {};
+ struct ddebug_query query = {};
#define MAXWORDS 9
int nwords, nfound;
char *words[MAXWORDS];
@@@ -506,7 -457,7 +506,7 @@@
return -EINVAL;
}
/* check flags 1st (last arg) so query is pairs of spec,val */
- if (ddebug_parse_flags(words[nwords-1], &flags, &mask)) {
+ if (ddebug_parse_flags(words[nwords-1], &modifiers)) {
pr_err("flags parse failed\n");
return -EINVAL;
}
@@@ -515,7 -466,7 +515,7 @@@
return -EINVAL;
}
/* actually go and implement the change */
- nfound = ddebug_change(&query, flags, mask);
+ nfound = ddebug_change(&query, &modifiers);
vpr_info_dq(&query, nfound ? "applied" : "no-match");
return nfound;
@@@ -525,7 -476,7 +525,7 @@@
last error or number of matching callsites. Module name is either
in param (for boot arg) or perhaps in query string.
*/
-static int ddebug_exec_queries(char *query, const char *modname)
+int ddebug_exec_queries(char *query, const char *modname)
{
char *split;
int i, errs = 0, exitcode = 0, rc, nfound = 0;
@@@ -557,7 -508,6 +557,7 @@@
return exitcode;
return nfound;
}
+EXPORT_SYMBOL_GPL(ddebug_exec_queries);
#define PREFIX_SIZE 64
@@@ -821,6 -771,8 +821,6 @@@ static void *ddebug_proc_start(struct s
struct _ddebug *dp;
int n = *pos;
- vpr_info("called m=%p *pos=%lld\n", m, (unsigned long long)*pos);
-
mutex_lock(&ddebug_lock);
if (!n)
@@@ -843,6 -795,9 +843,6 @@@ static void *ddebug_proc_next(struct se
struct ddebug_iter *iter = m->private;
struct _ddebug *dp;
- vpr_info("called m=%p p=%p *pos=%lld\n",
- m, p, (unsigned long long)*pos);
-
if (p == SEQ_START_TOKEN)
dp = ddebug_iter_first(iter);
else
@@@ -861,7 -816,9 +861,7 @@@ static int ddebug_proc_show(struct seq_
{
struct ddebug_iter *iter = m->private;
struct _ddebug *dp = p;
- char flagsbuf[10];
-
- vpr_info("called m=%p p=%p\n", m, p);
+ struct flagsbuf flags;
if (p == SEQ_START_TOKEN) {
seq_puts(m,
@@@ -872,7 -829,7 +872,7 @@@
seq_printf(m, "%s:%u [%s]%s =%s \"",
trim_prefix(dp->filename), dp->lineno,
iter->table->mod_name, dp->function,
- ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
+ ddebug_describe_flags(dp->flags, &flags));
seq_escape(m, dp->format, "\t\r\n\"");
seq_puts(m, "\"\n");
@@@ -885,6 -842,7 +885,6 @@@
*/
static void ddebug_proc_stop(struct seq_file *m, void *p)
{
- vpr_info("called m=%p p=%p\n", m, p);
mutex_unlock(&ddebug_lock);
}
@@@ -895,6 -853,13 +895,6 @@@ static const struct seq_operations ddeb
.stop = ddebug_proc_stop
};
-/*
- * File_ops->open method for <debugfs>/dynamic_debug/control. Does
- * the seq_file setup dance, and also creates an iterator to walk the
- * _ddebugs. Note that we create a seq_file always, even for O_WRONLY
- * files where it's not needed, as doing so simplifies the ->release
- * method.
- */
static int ddebug_proc_open(struct inode *inode, struct file *file)
{
vpr_info("called\n");
@@@ -905,7 -870,7 +905,7 @@@
static const struct file_operations ddebug_proc_fops = {
.owner = THIS_MODULE,
.open = ddebug_proc_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = seq_release_private,
.write = ddebug_proc_write
@@@ -913,7 -878,7 +913,7 @@@
static const struct proc_ops proc_fops = {
.proc_open = ddebug_proc_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = seq_release_private,
.proc_write = ddebug_proc_write
@@@ -944,10 -909,10 +944,10 @@@ int ddebug_add_module(struct _ddebug *t
dt->ddebugs = tab;
mutex_lock(&ddebug_lock);
- list_add_tail(&dt->link, &ddebug_tables);
+ list_add(&dt->link, &ddebug_tables);
mutex_unlock(&ddebug_lock);
- vpr_info("%u debug prints in module %s\n", n, dt->mod_name);
+ v2pr_info("%u debug prints in module %s\n", n, dt->mod_name);
return 0;
}
@@@ -1006,7 -971,7 +1006,7 @@@ int ddebug_remove_module(const char *mo
struct ddebug_table *dt, *nextdt;
int ret = -ENOENT;
- vpr_info("removing module \"%s\"\n", mod_name);
+ v2pr_info("removing module \"%s\"\n", mod_name);
mutex_lock(&ddebug_lock);
list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
@@@ -1064,8 -1029,9 +1064,8 @@@ static int __init dynamic_debug_init(vo
char *cmdline;
int ret = 0;
int n = 0, entries = 0, modct = 0;
- int verbose_bytes = 0;
- if (&__start___verbose == &__stop___verbose) {
+ if (&__start___dyndbg == &__stop___dyndbg) {
if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) {
pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
return 1;
@@@ -1074,11 -1040,14 +1074,11 @@@
ddebug_init_success = 1;
return 0;
}
- iter = __start___verbose;
+ iter = __start___dyndbg;
modname = iter->modname;
iter_start = iter;
- for (; iter < __stop___verbose; iter++) {
+ for (; iter < __stop___dyndbg; iter++) {
entries++;
- verbose_bytes += strlen(iter->modname) + strlen(iter->function)
- + strlen(iter->filename) + strlen(iter->format);
-
if (strcmp(modname, iter->modname)) {
modct++;
ret = ddebug_add_module(iter_start, n, modname);
@@@ -1095,9 -1064,9 +1095,9 @@@
goto out_err;
ddebug_init_success = 1;
- vpr_info("%d modules, %d entries and %d bytes in ddebug tables, %d bytes in
(readonly) verbose section\n",
+ vpr_info("%d modules, %d entries and %d bytes in ddebug tables, %d bytes in
__dyndbg section\n",
modct, entries, (int)(modct * sizeof(struct ddebug_table)),
- verbose_bytes + (int)(__stop___verbose - __start___verbose));
+ (int)(entries * sizeof(struct _ddebug)));
/* apply ddebug_query boot param, dont unload tables on err */
if (ddebug_setup_string[0] != '\0') {
diff --combined mm/shmem.c
index b2abca3f7f33,f019ff500844..9eb74eef1c8f
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@@ -3178,7 -3178,7 +3178,7 @@@ static int shmem_initxattrs(struct inod
new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
GFP_KERNEL);
if (!new_xattr->name) {
- kfree(new_xattr);
+ kvfree(new_xattr);
return -ENOMEM;
}
@@@ -3756,7 -3756,6 +3756,6 @@@ static const struct file_operations shm
.read_iter = shmem_file_read_iter,
.write_iter = generic_file_write_iter,
.fsync = noop_fsync,
- .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = shmem_fallocate,
#endif
diff --combined mm/slab_common.c
index fe8b68482670,5cf40d2d721c..aa24654d99c6
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@@ -326,14 -326,6 +326,14 @@@ int slab_unmergeable(struct kmem_cache
if (s->refcount < 0)
return 1;
+#ifdef CONFIG_MEMCG_KMEM
+ /*
+ * Skip the dying kmem_cache.
+ */
+ if (s->memcg_params.dying)
+ return 1;
+#endif
+
return 0;
}
@@@ -894,15 -886,12 +894,15 @@@ static int shutdown_memcg_caches(struc
return 0;
}
-static void flush_memcg_workqueue(struct kmem_cache *s)
+static void memcg_set_kmem_cache_dying(struct kmem_cache *s)
{
spin_lock_irq(&memcg_kmem_wq_lock);
s->memcg_params.dying = true;
spin_unlock_irq(&memcg_kmem_wq_lock);
+}
+static void flush_memcg_workqueue(struct kmem_cache *s)
+{
/*
* SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
* sure all registered rcu callbacks have been invoked.
@@@ -934,6 -923,10 +934,6 @@@ static inline int shutdown_memcg_caches
{
return 0;
}
-
-static inline void flush_memcg_workqueue(struct kmem_cache *s)
-{
-}
#endif /* CONFIG_MEMCG_KMEM */
void slab_kmem_cache_release(struct kmem_cache *s)
@@@ -951,6 -944,8 +951,6 @@@ void kmem_cache_destroy(struct kmem_cac
if (unlikely(!s))
return;
- flush_memcg_workqueue(s);
-
get_online_cpus();
get_online_mems();
@@@ -960,22 -955,6 +960,22 @@@
if (s->refcount)
goto out_unlock;
+#ifdef CONFIG_MEMCG_KMEM
+ memcg_set_kmem_cache_dying(s);
+
+ mutex_unlock(&slab_mutex);
+
+ put_online_mems();
+ put_online_cpus();
+
+ flush_memcg_workqueue(s);
+
+ get_online_cpus();
+ get_online_mems();
+
+ mutex_lock(&slab_mutex);
+#endif
+
err = shutdown_memcg_caches(s);
if (!err)
err = shutdown_cache(s);
@@@ -1605,7 -1584,7 +1605,7 @@@ static int slabinfo_open(struct inode *
static const struct proc_ops slabinfo_proc_ops = {
.proc_flags = PROC_ENTRY_PERMANENT,
.proc_open = slabinfo_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_write = slabinfo_write,
.proc_lseek = seq_lseek,
.proc_release = seq_release,
diff --combined mm/swapfile.c
index 6c26916e95fd,2e50f52a14c7..c399cac8ab33
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@@ -2835,7 -2835,7 +2835,7 @@@ static int swaps_open(struct inode *ino
static const struct proc_ops swaps_proc_ops = {
.proc_flags = PROC_ENTRY_PERMANENT,
.proc_open = swaps_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_lseek = seq_lseek,
.proc_release = seq_release,
.proc_poll = swaps_poll,
@@@ -2929,7 -2929,7 +2929,7 @@@ static int claim_swapfile(struct swap_i
* write only restriction. Hence zoned block devices are not
* suitable for swapping. Disallow them here.
*/
- if (blk_queue_is_zoned(p->bdev->bd_queue))
+ if (blk_queue_is_zoned(p->bdev->bd_disk->queue))
return -EINVAL;
p->flags |= SWP_BLKDEV;
} else if (S_ISREG(inode->i_mode)) {
diff --combined net/bluetooth/6lowpan.c
index cff4944d5b66,573ef0c80521..7c492105b8c8
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@@ -50,7 -50,6 +50,7 @@@ static bool enable_6lowpan
/* We are listening incoming connections via this channel
*/
static struct l2cap_chan *listen_chan;
+static DEFINE_MUTEX(set_lock);
struct lowpan_peer {
struct list_head list;
@@@ -1079,14 -1078,12 +1079,14 @@@ static void do_enable_set(struct work_s
enable_6lowpan = set_enable->flag;
+ mutex_lock(&set_lock);
if (listen_chan) {
l2cap_chan_close(listen_chan, 0);
l2cap_chan_put(listen_chan);
}
listen_chan = bt_6lowpan_listen();
+ mutex_unlock(&set_lock);
kfree(set_enable);
}
@@@ -1138,13 -1135,11 +1138,13 @@@ static ssize_t lowpan_control_write(str
if (ret == -EINVAL)
return ret;
+ mutex_lock(&set_lock);
if (listen_chan) {
l2cap_chan_close(listen_chan, 0);
l2cap_chan_put(listen_chan);
listen_chan = NULL;
}
+ mutex_unlock(&set_lock);
if (conn) {
struct lowpan_peer *peer;
@@@ -1210,7 -1205,7 +1210,7 @@@ static int lowpan_control_open(struct i
static const struct file_operations lowpan_control_fops = {
.open = lowpan_control_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = lowpan_control_write,
.llseek = seq_lseek,
.release = single_release,
diff --combined net/hsr/hsr_debugfs.c
index 3b6f675bd55a,d994c83b0178..772af706c528
--- a/net/hsr/hsr_debugfs.c
+++ b/net/hsr/hsr_debugfs.c
@@@ -1,5 -1,5 +1,5 @@@
/*
- * hsr_debugfs code
+ * debugfs code for HSR & PRP
* Copyright (C) 2019 Texas Instruments Incorporated
*
* Author(s):
@@@ -24,7 -24,7 +24,7 @@@ static struct dentry *hsr_debugfs_root_
static void print_mac_address(struct seq_file *sfp, unsigned char *mac)
{
- seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:",
+ seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x ",
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
}
@@@ -35,32 -35,20 +35,32 @@@ hsr_node_table_show(struct seq_file *sf
struct hsr_priv *priv = (struct hsr_priv *)sfp->private;
struct hsr_node *node;
- seq_puts(sfp, "Node Table entries\n");
- seq_puts(sfp, "MAC-Address-A, MAC-Address-B, time_in[A], ");
- seq_puts(sfp, "time_in[B], Address-B port\n");
+ seq_printf(sfp, "Node Table entries for (%s) device\n",
+ (priv->prot_version == PRP_V1 ? "PRP" : "HSR"));
+ seq_puts(sfp, "MAC-Address-A, MAC-Address-B, time_in[A], ");
+ seq_puts(sfp, "time_in[B], Address-B port, ");
+ if (priv->prot_version == PRP_V1)
+ seq_puts(sfp, "SAN-A, SAN-B, DAN-P\n");
+ else
+ seq_puts(sfp, "DAN-H\n");
+
rcu_read_lock();
list_for_each_entry_rcu(node, &priv->node_db, mac_list) {
/* skip self node */
if (hsr_addr_is_self(priv, node->macaddress_A))
continue;
print_mac_address(sfp, &node->macaddress_A[0]);
- seq_puts(sfp, " ");
print_mac_address(sfp, &node->macaddress_B[0]);
- seq_printf(sfp, "0x%lx, ", node->time_in[HSR_PT_SLAVE_A]);
- seq_printf(sfp, "0x%lx ", node->time_in[HSR_PT_SLAVE_B]);
- seq_printf(sfp, "0x%x\n", node->addr_B_port);
+ seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]);
+ seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]);
+ seq_printf(sfp, "%14x, ", node->addr_B_port);
+
+ if (priv->prot_version == PRP_V1)
+ seq_printf(sfp, "%5x, %5x, %5x\n",
+ node->san_a, node->san_b,
+ (node->san_a == 0 && node->san_b == 0));
+ else
+ seq_printf(sfp, "%5x\n", 1);
}
rcu_read_unlock();
return 0;
@@@ -69,8 -57,7 +69,8 @@@
/* hsr_node_table_open - Open the node_table file
*
* Description:
- * This routine opens a debugfs file node_table of specific hsr device
+ * This routine opens a debugfs file node_table of specific hsr
+ * or prp device
*/
static int
hsr_node_table_open(struct inode *inode, struct file *filp)
@@@ -93,7 -80,7 +93,7 @@@ void hsr_debugfs_rename(struct net_devi
static const struct file_operations hsr_fops = {
.open = hsr_node_table_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = single_release,
};
diff --combined net/ipv4/netfilter/ipt_CLUSTERIP.c
index a8b980ad11d4,67472389c9c3..40af4bbf4519
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@@ -3,7 -3,7 +3,7 @@@
* (C) 2003-2004 by Harald Welte <laforge(a)netfilter.org>
* based on ideas of Fabio Olive Leite <olive(a)unixforge.org>
*
- * Development of this code funded by SuSE Linux AG, http://www.suse.com/
+ * Development of this code funded by SuSE Linux AG, https://www.suse.com/
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
@@@ -806,7 -806,7 +806,7 @@@ static ssize_t clusterip_proc_write(str
static const struct proc_ops clusterip_proc_ops = {
.proc_open = clusterip_proc_open,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
.proc_write = clusterip_proc_write,
.proc_lseek = seq_lseek,
.proc_release = clusterip_proc_release,
diff --combined net/l2tp/l2tp_debugfs.c
index 96cb9601c21b,6268c42e6bee..b31edf141d26
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@@ -1,5 -1,6 +1,5 @@@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * L2TP subsystem debugfs
+/* L2TP subsystem debugfs
*
* Copyright (c) 2010 Katalix Systems Ltd
*/
@@@ -58,10 -59,11 +58,10 @@@ static void l2tp_dfs_next_session(struc
pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx);
pd->session_idx++;
- if (pd->session == NULL) {
+ if (!pd->session) {
pd->session_idx = 0;
l2tp_dfs_next_tunnel(pd);
}
-
}
static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs)
@@@ -72,25 -74,23 +72,25 @@@
if (!pos)
goto out;
- BUG_ON(m->private == NULL);
+ if (WARN_ON(!m->private)) {
+ pd = NULL;
+ goto out;
+ }
pd = m->private;
- if (pd->tunnel == NULL)
+ if (!pd->tunnel)
l2tp_dfs_next_tunnel(pd);
else
l2tp_dfs_next_session(pd);
/* NULL tunnel and session indicates end of list */
- if ((pd->tunnel == NULL) && (pd->session == NULL))
+ if (!pd->tunnel && !pd->session)
pd = NULL;
out:
return pd;
}
-
static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
@@@ -148,13 -148,11 +148,13 @@@ static void l2tp_dfs_seq_tunnel_show(st
const struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
seq_printf(m, " from %pI6c to %pI6c\n",
- &np->saddr, &tunnel->sock->sk_v6_daddr);
- } else
+ &np->saddr, &tunnel->sock->sk_v6_daddr);
+ }
#endif
- seq_printf(m, " from %pI4 to %pI4\n",
- &inet->inet_saddr, &inet->inet_daddr);
+ if (tunnel->sock->sk_family == AF_INET)
+ seq_printf(m, " from %pI4 to %pI4\n",
+ &inet->inet_saddr, &inet->inet_daddr);
+
if (tunnel->encap == L2TP_ENCAPTYPE_UDP)
seq_printf(m, " source port %hu, dest port %hu\n",
ntohs(inet->inet_sport), ntohs(inet->inet_dport));
@@@ -204,7 -202,7 +204,7 @@@ static void l2tp_dfs_seq_session_show(s
seq_printf(m, "%02x%02x%02x%02x",
session->cookie[4], session->cookie[5],
session->cookie[6], session->cookie[7]);
- seq_printf(m, "\n");
+ seq_puts(m, "\n");
}
if (session->peer_cookie_len) {
seq_printf(m, " peer cookie %02x%02x%02x%02x",
@@@ -214,7 -212,7 +214,7 @@@
seq_printf(m, "%02x%02x%02x%02x",
session->peer_cookie[4], session->peer_cookie[5],
session->peer_cookie[6], session->peer_cookie[7]);
- seq_printf(m, "\n");
+ seq_puts(m, "\n");
}
seq_printf(m, " %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
@@@ -226,7 -224,7 +226,7 @@@
atomic_long_read(&session->stats.rx_bytes),
atomic_long_read(&session->stats.rx_errors));
- if (session->show != NULL)
+ if (session->show)
session->show(m, session);
}
@@@ -273,7 -271,7 +273,7 @@@ static int l2tp_dfs_seq_open(struct ino
int rc = -ENOMEM;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (pd == NULL)
+ if (!pd)
goto out;
/* Derive the network namespace from the pid opening the
@@@ -320,7 -318,7 +320,7 @@@ static int l2tp_dfs_seq_release(struct
static const struct file_operations l2tp_dfs_fops = {
.owner = THIS_MODULE,
.open = l2tp_dfs_seq_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = l2tp_dfs_seq_release,
};
diff --combined net/sunrpc/rpc_pipe.c
index eadc0ede928c,82842fe2767f..a202251d8e01
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@@ -449,7 -449,7 +449,7 @@@ rpc_info_release(struct inode *inode, s
static const struct file_operations rpc_info_operations = {
.owner = THIS_MODULE,
.open = rpc_info_open,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.release = rpc_info_release,
};
@@@ -1510,6 -1510,6 +1510,6 @@@ err_notifier
void unregister_rpc_pipefs(void)
{
rpc_clients_notifier_unregister();
- kmem_cache_destroy(rpc_inode_cachep);
unregister_filesystem(&rpc_pipe_fs_type);
+ kmem_cache_destroy(rpc_inode_cachep);
}
diff --combined security/smack/smackfs.c
index 9c4308077574,02d48f363f05..62dd3fe67138
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@@ -671,7 -671,7 +671,7 @@@ static ssize_t smk_write_load(struct fi
static const struct file_operations smk_load_ops = {
.open = smk_open_load,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.write = smk_write_load,
.release = seq_release,
@@@ -884,7 -884,7 +884,7 @@@ static ssize_t smk_set_cipso(struct fil
}
ret = sscanf(rule, "%d", &maplevel);
- if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL)
+ if (ret != 1 || maplevel < 0 || maplevel > SMACK_CIPSO_MAXLEVEL)
goto out;
rule += SMK_DIGITLEN;
@@@ -905,10 -905,6 +905,10 @@@
for (i = 0; i < catlen; i++) {
rule += SMK_DIGITLEN;
+ if (rule > data + count) {
+ rc = -EOVERFLOW;
+ goto out;
+ }
ret = sscanf(rule, "%u", &cat);
if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM)
goto out;
@@@ -948,7 -944,7 +948,7 @@@ static ssize_t smk_write_cipso(struct f
static const struct file_operations smk_cipso_ops = {
.open = smk_open_cipso,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.write = smk_write_cipso,
.release = seq_release,
@@@ -1022,7 -1018,7 +1022,7 @@@ static ssize_t smk_write_cipso2(struct
static const struct file_operations smk_cipso2_ops = {
.open = smk_open_cipso2,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.write = smk_write_cipso2,
.release = seq_release,
@@@ -1287,7 -1283,7 +1287,7 @@@ free_data_out
static const struct file_operations smk_net4addr_ops = {
.open = smk_open_net4addr,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.write = smk_write_net4addr,
.release = seq_release,
@@@ -1544,7 -1540,7 +1544,7 @@@ free_data_out
static const struct file_operations smk_net6addr_ops = {
.open = smk_open_net6addr,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.write = smk_write_net6addr,
.release = seq_release,
@@@ -2032,7 -2028,7 +2032,7 @@@ static ssize_t smk_write_onlycap(struc
static const struct file_operations smk_onlycap_ops = {
.open = smk_open_onlycap,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.write = smk_write_onlycap,
.llseek = seq_lseek,
.release = seq_release,
@@@ -2256,7 -2252,7 +2256,7 @@@ static ssize_t smk_write_load_self(stru
static const struct file_operations smk_load_self_ops = {
.open = smk_open_load_self,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.write = smk_write_load_self,
.release = seq_release,
@@@ -2390,7 -2386,7 +2390,7 @@@ static ssize_t smk_write_load2(struct f
static const struct file_operations smk_load2_ops = {
.open = smk_open_load2,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.write = smk_write_load2,
.release = seq_release,
@@@ -2463,7 -2459,7 +2463,7 @@@ static ssize_t smk_write_load_self2(str
static const struct file_operations smk_load_self2_ops = {
.open = smk_open_load_self2,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.write = smk_write_load_self2,
.release = seq_release,
@@@ -2724,6 -2720,7 +2724,6 @@@ static int smk_open_relabel_self(struc
static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_smack *tsp = smack_cred(current_cred());
char *data;
int rc;
LIST_HEAD(list_tmp);
@@@ -2748,28 -2745,18 +2748,28 @@@
kfree(data);
if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) {
+ struct cred *new;
+ struct task_smack *tsp;
+
+ new = prepare_creds();
+ if (!new) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ tsp = smack_cred(new);
smk_destroy_label_list(&tsp->smk_relabel);
list_splice(&list_tmp, &tsp->smk_relabel);
+ commit_creds(new);
return count;
}
-
+out:
smk_destroy_label_list(&list_tmp);
return rc;
}
static const struct file_operations smk_relabel_self_ops = {
.open = smk_open_relabel_self,
- .read = seq_read,
+ .read_iter = seq_read_iter,
.llseek = seq_lseek,
.write = smk_write_relabel_self,
.release = seq_release,
diff --combined sound/core/info.c
index 9fec3070f8ba,6e2a35a37b5e..7bddc2b89a42
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@@ -426,7 -426,7 +426,7 @@@ static const struct proc_ops snd_info_t
.proc_release = snd_info_text_entry_release,
.proc_write = snd_info_text_entry_write,
.proc_lseek = seq_lseek,
- .proc_read = seq_read,
+ .proc_read_iter = seq_read_iter,
};
static struct snd_info_entry *create_subdir(struct module *mod,
@@@ -606,9 -606,7 +606,9 @@@ int snd_info_get_line(struct snd_info_b
{
int c;
- if (snd_BUG_ON(!buffer || !buffer->buffer))
+ if (snd_BUG_ON(!buffer))
+ return 1;
+ if (!buffer->buffer)
return 1;
if (len <= 0 || buffer->stop || buffer->error)
return 1;
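
The change repeated throughout the hunks above swaps the .read hook backed by
seq_read() for the iov_iter based .read_iter hook backed by seq_read_iter()
(and .proc_read for .proc_read_iter in proc_ops), so that in-kernel reads of
these files no longer rely on set_fs(). A minimal sketch of a seq_file user in
the converted form follows; the example_show()/example_open()/example_fops
names are hypothetical and only illustrate the shape of the conversion, not
code taken from this merge.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

/* Hypothetical show routine: emits one line each time the file is read. */
static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example seq_file output\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	/* was: .read = seq_read; the iov_iter variant avoids set_fs() */
	.read_iter	= seq_read_iter,
	.llseek		= seq_lseek,
	.release	= single_release,
};
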
--
LinuxNextTracking