The following commit has been merged in the master branch:

commit 42933bac11e811f02200c944d8562a15f8ec4ff0
Merge: 2b9accbee563f535046ff2cd382d0acaa92e130c 25985edcedea6396277003854657b5f3cb31a628
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date:   Thu Apr 7 11:14:49 2011 -0700
Merge branch 'for-linus2' of git://git.profusion.mobi/users/lucas/linux-2.6
* 'for-linus2' of git://git.profusion.mobi/users/lucas/linux-2.6:
  Fix common misspellings
diff --combined Documentation/edac.txt index ccc07c2,44364fa..249822c --- a/Documentation/edac.txt +++ b/Documentation/edac.txt @@@ -311,7 -311,7 +311,7 @@@ Total Correctable Errors count attribut 'ce_noinfo_count'
This attribute file displays the number of CEs that - have occurred wherewith no informations as to which DIMM slot + have occurred wherewith no information as to which DIMM slot is having errors. Memory is handicapped, but operational, yet no information is available to indicate which slot the failing memory is in. This count field should be also @@@ -741,7 -741,7 +741,7 @@@ were done at i7core_edac driver. This c As EDAC API maps the minimum unity is csrows, the driver sequencially maps channel/dimm into different csrows.
- For example, suposing the following layout: + For example, supposing the following layout: Ch0 phy rd0, wr0 (0x063f4031): 2 ranks, UDIMMs dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400 dimm 1 1024 Mb offset: 4, bank: 8, rank: 1, row: 0x4000, col: 0x400 diff --combined Documentation/kernel-parameters.txt index d2b5150,49e9796..cc85a92 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@@ -699,7 -699,7 +699,7 @@@ bytes respectively. Such letter suffixe ekgdboc= [X86,KGDB] Allow early kernel console debugging ekgdboc=kbd
- This is desgined to be used in conjunction with + This is designed to be used in conjunction with the boot argument: earlyprintk=vga
edd= [EDD] @@@ -1832,17 -1832,15 +1832,17 @@@ perfmon on Intel CPUs instead of the CPU specific event set.
- oops=panic Always panic on oopses. Default is to just kill the process, - but there is a small probability of deadlocking the machine. + oops=panic Always panic on oopses. Default is to just kill the + process, but there is a small probability of + deadlocking the machine. This will also cause panics on machine check exceptions. Useful together with panic=30 to trigger a reboot.
OSS [HW,OSS] See Documentation/sound/oss/oss-parameters.txt
- panic= [KNL] Kernel behaviour on panic + panic= [KNL] Kernel behaviour on panic: delay <timeout> + seconds before rebooting Format: <timeout>
parkbd.port= [HW] Parallel port number the keyboard adapter is @@@ -2345,7 -2343,6 +2345,7 @@@
softlockup_panic= [KNL] Should the soft-lockup detector generate panics. + Format: <integer>
sonypi.*= [HW] Sony Programmable I/O Control Device driver See Documentation/sonypi.txt @@@ -2478,8 -2475,8 +2478,8 @@@ topology= [S390] Format: {off | on} Specify if the kernel should make use of the cpu - topology informations if the hardware supports these. - The scheduler will make use of these informations and + topology information if the hardware supports this. + The scheduler will make use of this information and e.g. base its process migration decisions on it. Default is on.
@@@ -2532,7 -2529,8 +2532,7 @@@ reported either.
unknown_nmi_panic - [X86] - Set unknown_nmi_panic=1 early on boot. + [X86] Cause panic on unknown NMI.
usbcore.autosuspend= [USB] The autosuspend time delay (in seconds) used diff --combined Documentation/sound/alsa/ALSA-Configuration.txt index 181ba5b,b9b7f30..9822afb --- a/Documentation/sound/alsa/ALSA-Configuration.txt +++ b/Documentation/sound/alsa/ALSA-Configuration.txt @@@ -322,7 -322,7 +322,7 @@@ Prior to version 0.9.0rc4 options had "port" needs to match the BASE ADDRESS jumper on the card (0x220 or 0x240) or the value stored in the card's EEPROM for cards that have an EEPROM and their "CONFIG MODE" jumper set to "EEPROM SETTING". The other values can - be choosen freely from the options enumerated above. + be chosen freely from the options enumerated above.
If dma2 is specified and different from dma1, the card will operate in full-duplex mode. When dma1=3, only dma2=0 is valid and the only way to @@@ -356,7 -356,7 +356,7 @@@ "port" needs to match the BASE ADDRESS jumper on the card (0x220 or 0x240) or the value stored in the card's EEPROM for cards that have an EEPROM and their "CONFIG MODE" jumper set to "EEPROM SETTING". The other values can - be choosen freely from the options enumerated above. + be chosen freely from the options enumerated above.
If dma2 is specified and different from dma1, the card will operate in full-duplex mode. When dma1=3, only dma2=0 is valid and the only way to @@@ -2229,7 -2229,7 +2229,7 @@@ Proc interfaces (/proc/asound
/proc/asound/card#/pcm#[cp]/oss ------------------------------- - String "erase" - erase all additional informations about OSS applications + String "erase" - erase all additional information about OSS applications String "<app_name> <fragments> <fragment_size> [<options>]"
<app_name> - name of application with (higher priority) or without path diff --combined Documentation/video4linux/bttv/Sound-FAQ index bc5e41d,b6e6deb..395f6c6 --- a/Documentation/video4linux/bttv/Sound-FAQ +++ b/Documentation/video4linux/bttv/Sound-FAQ @@@ -2,13 -2,13 +2,13 @@@ bttv and sound mini howto =========================
- There are alot of different bt848/849/878/879 based boards available. + There are a lot of different bt848/849/878/879 based boards available. Making video work often is not a big deal, because this is handled completely by the bt8xx chip, which is common on all boards. But sound is handled in slightly different ways on each board.
To handle the grabber boards correctly, there is a array tvcards[] in -bttv-cards.c, which holds the informations required for each board. +bttv-cards.c, which holds the information required for each board. Sound will work only, if the correct entry is used (for video it often makes no difference). The bttv driver prints a line to the kernel log, telling which card type is used. Like this one: diff --combined arch/ia64/sn/kernel/irq.c index 139c018,1c50e4c..81a1f4e --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@@ -227,7 -227,7 +227,7 @@@ void sn_set_err_irq_affinity(unsigned i { /* * On systems which support CPU disabling (SHub2), all error interrupts - * are targetted at the boot CPU. + * are targeted at the boot CPU. */ if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) set_irq_affinity_info(irq, cpu_physical_id(0), 0); @@@ -412,7 -412,7 +412,7 @@@ sn_call_force_intr_provider(struct sn_i pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
/* Don't force an interrupt if the irq has been disabled */ - if (!irqd_irq_disabled(sn_irq_info->irq_irq) && + if (!irqd_irq_disabled(irq_get_irq_data(sn_irq_info->irq_irq)) && pci_provider && pci_provider->force_interrupt) (*pci_provider->force_interrupt)(sn_irq_info); } @@@ -435,7 -435,7 +435,7 @@@ static void sn_check_intr(int irq, stru /* * Bridge types attached to TIO (anything but PIC) do not need this WAR * since they do not target Shub II interrupt registers. If that - * ever changes, this check needs to accomodate. + * ever changes, this check needs to accommodate. */ if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC) return; diff --combined arch/powerpc/kernel/head_64.S index 271140b,285e6f7..3a319f9 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@@ -40,7 -40,7 +40,7 @@@ #include <asm/kvm_book3s_asm.h> #include <asm/ptrace.h>
- /* The physical memory is layed out such that the secondary processor + /* The physical memory is laid out such that the secondary processor * spin code sits at 0x0000...0x00ff. On server, the vectors follow * using the layout described in exceptions-64s.S */ @@@ -536,13 -536,6 +536,13 @@@ _GLOBAL(pmac_secondary_start add r13,r13,r4 /* for this processor. */ mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/
+ /* Mark interrupts soft and hard disabled (they might be enabled + * in the PACA when doing hotplug) + */ + li r0,0 + stb r0,PACASOFTIRQEN(r13) + stb r0,PACAHARDIRQEN(r13) + /* Create a temp kernel stack for use before relocation is on. */ ld r1,PACAEMERGSP(r13) subi r1,r1,STACK_FRAME_OVERHEAD diff --combined arch/powerpc/mm/tlb_low_64e.S index 2228151,33cf704..af08922 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S @@@ -192,7 -192,7 +192,7 @@@ normal_tlb_miss or r10,r15,r14
BEGIN_MMU_FTR_SECTION - /* Set the TLB reservation and seach for existing entry. Then load + /* Set the TLB reservation and search for existing entry. Then load * the entry. */ PPC_TLBSRX_DOT(0,r16) @@@ -425,13 -425,13 +425,13 @@@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_T
virt_page_table_tlb_miss_fault: /* If we fault here, things are a little bit tricky. We need to call - * either data or instruction store fault, and we need to retreive + * either data or instruction store fault, and we need to retrieve * the original fault address and ESR (for data). * * The thing is, we know that in normal circumstances, this is * always called as a second level tlb miss for SW load or as a first * level TLB miss for HW load, so we should be able to peek at the - * relevant informations in the first exception frame in the PACA. + * relevant information in the first exception frame in the PACA. * * However, we do need to double check that, because we may just hit * a stray kernel pointer or a userland attack trying to hit those diff --combined arch/sparc/kernel/entry.S index 906ee3e,8e607b3..6da784a --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@@ -801,7 -801,7 +801,7 @@@ vac_linesize_patch_32: subcc %l7, 32, .globl vac_hwflush_patch1_on, vac_hwflush_patch2_on
/* - * Ugly, but we cant use hardware flushing on the sun4 and we'd require + * Ugly, but we can't use hardware flushing on the sun4 and we'd require * two instructions (Anton) */ vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7 @@@ -851,7 -851,7 +851,7 @@@ sun4c_fault sethi %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4
/* If the kernel references a bum kernel pointer, or a pte which - * points to a non existant page in ram, we will run this code + * points to a non existent page in ram, we will run this code * _forever_ and lock up the machine!!!!! So we must check for * this condition, the AC_SYNC_ERR bits are what we must examine. * Also a parity error would make this happen as well. So we just @@@ -1283,7 -1283,7 +1283,7 @@@ linux_syscall_trace .globl ret_from_fork ret_from_fork: call schedule_tail - mov %g3, %o0 + ld [%g3 + TI_TASK], %o0 b ret_sys_call ld [%sp + STACKFRAME_SZ + PT_I0], %o0
diff --combined block/blk-core.c index 725091d,071ae6d..90f22cc --- a/block/blk-core.c +++ b/block/blk-core.c @@@ -1184,7 -1184,7 +1184,7 @@@ static bool bio_attempt_front_merge(str
/* * Attempts to merge with the plugged list in the current process. Returns - * true if merge was succesful, otherwise false. + * true if merge was successful, otherwise false. */ static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q, struct bio *bio) @@@ -2163,7 -2163,7 +2163,7 @@@ bool blk_update_request(struct request * size, something has gone terribly wrong. */ if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { - printk(KERN_ERR "blk: request botched\n"); + blk_dump_rq_flags(req, "request botched"); req->__data_len = blk_rq_cur_bytes(req); }
@@@ -2665,7 -2665,7 +2665,7 @@@ static int plug_rq_cmp(void *priv, stru struct request *rqa = container_of(a, struct request, queuelist); struct request *rqb = container_of(b, struct request, queuelist);
- return !(rqa->q == rqb->q); + return !(rqa->q <= rqb->q); }
static void flush_plug_list(struct blk_plug *plug) diff --combined block/blk-throttle.c index 6c98cfe,c8b16c8..0475a22 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@@ -77,7 -77,7 +77,7 @@@ struct throtl_grp unsigned long slice_end[2];
/* Some throttle limits got updated for the group */ - bool limits_changed; + int limits_changed; };
struct throtl_data @@@ -102,7 -102,7 +102,7 @@@ /* Work for dispatching throttled bios */ struct delayed_work throtl_work;
- bool limits_changed; + int limits_changed; };
enum tg_state_flags { @@@ -916,7 -916,7 +916,7 @@@ static void throtl_update_blkio_group_c /* * For all update functions, key should be a valid pointer because these * update functions are called under blkcg_lock, that means, blkg is - * valid and in turn key is valid. queue exit path can not race becuase + * valid and in turn key is valid. queue exit path can not race because * of blkcg_lock * * Can not take queue lock in update functions as queue lock under blkcg_lock diff --combined drivers/hid/hid-core.c index c597836,453e7e6..408c4be --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@@ -306,7 -306,7 +306,7 @@@ static int hid_parser_global(struct hid case HID_GLOBAL_ITEM_TAG_PUSH:
if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) { - dbg_hid("global enviroment stack overflow\n"); + dbg_hid("global environment stack overflow\n"); return -1; }
@@@ -317,7 -317,7 +317,7 @@@ case HID_GLOBAL_ITEM_TAG_POP:
if (!parser->global_stack_ptr) { - dbg_hid("global enviroment stack underflow\n"); + dbg_hid("global environment stack underflow\n"); return -1; }
@@@ -1449,10 -1449,8 +1449,10 @@@ static const struct hid_device_id hid_h { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, diff --combined drivers/hid/hid-magicmouse.c index 418c399,7a61373..0ec91c1 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@@ -76,7 -76,7 +76,7 @@@ MODULE_PARM_DESC(report_undeciphered, " * This is true when single_touch_id is equal to NO_TOUCHES. If multiple touches * are down and the touch providing for single touch emulation is lifted, * single_touch_id is equal to SINGLE_TOUCH_UP. While single touch emulation is - * occuring, single_touch_id corresponds with the tracking id of the touch used. + * occurring, single_touch_id corresponds with the tracking id of the touch used. */ #define NO_TOUCHES -1 #define SINGLE_TOUCH_UP -2 @@@ -418,8 -418,6 +418,8 @@@ static void magicmouse_setup_input(stru input_set_abs_params(input, ABS_MT_POSITION_Y, -2456, 2565, 4, 0); } + + input_set_events_per_packet(input, 60); }
if (report_undeciphered) { diff --combined drivers/macintosh/therm_pm72.c index c987033,d25df48..bb8b722 --- a/drivers/macintosh/therm_pm72.c +++ b/drivers/macintosh/therm_pm72.c @@@ -91,7 -91,7 +91,7 @@@ * * Mar. 10, 2005 : 1.2 * - Add basic support for Xserve G5 - * - Retreive pumps min/max from EEPROM image in device-tree (broken) + * - Retrieve pumps min/max from EEPROM image in device-tree (broken) * - Use min/max macros here or there * - Latest darwin updated U3H min fan speed to 20% PWM * @@@ -153,7 -153,7 +153,7 @@@ static struct i2c_adapter * u3_0 static struct i2c_adapter * u3_1; static struct i2c_adapter * k2; static struct i2c_client * fcu; -static struct cpu_pid_state cpu_state[2]; +static struct cpu_pid_state processor_state[2]; static struct basckside_pid_params backside_params; static struct backside_pid_state backside_state; static struct drives_pid_state drives_state; @@@ -375,7 -375,7 +375,7 @@@ static int read_smon_adc(struct cpu_pid rc = i2c_master_send(state->monitor, buf, 2); if (rc <= 0) goto error; - /* Wait for convertion */ + /* Wait for conversion */ msleep(1); /* Switch to data register */ buf[0] = 4; @@@ -664,8 -664,8 +664,8 @@@ static int read_eeprom(int cpu, struct
static void fetch_cpu_pumps_minmax(void) { - struct cpu_pid_state *state0 = &cpu_state[0]; - struct cpu_pid_state *state1 = &cpu_state[1]; + struct cpu_pid_state *state0 = &processor_state[0]; + struct cpu_pid_state *state1 = &processor_state[1]; u16 pump_min = 0, pump_max = 0xffff; u16 tmp[4];
@@@ -717,17 -717,17 +717,17 @@@ static ssize_t show_##name(struct devic return sprintf(buf, "%d", data); \ }
-BUILD_SHOW_FUNC_FIX(cpu0_temperature, cpu_state[0].last_temp) -BUILD_SHOW_FUNC_FIX(cpu0_voltage, cpu_state[0].voltage) -BUILD_SHOW_FUNC_FIX(cpu0_current, cpu_state[0].current_a) -BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, cpu_state[0].rpm) -BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, cpu_state[0].intake_rpm) +BUILD_SHOW_FUNC_FIX(cpu0_temperature, processor_state[0].last_temp) +BUILD_SHOW_FUNC_FIX(cpu0_voltage, processor_state[0].voltage) +BUILD_SHOW_FUNC_FIX(cpu0_current, processor_state[0].current_a) +BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, processor_state[0].rpm) +BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, processor_state[0].intake_rpm)
-BUILD_SHOW_FUNC_FIX(cpu1_temperature, cpu_state[1].last_temp) -BUILD_SHOW_FUNC_FIX(cpu1_voltage, cpu_state[1].voltage) -BUILD_SHOW_FUNC_FIX(cpu1_current, cpu_state[1].current_a) -BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, cpu_state[1].rpm) -BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, cpu_state[1].intake_rpm) +BUILD_SHOW_FUNC_FIX(cpu1_temperature, processor_state[1].last_temp) +BUILD_SHOW_FUNC_FIX(cpu1_voltage, processor_state[1].voltage) +BUILD_SHOW_FUNC_FIX(cpu1_current, processor_state[1].current_a) +BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, processor_state[1].rpm) +BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, processor_state[1].intake_rpm)
BUILD_SHOW_FUNC_FIX(backside_temperature, backside_state.last_temp) BUILD_SHOW_FUNC_INT(backside_fan_pwm, backside_state.pwm) @@@ -919,8 -919,8 +919,8 @@@ static void do_cpu_pid(struct cpu_pid_s
static void do_monitor_cpu_combined(void) { - struct cpu_pid_state *state0 = &cpu_state[0]; - struct cpu_pid_state *state1 = &cpu_state[1]; + struct cpu_pid_state *state0 = &processor_state[0]; + struct cpu_pid_state *state1 = &processor_state[1]; s32 temp0, power0, temp1, power1; s32 temp_combi, power_combi; int rc, intake, pump; @@@ -1150,7 -1150,7 +1150,7 @@@ static void do_monitor_cpu_rack(struct /* * Initialize the state structure for one CPU control loop */ -static int init_cpu_state(struct cpu_pid_state *state, int index) +static int init_processor_state(struct cpu_pid_state *state, int index) { int err;
@@@ -1192,7 -1192,7 +1192,7 @@@ err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm); } if (err) - printk(KERN_WARNING "Failed to create some of the atribute" + printk(KERN_WARNING "Failed to create some of the attribute" "files for CPU %d\n", index);
return 0; @@@ -1205,7 -1205,7 +1205,7 @@@ /* * Dispose of the state data for one CPU control loop */ -static void dispose_cpu_state(struct cpu_pid_state *state) +static void dispose_processor_state(struct cpu_pid_state *state) { if (state->monitor == NULL) return; @@@ -1804,9 -1804,9 +1804,9 @@@ static int main_control_loop(void *x set_pwm_fan(SLOTS_FAN_PWM_INDEX, SLOTS_FAN_DEFAULT_PWM);
/* Initialize ADCs */ - initialize_adc(&cpu_state[0]); - if (cpu_state[1].monitor != NULL) - initialize_adc(&cpu_state[1]); + initialize_adc(&processor_state[0]); + if (processor_state[1].monitor != NULL) + initialize_adc(&processor_state[1]);
fcu_tickle_ticks = FCU_TICKLE_TICKS;
@@@ -1833,14 -1833,14 +1833,14 @@@ if (cpu_pid_type == CPU_PID_TYPE_COMBINED) do_monitor_cpu_combined(); else if (cpu_pid_type == CPU_PID_TYPE_RACKMAC) { - do_monitor_cpu_rack(&cpu_state[0]); - if (cpu_state[1].monitor != NULL) - do_monitor_cpu_rack(&cpu_state[1]); + do_monitor_cpu_rack(&processor_state[0]); + if (processor_state[1].monitor != NULL) + do_monitor_cpu_rack(&processor_state[1]); // better deal with UP } else { - do_monitor_cpu_split(&cpu_state[0]); - if (cpu_state[1].monitor != NULL) - do_monitor_cpu_split(&cpu_state[1]); + do_monitor_cpu_split(&processor_state[0]); + if (processor_state[1].monitor != NULL) + do_monitor_cpu_split(&processor_state[1]); // better deal with UP } /* Then, the rest */ @@@ -1885,8 -1885,8 +1885,8 @@@ */ static void dispose_control_loops(void) { - dispose_cpu_state(&cpu_state[0]); - dispose_cpu_state(&cpu_state[1]); + dispose_processor_state(&processor_state[0]); + dispose_processor_state(&processor_state[1]); dispose_backside_state(&backside_state); dispose_drives_state(&drives_state); dispose_slots_state(&slots_state); @@@ -1928,12 -1928,12 +1928,12 @@@ static int create_control_loops(void /* Create control loops for everything. If any fail, everything * fails */ - if (init_cpu_state(&cpu_state[0], 0)) + if (init_processor_state(&processor_state[0], 0)) goto fail; if (cpu_pid_type == CPU_PID_TYPE_COMBINED) fetch_cpu_pumps_minmax();
- if (cpu_count > 1 && init_cpu_state(&cpu_state[1], 1)) + if (cpu_count > 1 && init_processor_state(&processor_state[1], 1)) goto fail; if (init_backside_state(&backside_state)) goto fail; diff --combined drivers/media/radio/wl128x/fmdrv_common.c index ecfd9fb,26fb9cb..5991ab6 --- a/drivers/media/radio/wl128x/fmdrv_common.c +++ b/drivers/media/radio/wl128x/fmdrv_common.c @@@ -352,7 -352,7 +352,7 @@@ static void send_tasklet(unsigned long if (!atomic_read(&fmdev->tx_cnt)) return;
- /* Check, is there any timeout happenned to last transmitted packet */ + /* Check, is there any timeout happened to last transmitted packet */ if ((jiffies - fmdev->last_tx_jiffies) > FM_DRV_TX_TIMEOUT) { fmerr("TX timeout occurred\n"); atomic_set(&fmdev->tx_cnt, 1); @@@ -478,7 -478,7 +478,7 @@@ u32 fmc_send_cmd(struct fmdev *fmdev, u return -ETIMEDOUT; } if (!fmdev->resp_skb) { - fmerr("Reponse SKB is missing\n"); + fmerr("Response SKB is missing\n"); return -EFAULT; } spin_lock_irqsave(&fmdev->resp_skb_lock, flags); @@@ -1494,17 -1494,12 +1494,17 @@@ u32 fmc_prepare(struct fmdev *fmdev }
memset(&fm_st_proto, 0, sizeof(fm_st_proto)); - fm_st_proto.type = ST_FM; fm_st_proto.recv = fm_st_receive; fm_st_proto.match_packet = NULL; fm_st_proto.reg_complete_cb = fm_st_reg_comp_cb; fm_st_proto.write = NULL; /* TI ST driver will fill write pointer */ fm_st_proto.priv_data = fmdev; + fm_st_proto.chnl_id = 0x08; + fm_st_proto.max_frame_size = 0xff; + fm_st_proto.hdr_len = 1; + fm_st_proto.offset_len_in_hdr = 0; + fm_st_proto.len_size = 1; + fm_st_proto.reserve = 1;
ret = st_register(&fm_st_proto); if (ret == -EINPROGRESS) { @@@ -1537,7 -1532,7 +1537,7 @@@ g_st_write = fm_st_proto.write; } else { fmerr("Failed to get ST write func pointer\n"); - ret = st_unregister(ST_FM); + ret = st_unregister(&fm_st_proto); if (ret < 0) fmerr("st_unregister failed %d\n", ret); return -EAGAIN; @@@ -1591,14 -1586,13 +1591,14 @@@ */ u32 fmc_release(struct fmdev *fmdev) { + static struct st_proto_s fm_st_proto; u32 ret;
if (!test_bit(FM_CORE_READY, &fmdev->flag)) { fmdbg("FM Core is already down\n"); return 0; } - /* Sevice pending read */ + /* Service pending read */ wake_up_interruptible(&fmdev->rx.rds.read_queue);
tasklet_kill(&fmdev->tx_task); @@@ -1610,11 -1604,7 +1610,11 @@@ fmdev->resp_comp = NULL; fmdev->rx.freq = 0;
- ret = st_unregister(ST_FM); + memset(&fm_st_proto, 0, sizeof(fm_st_proto)); + fm_st_proto.chnl_id = 0x08; + + ret = st_unregister(&fm_st_proto); + if (ret < 0) fmerr("Failed to de-register FM from ST %d\n", ret); else diff --combined drivers/net/atlx/atl2.c index 937ef1a,f46ee45..e3cbf45 --- a/drivers/net/atlx/atl2.c +++ b/drivers/net/atlx/atl2.c @@@ -1701,7 -1701,7 +1701,7 @@@ static struct pci_driver atl2_driver = .id_table = atl2_pci_tbl, .probe = atl2_probe, .remove = __devexit_p(atl2_remove), - /* Power Managment Hooks */ + /* Power Management Hooks */ .suspend = atl2_suspend, #ifdef CONFIG_PM .resume = atl2_resume, @@@ -1996,15 -1996,13 +1996,15 @@@ static int atl2_set_eeprom(struct net_d if (!eeprom_buff) return -ENOMEM;
- ptr = (u32 *)eeprom_buff; + ptr = eeprom_buff;
if (eeprom->offset & 3) { /* need read/modify/write of first changed EEPROM word */ /* only the second byte of the word is being modified */ - if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) - return -EIO; + if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) { + ret_val = -EIO; + goto out; + } ptr++; } if (((eeprom->offset + eeprom->len) & 3)) { @@@ -2013,22 -2011,18 +2013,22 @@@ * only the first byte of the word is being modified */ if (!atl2_read_eeprom(hw, last_dword * 4, - &(eeprom_buff[last_dword - first_dword]))) - return -EIO; + &(eeprom_buff[last_dword - first_dword]))) { + ret_val = -EIO; + goto out; + } }
/* Device's eeprom is always little-endian, word addressable */ memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_dword - first_dword + 1; i++) { - if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) - return -EIO; + if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) { + ret_val = -EIO; + goto out; + } } - + out: kfree(eeprom_buff); return ret_val; } diff --combined drivers/net/bonding/bond_alb.h index 4b3e358,b3bc750..86861f0 --- a/drivers/net/bonding/bond_alb.h +++ b/drivers/net/bonding/bond_alb.h @@@ -74,9 -74,9 +74,9 @@@ struct tlb_client_info * packets to a Client that the Hash function * gave this entry index. */ - u32 tx_bytes; /* Each Client acumulates the BytesTx that + u32 tx_bytes; /* Each Client accumulates the BytesTx that * were tranmitted to it, and after each - * CallBack the LoadHistory is devided + * CallBack the LoadHistory is divided * by the balance interval */ u32 load_history; /* This field contains the amount of Bytes diff --combined drivers/net/irda/via-ircc.c index 186cd28,fc89685..f504b26 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c @@@ -29,7 -29,7 +29,7 @@@ F02 Oct/28/02: Add SB device ID for 314
2004-02-16: sda@bdit.de - Removed unneeded 'legacy' pci stuff. - - Make sure SIR mode is set (hw_init()) before calling mode-dependant stuff. + - Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff. - On speed change from core, don't send SIR frame with new speed. Use current speed and change speeds later. - Make module-param dongle_id actually work. @@@ -75,9 -75,15 +75,9 @@@ static int dongle_id = 0; /* default: p /* We can't guess the type of connected dongle, user *must* supply it. */ module_param(dongle_id, int, 0);
-/* FIXME : we should not need this, because instances should be automatically - * managed by the PCI layer. Especially that we seem to only be using the - * first entry. Jean II */ -/* Max 4 instances for now */ -static struct via_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL }; - /* Some prototypes */ -static int via_ircc_open(int i, chipio_t * info, unsigned int id); -static int via_ircc_close(struct via_ircc_cb *self); +static int via_ircc_open(struct pci_dev *pdev, chipio_t * info, + unsigned int id); static int via_ircc_dma_receive(struct via_ircc_cb *self); static int via_ircc_dma_receive_complete(struct via_ircc_cb *self, int iobase); @@@ -209,7 -215,7 +209,7 @@@ static int __devinit via_init_one (stru pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); pci_write_config_byte(pcidev,0x5a,0xc0); WriteLPCReg(0x28, 0x70 ); - if (via_ircc_open(0, &info,0x3076) == 0) + if (via_ircc_open(pcidev, &info, 0x3076) == 0) rc=0; } else rc = -ENODEV; //IR not turn on @@@ -248,7 -254,7 +248,7 @@@ info.irq=FirIRQ; info.dma=FirDRQ1; info.dma2=FirDRQ0; - if (via_ircc_open(0, &info,0x3096) == 0) + if (via_ircc_open(pcidev, &info, 0x3096) == 0) rc=0; } else rc = -ENODEV; //IR not turn on !!!!! @@@ -258,10 -264,48 +258,10 @@@ return rc; }
-/* - * Function via_ircc_clean () - * - * Close all configured chips - * - */ -static void via_ircc_clean(void) -{ - int i; - - IRDA_DEBUG(3, "%s()\n", __func__); - - for (i=0; i < ARRAY_SIZE(dev_self); i++) { - if (dev_self[i]) - via_ircc_close(dev_self[i]); - } -} - -static void __devexit via_remove_one (struct pci_dev *pdev) -{ - IRDA_DEBUG(3, "%s()\n", __func__); - - /* FIXME : This is ugly. We should use pci_get_drvdata(pdev); - * to get our driver instance and call directly via_ircc_close(). - * See vlsi_ir for details... - * Jean II */ - via_ircc_clean(); - - /* FIXME : This should be in via_ircc_close(), because here we may - * theoritically disable still configured devices :-( - Jean II */ - pci_disable_device(pdev); -} - static void __exit via_ircc_cleanup(void) { IRDA_DEBUG(3, "%s()\n", __func__);
- /* FIXME : This should be redundant, as pci_unregister_driver() - * should call via_remove_one() on each device. - * Jean II */ - via_ircc_clean(); - /* Cleanup all instances of the driver */ pci_unregister_driver (&via_driver); } @@@ -280,13 -324,12 +280,13 @@@ static const struct net_device_ops via_ };
/* - * Function via_ircc_open (iobase, irq) + * Function via_ircc_open(pdev, iobase, irq) * * Open driver instance * */ -static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id) +static __devinit int via_ircc_open(struct pci_dev *pdev, chipio_t * info, + unsigned int id) { struct net_device *dev; struct via_ircc_cb *self; @@@ -294,6 -337,9 +294,6 @@@
IRDA_DEBUG(3, "%s()\n", __func__);
- if (i >= ARRAY_SIZE(dev_self)) - return -ENOMEM; - /* Allocate new instance of the driver */ dev = alloc_irdadev(sizeof(struct via_ircc_cb)); if (dev == NULL) @@@ -303,8 -349,13 +303,8 @@@ self->netdev = dev; spin_lock_init(&self->lock);
- /* FIXME : We should store our driver instance in the PCI layer, - * using pci_set_drvdata(), not in this array. - * See vlsi_ir for details... - Jean II */ - /* FIXME : 'i' is always 0 (see via_init_one()) :-( - Jean II */ - /* Need to store self somewhere */ - dev_self[i] = self; - self->index = i; + pci_set_drvdata(pdev, self); + /* Initialize Resource */ self->io.cfg_base = info->cfg_base; self->io.fir_base = info->fir_base; @@@ -334,7 -385,7 +334,7 @@@ self->io.dongle_id = dongle_id;
/* The only value we must override it the baudrate */ - /* Maximum speeds and capabilities are dongle-dependant. */ + /* Maximum speeds and capabilities are dongle-dependent. */ switch( self->io.dongle_id ){ case 0x0d: self->qos.baud_rate.bits = @@@ -363,7 -414,7 +363,7 @@@
/* Allocate memory if needed */ self->rx_buff.head = - dma_alloc_coherent(NULL, self->rx_buff.truesize, + dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize, &self->rx_buff_dma, GFP_KERNEL); if (self->rx_buff.head == NULL) { err = -ENOMEM; @@@ -372,7 -423,7 +372,7 @@@ memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head = - dma_alloc_coherent(NULL, self->tx_buff.truesize, + dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize, &self->tx_buff_dma, GFP_KERNEL); if (self->tx_buff.head == NULL) { err = -ENOMEM; @@@ -404,32 -455,33 +404,32 @@@ via_hw_init(self); return 0; err_out4: - dma_free_coherent(NULL, self->tx_buff.truesize, + dma_free_coherent(&pdev->dev, self->tx_buff.truesize, self->tx_buff.head, self->tx_buff_dma); err_out3: - dma_free_coherent(NULL, self->rx_buff.truesize, + dma_free_coherent(&pdev->dev, self->rx_buff.truesize, self->rx_buff.head, self->rx_buff_dma); err_out2: release_region(self->io.fir_base, self->io.fir_ext); err_out1: + pci_set_drvdata(pdev, NULL); free_netdev(dev); - dev_self[i] = NULL; return err; }
/* - * Function via_ircc_close (self) + * Function via_remove_one(pdev) * * Close driver instance * */ -static int via_ircc_close(struct via_ircc_cb *self) +static void __devexit via_remove_one(struct pci_dev *pdev) { + struct via_ircc_cb *self = pci_get_drvdata(pdev); int iobase;
IRDA_DEBUG(3, "%s()\n", __func__);
- IRDA_ASSERT(self != NULL, return -1;); - iobase = self->io.fir_base;
ResetChip(iobase, 5); //hardware reset. @@@ -441,16 -493,16 +441,16 @@@ __func__, self->io.fir_base); release_region(self->io.fir_base, self->io.fir_ext); if (self->tx_buff.head) - dma_free_coherent(NULL, self->tx_buff.truesize, + dma_free_coherent(&pdev->dev, self->tx_buff.truesize, self->tx_buff.head, self->tx_buff_dma); if (self->rx_buff.head) - dma_free_coherent(NULL, self->rx_buff.truesize, + dma_free_coherent(&pdev->dev, self->rx_buff.truesize, self->rx_buff.head, self->rx_buff_dma); - dev_self[self->index] = NULL; + pci_set_drvdata(pdev, NULL);
free_netdev(self->netdev);
- return 0; + pci_disable_device(pdev); }
/* diff --combined drivers/net/mlx4/mcg.c index c6d336a,279521a..e63c37d --- a/drivers/net/mlx4/mcg.c +++ b/drivers/net/mlx4/mcg.c @@@ -111,7 -111,7 +111,7 @@@ static int new_steering_entry(struct ml u32 members_count; struct mlx4_steer_index *new_entry; struct mlx4_promisc_qp *pqp; - struct mlx4_promisc_qp *dqp; + struct mlx4_promisc_qp *dqp = NULL; u32 prot; int err; u8 pf_num; @@@ -184,7 -184,7 +184,7 @@@ out_mailbox out_alloc: if (dqp) { list_del(&dqp->list); - kfree(&dqp); + kfree(dqp); } list_del(&new_entry->list); kfree(new_entry); @@@ -222,7 -222,7 +222,7 @@@ static int existing_steering_entry(stru
/* the given qpn is listed as a promisc qpn * we need to add it as a duplicate to this entry - * for future refernce */ + * for future references */ list_for_each_entry(dqp, &entry->duplicates, list) { if (qpn == dqp->qpn) return 0; /* qp is already duplicated */ @@@ -469,6 -469,7 +469,6 @@@ static int remove_promisc_qp(struct mlx
/*remove from list of promisc qps */ list_del(&pqp->list); - kfree(pqp);
/* set the default entry not to include the removed one */ mailbox = mlx4_alloc_cmd_mailbox(dev); @@@ -527,8 -528,6 +527,8 @@@ out_mailbox out_list: if (back_to_list) list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); + else + kfree(pqp); out_mutex: mutex_unlock(&priv->mcg_table.mutex); return err; diff --combined drivers/net/phy/phy_device.c index e870c06,ff61293..ff109fe --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@@ -442,11 -442,11 +442,11 @@@ static int phy_attach_direct(struct net u32 flags, phy_interface_t interface) { struct device *d = &phydev->dev; + int err;
/* Assume that if there is no driver, that it doesn't * exist, and we should use the genphy driver. */ if (NULL == d->driver) { - int err; d->driver = &genphy_driver.driver;
err = d->driver->probe(d); @@@ -474,11 -474,7 +474,11 @@@ /* Do initial configuration here, now that * we have certain key parameters * (dev_flags and interface) */ - return phy_init_hw(phydev); + err = phy_init_hw(phydev); + if (err) + phy_detach(phydev); + + return err; }
/** @@@ -538,7 -534,7 +538,7 @@@ EXPORT_SYMBOL(phy_detach) /* Generic PHY support and helper functions */
/** - * genphy_config_advert - sanitize and advertise auto-negotation parameters + * genphy_config_advert - sanitize and advertise auto-negotiation parameters * @phydev: target phy_device struct * * Description: Writes MII_ADVERTISE with the appropriate values, @@@ -687,7 -683,7 +687,7 @@@ int genphy_config_aneg(struct phy_devic return result;
if (result == 0) { - /* Advertisment hasn't changed, but maybe aneg was never on to + /* Advertisement hasn't changed, but maybe aneg was never on to * begin with? Or maybe phy was isolated? */ int ctl = phy_read(phydev, MII_BMCR);
diff --combined drivers/net/usb/cdc_eem.c index 8f12854,5552847..882f53f --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c @@@ -190,7 -190,7 +190,7 @@@ static int eem_rx_fixup(struct usbnet *
/* * EEM packet header format: - * b0..14: EEM type dependant (Data or Command) + * b0..14: EEM type dependent (Data or Command) * b15: bmType */ header = get_unaligned_le16(skb->data); @@@ -340,7 -340,7 +340,7 @@@ next
static const struct driver_info eem_info = { .description = "CDC EEM Device", - .flags = FLAG_ETHER, + .flags = FLAG_ETHER | FLAG_POINTTOPOINT, .bind = eem_bind, .rx_fixup = eem_rx_fixup, .tx_fixup = eem_tx_fixup, diff --combined drivers/net/wireless/ath/carl9170/carl9170.h index 9cad061,c01c43d..3d4ed58 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h @@@ -161,7 -161,7 +161,7 @@@ struct carl9170_sta_tid * Naturally: The higher the limit, the faster the device CAN send. * However, even a slight over-commitment at the wrong time and the * hardware is doomed to send all already-queued frames at suboptimal - * rates. This in turn leads to an enourmous amount of unsuccessful + * rates. This in turn leads to an enormous amount of unsuccessful * retries => Latency goes up, whereas the throughput goes down. CRASH! */ #define CARL9170_NUM_TX_LIMIT_HARD ((AR9170_TXQ_DEPTH * 3) / 2) @@@ -443,7 -443,6 +443,7 @@@ struct carl9170_ba_stats u8 ampdu_len; u8 ampdu_ack_len; bool clear; + bool req; };
struct carl9170_sta_info { diff --combined drivers/net/wireless/iwlegacy/iwl-core.c index a209a0e,1c3a8cb..7007d61 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/drivers/net/wireless/iwlegacy/iwl-core.c @@@ -1030,7 -1030,7 +1030,7 @@@ int iwl_legacy_apm_init(struct iwl_pri /* * Enable HAP INTA (interrupt from management bus) to * wake device's PCI Express link L1a -> L0s - * NOTE: This is no-op for 3945 (non-existant bit) + * NOTE: This is no-op for 3945 (non-existent bit) */ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); @@@ -1805,15 -1805,6 +1805,15 @@@ iwl_legacy_mac_change_interface(struct
mutex_lock(&priv->mutex);
+ if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) { + /* + * Huh? But wait ... this can maybe happen when + * we're in the middle of a firmware restart! + */ + err = -EBUSY; + goto out; + } + interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
if (!(interface_modes & BIT(newtype))) { @@@ -1841,7 -1832,6 +1841,7 @@@ /* success */ iwl_legacy_teardown_interface(priv, vif, true); vif->type = newtype; + vif->p2p = newp2p; err = iwl_legacy_setup_interface(priv, ctx); WARN_ON(err); /* diff --combined drivers/usb/serial/opticon.c index a1dd4d4,a6f63cc..1b5633f --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@@ -116,7 -116,7 +116,7 @@@ static void opticon_read_bulk_callback( } else { if ((data[0] == 0x00) && (data[1] == 0x01)) { spin_lock_irqsave(&priv->lock, flags); - /* CTS status infomation package */ + /* CTS status information package */ if (data[2] == 0x00) priv->cts = false; else @@@ -413,7 -413,7 +413,7 @@@ static int opticon_tiocmget(struct tty_ return result; }
-static int opticon_tiocmset(struct tty_struct *tty, struct file *file, +static int opticon_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; diff --combined fs/btrfs/inode.c index 6541339,80920bc..5cc64ab --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@@ -112,7 -112,6 +112,7 @@@ static int btrfs_init_inode_security(st static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, size_t size, size_t compressed_size, + int compress_type, struct page **compressed_pages) { struct btrfs_key key; @@@ -127,9 -126,12 +127,9 @@@ size_t cur_size = size; size_t datasize; unsigned long offset; - int compress_type = BTRFS_COMPRESS_NONE;
- if (compressed_size && compressed_pages) { - compress_type = root->fs_info->compress_type; + if (compressed_size && compressed_pages) cur_size = compressed_size; - }
path = btrfs_alloc_path(); if (!path) @@@ -219,7 -221,7 +219,7 @@@ fail static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, u64 end, - size_t compressed_size, + size_t compressed_size, int compress_type, struct page **compressed_pages) { u64 isize = i_size_read(inode); @@@ -252,7 -254,7 +252,7 @@@ inline_len = min_t(u64, isize, actual_end); ret = insert_inline_extent(trans, root, inode, start, inline_len, compressed_size, - compressed_pages); + compress_type, compressed_pages); BUG_ON(ret); btrfs_delalloc_release_metadata(inode, end + 1 - start); btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); @@@ -431,13 -433,12 +431,13 @@@ again * to make an uncompressed inline extent. */ ret = cow_file_range_inline(trans, root, inode, - start, end, 0, NULL); + start, end, 0, 0, NULL); } else { /* try making a compressed inline extent */ ret = cow_file_range_inline(trans, root, inode, start, end, - total_compressed, pages); + total_compressed, + compress_type, pages); } if (ret == 0) { /* @@@ -791,7 -792,7 +791,7 @@@ static noinline int cow_file_range(stru if (start == 0) { /* lets try to make an inline extent */ ret = cow_file_range_inline(trans, root, inode, - start, end, 0, NULL); + start, end, 0, 0, NULL); if (ret == 0) { extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, @@@ -2221,6 -2222,8 +2221,6 @@@ int btrfs_orphan_add(struct btrfs_trans insert = 1; #endif insert = 1; - } else { - WARN_ON(!BTRFS_I(inode)->orphan_meta_reserved); }
if (!BTRFS_I(inode)->orphan_meta_reserved) { @@@ -2321,7 -2324,7 +2321,7 @@@ int btrfs_orphan_cleanup(struct btrfs_r
/* * if ret == 0 means we found what we were searching for, which - * is weird, but possible, so only screw with path if we didnt + * is weird, but possible, so only screw with path if we didn't * find the key and see if we have stuff that matches */ if (ret > 0) { @@@ -2534,6 -2537,8 +2534,6 @@@ static void btrfs_read_locked_inode(str BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
alloc_group_block = btrfs_inode_block_group(leaf, inode_item); - if (location.objectid == BTRFS_FREE_SPACE_OBJECTID) - inode->i_mapping->flags &= ~__GFP_FS;
/* * try to precache a NULL acl entry for files that don't have @@@ -6955,10 -6960,8 +6955,10 @@@ static int btrfs_rename(struct inode *o * should cover the worst case number of items we'll modify. */ trans = btrfs_start_transaction(root, 20); - if (IS_ERR(trans)) - return PTR_ERR(trans); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out_notrans; + }
btrfs_set_trans_block_group(trans, new_dir);
@@@ -7058,7 -7061,7 +7058,7 @@@ } out_fail: btrfs_end_transaction_throttle(trans, root); - +out_notrans: if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&root->fs_info->subvol_sem);
diff --combined fs/notify/inotify/inotify_fsnotify.c index 0348d0c,7400898..e3cbd74 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@@ -194,11 -194,10 +194,11 @@@ static int idr_callback(int id, void *p
static void inotify_free_group_priv(struct fsnotify_group *group) { - /* ideally the idr is empty and we won't hit the BUG in teh callback */ + /* ideally the idr is empty and we won't hit the BUG in the callback */ idr_for_each(&group->inotify_data.idr, idr_callback, group); idr_remove_all(&group->inotify_data.idr); idr_destroy(&group->inotify_data.idr); + atomic_dec(&group->inotify_data.user->inotify_devs); free_uid(group->inotify_data.user); }
diff --combined include/drm/drm_crtc.h index aaec097,e2ed98b..d94684b --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@@ -65,7 -65,7 +65,7 @@@ enum drm_mode_status MODE_H_ILLEGAL, /* mode has illegal horizontal timings */ MODE_V_ILLEGAL, /* mode has illegal horizontal timings */ MODE_BAD_WIDTH, /* requires an unsupported linepitch */ - MODE_NOMODE, /* no mode with a maching name */ + MODE_NOMODE, /* no mode with a matching name */ MODE_NO_INTERLACE, /* interlaced mode not supported */ MODE_NO_DBLESCAN, /* doublescan mode not supported */ MODE_NO_VSCAN, /* multiscan mode not supported */ @@@ -321,7 -321,7 +321,7 @@@ struct drm_crtc_funcs
/* * Flip to the given framebuffer. This implements the page - * flip ioctl descibed in drm_mode.h, specifically, the + * flip ioctl described in drm_mode.h, specifically, the * implementation must return immediately and block all * rendering to the current fb until the flip has completed. * If userspace set the event flag in the ioctl, the event @@@ -778,7 -778,6 +778,7 @@@ extern int drm_mode_gamma_get_ioctl(str void *data, struct drm_file *file_priv); extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern u8 *drm_find_cea_extension(struct edid *edid); extern bool drm_detect_hdmi_monitor(struct edid *edid); extern bool drm_detect_monitor_audio(struct edid *edid); extern int drm_mode_page_flip_ioctl(struct drm_device *dev, diff --combined include/drm/ttm/ttm_bo_api.h index a6bd117,94e2ce4..62a0e4c --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@@ -50,10 -50,10 +50,10 @@@ struct drm_mm_node * * @fpfn: first valid page frame number to put the object * @lpfn: last valid page frame number to put the object - * @num_placement: number of prefered placements - * @placement: prefered placements - * @num_busy_placement: number of prefered placements when need to evict buffer - * @busy_placement: prefered placements when need to evict buffer + * @num_placement: number of preferred placements + * @placement: preferred placements + * @num_busy_placement: number of preferred placements when need to evict buffer + * @busy_placement: preferred placements when need to evict buffer * * Structure indicating the placement you request for an object. */ @@@ -158,7 -158,7 +158,7 @@@ struct ttm_tt * the object is destroyed. * @event_queue: Queue for processes waiting on buffer object status change. * @mem: structure describing current placement. - * @persistant_swap_storage: Usually the swap storage is deleted for buffers + * @persistent_swap_storage: Usually the swap storage is deleted for buffers * pinned in physical memory. If this behaviour is not desired, this member * holds a pointer to a persistent shmem object. * @ttm: TTM structure holding system pages. @@@ -221,7 -221,7 +221,7 @@@ struct ttm_buffer_object */
struct ttm_mem_reg mem; - struct file *persistant_swap_storage; + struct file *persistent_swap_storage; struct ttm_tt *ttm; bool evicted;
@@@ -459,7 -459,7 +459,7 @@@ extern void ttm_bo_synccpu_write_releas * user buffer object. * @interruptible: If needing to sleep to wait for GPU resources, * sleep interruptible. - * @persistant_swap_storage: Usually the swap storage is deleted for buffers + * @persistent_swap_storage: Usually the swap storage is deleted for buffers * pinned in physical memory. If this behaviour is not desired, this member * holds a pointer to a persistent shmem object. Typically, this would * point to the shmem object backing a GEM object if TTM is used to back a @@@ -490,7 -490,7 +490,7 @@@ extern int ttm_bo_init(struct ttm_bo_de uint32_t page_alignment, unsigned long buffer_start, bool interrubtible, - struct file *persistant_swap_storage, + struct file *persistent_swap_storage, size_t acc_size, void (*destroy) (struct ttm_buffer_object *)); /** @@@ -506,7 -506,7 +506,7 @@@ * user buffer object. * @interruptible: If needing to sleep while waiting for GPU resources, * sleep interruptible. - * @persistant_swap_storage: Usually the swap storage is deleted for buffers + * @persistent_swap_storage: Usually the swap storage is deleted for buffers * pinned in physical memory. If this behaviour is not desired, this member * holds a pointer to a persistent shmem object. Typically, this would * point to the shmem object backing a GEM object if TTM is used to back a @@@ -528,7 -528,7 +528,7 @@@ extern int ttm_bo_create(struct ttm_bo_ uint32_t page_alignment, unsigned long buffer_start, bool interruptible, - struct file *persistant_swap_storage, + struct file *persistent_swap_storage, struct ttm_buffer_object **p_bo);
/** diff --combined include/drm/ttm/ttm_bo_driver.h index 960b521,8b52c9a..09af2d7 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@@ -122,7 -122,7 +122,7 @@@ struct ttm_backend #define TTM_PAGE_FLAG_USER_DIRTY (1 << 2) #define TTM_PAGE_FLAG_WRITE (1 << 3) #define TTM_PAGE_FLAG_SWAPPED (1 << 4) -#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5) +#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5) #define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6) #define TTM_PAGE_FLAG_DMA32 (1 << 7)
@@@ -223,9 -223,9 +223,9 @@@ struct ttm_mem_type_manager_func * @mem::mm_node should be set to a non-null value, and * @mem::start should be set to a value identifying the beginning * of the range allocated, and the function should return zero. - * If the memory region accomodate the buffer object, @mem::mm_node + * If the memory region accommodate the buffer object, @mem::mm_node * should be set to NULL, and the function should return 0. - * If a system error occured, preventing the request to be fulfilled, + * If a system error occurred, preventing the request to be fulfilled, * the function should return a negative error code. * * Note that @mem::mm_node will only be dereferenced by @@@ -714,7 -714,7 +714,7 @@@ extern void ttm_tt_cache_flush(struct p */ extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); extern int ttm_tt_swapout(struct ttm_tt *ttm, - struct file *persistant_swap_storage); + struct file *persistent_swap_storage);
/* * ttm_bo.c @@@ -841,7 -841,7 +841,7 @@@ extern void ttm_mem_io_unlock(struct tt * different order, either by will or as a result of a buffer being evicted * to make room for a buffer already reserved. (Buffers are reserved before * they are evicted). The following algorithm prevents such deadlocks from - * occuring: + * occurring: * 1) Buffers are reserved with the lru spinlock held. Upon successful * reservation they are removed from the lru list. This stops a reserved buffer * from being evicted. However the lru spinlock is released between the time diff --combined include/linux/fs.h index 1b95af3,f03632d..dbd860a --- a/include/linux/fs.h +++ b/include/linux/fs.h @@@ -465,7 -465,7 +465,7 @@@ struct iattr struct timespec ia_ctime;
/* - * Not an attribute, but an auxilary info for filesystems wanting to + * Not an attribute, but an auxiliary info for filesystems wanting to * implement an ftruncate() like method. NOTE: filesystem should * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL). */ @@@ -613,8 -613,6 +613,8 @@@ struct address_space_operations int (*error_remove_page)(struct address_space *, struct page *); };
+extern const struct address_space_operations empty_aops; + /* * pagecache_write_begin/pagecache_write_end must be used by general code * to write into the pagecache. @@@ -649,7 -647,7 +649,7 @@@ struct address_space } __attribute__((aligned(sizeof(long)))); /* * On most architectures that alignment is already the case; but - * must be enforced here for CRIS, to let the least signficant bit + * must be enforced here for CRIS, to let the least significant bit * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. */
diff --combined include/linux/perf_event.h index 04d75a8,393b60c..ee9f1e7 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@@ -662,7 -662,7 +662,7 @@@ struct pmu int (*commit_txn) (struct pmu *pmu); /* optional */ /* * Will cancel the transaction, assumes ->del() is called - * for each successfull ->add() during the transaction. + * for each successful ->add() during the transaction. */ void (*cancel_txn) (struct pmu *pmu); /* optional */ }; @@@ -1086,7 -1086,7 +1086,7 @@@ void perf_event_task_sched_out(struct t { perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
- COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next)); + __perf_event_task_sched_out(task, next); }
extern void perf_event_mmap(struct vm_area_struct *vma); diff --combined include/linux/skbuff.h index d9e52fa,b759896..d0ae90a --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@@ -126,7 -126,7 +126,7 @@@ struct sk_buff * GRO uses frags we allocate at least 16 regardless of page size. */ #if (65536/PAGE_SIZE + 2) < 16 -#define MAX_SKB_FRAGS 16 +#define MAX_SKB_FRAGS 16UL #else #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2) #endif @@@ -474,7 -474,7 +474,7 @@@ static inline void skb_dst_set(struct s extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);
/** - * skb_dst_is_noref - Test if skb dst isnt refcounted + * skb_dst_is_noref - Test if skb dst isn't refcounted * @skb: buffer */ static inline bool skb_dst_is_noref(const struct sk_buff *skb) diff --combined include/sound/soc-dapm.h index ddc2b3d,5534fdf..f72c103 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@@ -23,7 -23,7 +23,7 @@@ /* * SoC dynamic audio power management * - * We can have upto 4 power domains + * We can have up to 4 power domains * 1. Codec domain - VREF, VMID * Usually controlled at codec probe/remove, although can be set * at stream time if power is not needed for sidetone, etc. @@@ -45,25 -45,25 +45,25 @@@ /* platform domain */ #define SND_SOC_DAPM_INPUT(wname) \ { .id = snd_soc_dapm_input, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0} + .num_kcontrols = 0, .reg = SND_SOC_NOPM } #define SND_SOC_DAPM_OUTPUT(wname) \ { .id = snd_soc_dapm_output, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0} + .num_kcontrols = 0, .reg = SND_SOC_NOPM } #define SND_SOC_DAPM_MIC(wname, wevent) \ { .id = snd_soc_dapm_mic, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD} #define SND_SOC_DAPM_HP(wname, wevent) \ { .id = snd_soc_dapm_hp, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} #define SND_SOC_DAPM_SPK(wname, wevent) \ { .id = snd_soc_dapm_spk, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} #define SND_SOC_DAPM_LINE(wname, wevent) \ { .id = snd_soc_dapm_line, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
/* path domain */ @@@ -189,11 -189,11 +189,11 @@@ /* events that are pre and post DAPM */ #define SND_SOC_DAPM_PRE(wname, wevent) \ { .id = snd_soc_dapm_pre, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD} #define SND_SOC_DAPM_POST(wname, wevent) \ { .id = snd_soc_dapm_post, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD}
/* stream domain */ diff --combined kernel/kexec.c index 4e240a3,e7e3d97..55936f9 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@@ -144,7 -144,7 +144,7 @@@ static int do_kimage_alloc(struct kimag /* Initialize the list of destination pages */ INIT_LIST_HEAD(&image->dest_pages);
- /* Initialize the list of unuseable pages */ + /* Initialize the list of unusable pages */ INIT_LIST_HEAD(&image->unuseable_pages);
/* Read in the segments */ @@@ -454,7 -454,7 +454,7 @@@ static struct page *kimage_alloc_normal /* Deal with the destination pages I have inadvertently allocated. * * Ideally I would convert multi-page allocations into single - * page allocations, and add everyting to image->dest_pages. + * page allocations, and add everything to image->dest_pages. * * For now it is simpler to just free the pages. */ @@@ -602,7 -602,7 +602,7 @@@ static void kimage_free_extra_pages(str /* Walk through and free any extra destination pages I may have */ kimage_free_page_list(&image->dest_pages);
- /* Walk through and free any unuseable pages I have cached */ + /* Walk through and free any unusable pages I have cached */ kimage_free_page_list(&image->unuseable_pages);
} @@@ -1099,8 -1099,7 +1099,8 @@@ size_t crash_get_memory_size(void return size; }
-static void free_reserved_phys_range(unsigned long begin, unsigned long end) +void __weak crash_free_reserved_phys_range(unsigned long begin, + unsigned long end) { unsigned long addr;
@@@ -1136,7 -1135,7 +1136,7 @@@ int crash_shrink_memory(unsigned long n start = roundup(start, PAGE_SIZE); end = roundup(start + new_size, PAGE_SIZE);
- free_reserved_phys_range(end, crashk_res.end); + crash_free_reserved_phys_range(end, crashk_res.end);
if ((start == end) && (crashk_res.parent != NULL)) release_resource(&crashk_res); diff --combined kernel/sched.c index a884551,865b433..dc7ca5c --- a/kernel/sched.c +++ b/kernel/sched.c @@@ -2309,7 -2309,7 +2309,7 @@@ unsigned long wait_task_inactive(struc * Cause a process which is running on another CPU to enter * kernel-mode, without any delay. (to get signals handled.) * - * NOTE: this function doesnt have to take the runqueue lock, + * NOTE: this function doesn't have to take the runqueue lock, * because all it wants to ensure is that the remote task enters * the kernel. If the IPI races and the task has been migrated * to another CPU then no harm is done and the purpose has been @@@ -4997,7 -4997,7 +4997,7 @@@ recheck */ raw_spin_lock_irqsave(&p->pi_lock, flags); /* - * To be able to change p->policy safely, the apropriate + * To be able to change p->policy safely, the appropriate * runqueue lock must be held. */ rq = __task_rq_lock(p); @@@ -5011,17 -5011,6 +5011,17 @@@ return -EINVAL; }
+ /* + * If not changing anything there's no need to proceed further: + */ + if (unlikely(policy == p->policy && (!rt_policy(policy) || + param->sched_priority == p->rt_priority))) { + + __task_rq_unlock(rq); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + return 0; + } + #ifdef CONFIG_RT_GROUP_SCHED if (user) { /* @@@ -5716,7 -5705,7 +5716,7 @@@ void show_state_filter(unsigned long st do_each_thread(g, p) { /* * reset the NMI-timeout, listing all files on a slow - * console might take alot of time: + * console might take a lot of time: */ touch_nmi_watchdog(); if (!state_filter || (p->state & state_filter)) diff --combined kernel/sched_fair.c index c7ec5c8,3cb7f07..4104533 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@@ -22,7 -22,6 +22,7 @@@
#include <linux/latencytop.h> #include <linux/sched.h> +#include <linux/cpumask.h>
/* * Targeted preemption latency for CPU-bound tasks: @@@ -3062,7 -3061,7 +3062,7 @@@ static inline void calculate_imbalance(
/* * if *imbalance is less than the average load per runnable task - * there is no gaurantee that any tasks will be moved so we'll have + * there is no guarantee that any tasks will be moved so we'll have * a think about bumping its value to force at least one task to be * moved */ @@@ -3851,8 -3850,8 +3851,8 @@@ static void rebalance_domains(int cpu, interval = msecs_to_jiffies(interval); if (unlikely(!interval)) interval = 1; - if (interval > HZ*NR_CPUS/10) - interval = HZ*NR_CPUS/10; + if (interval > HZ*num_online_cpus()/10) + interval = HZ*num_online_cpus()/10;
need_serialize = sd->flags & SD_SERIALIZE;
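The sched_fair.c hunk above caps the rebalance interval by the number of CPUs actually online rather than the compile-time NR_CPUS maximum, which on distro kernels is usually far larger than the machine really has; that is also why the file now includes <linux/cpumask.h>. A sketch of the clamping, assuming jiffies units as in the hunk; the helper name is invented.

	#include <linux/kernel.h>	/* min_t() */
	#include <linux/cpumask.h>	/* num_online_cpus() */
	#include <linux/jiffies.h>	/* HZ */

	/* Sketch only: clamp a per-domain rebalance interval (in jiffies)
	 * to a ceiling that scales with the CPUs actually online. */
	static unsigned long clamp_balance_interval(unsigned long interval)
	{
		unsigned long max = HZ * num_online_cpus() / 10;

		if (unlikely(!interval))
			interval = 1;
		return min_t(unsigned long, interval, max);
	}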
diff --combined kernel/signal.c index dc17929,f486d10..29e233f --- a/kernel/signal.c +++ b/kernel/signal.c @@@ -226,7 -226,7 +226,7 @@@ static inline void print_dropped_signal /* * allocate a new signal queue record * - this may be called without locks if and only if t == current, otherwise an - * appopriate lock must be held to stop the target task from exiting + * appropriate lock must be held to stop the target task from exiting */ static struct sigqueue * __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) @@@ -375,15 -375,15 +375,15 @@@ int unhandled_signal(struct task_struc return !tracehook_consider_fatal_signal(tsk, sig); }
- -/* Notify the system that a driver wants to block all signals for this +/* + * Notify the system that a driver wants to block all signals for this * process, and wants to be notified if any signals at all were to be * sent/acted upon. If the notifier routine returns non-zero, then the * signal will be acted upon after all. If the notifier routine returns 0, * then then signal will be blocked. Only one block per process is * allowed. priv is a pointer to private data that the notifier routine - * can use to determine if the signal should be blocked or not. */ - + * can use to determine if the signal should be blocked or not. + */ void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) { @@@ -434,10 -434,9 +434,10 @@@ still_pending copy_siginfo(info, &first->info); __sigqueue_free(first); } else { - /* Ok, it wasn't in the queue. This must be - a fast-pathed signal or we must have been - out of queue space. So zero out the info. + /* + * Ok, it wasn't in the queue. This must be + * a fast-pathed signal or we must have been + * out of queue space. So zero out the info. */ info->si_signo = sig; info->si_errno = 0; @@@ -469,7 -468,7 +469,7 @@@ static int __dequeue_signal(struct sigp }
/* - * Dequeue a signal and return the element to the caller, which is + * Dequeue a signal and return the element to the caller, which is * expected to free it. * * All callers have to hold the siglock. @@@ -491,7 -490,7 +491,7 @@@ int dequeue_signal(struct task_struct * * itimers are process shared and we restart periodic * itimers in the signal delivery path to prevent DoS * attacks in the high resolution timer case. This is - * compliant with the old way of self restarting + * compliant with the old way of self-restarting * itimers, as the SIGALRM is a legacy signal and only * queued once. Changing the restart behaviour to * restart the timer in the signal dequeue path is @@@ -924,15 -923,14 +924,15 @@@ static int __send_signal(int sig, struc if (info == SEND_SIG_FORCED) goto out_set;
- /* Real-time signals must be queued if sent by sigqueue, or - some other real-time mechanism. It is implementation - defined whether kill() does so. We attempt to do so, on - the principle of least surprise, but since kill is not - allowed to fail with EAGAIN when low on memory we just - make sure at least one signal gets delivered and don't - pass on the info struct. */ - + /* + * Real-time signals must be queued if sent by sigqueue, or + * some other real-time mechanism. It is implementation + * defined whether kill() does so. We attempt to do so, on + * the principle of least surprise, but since kill is not + * allowed to fail with EAGAIN when low on memory we just + * make sure at least one signal gets delivered and don't + * pass on the info struct. + */ if (sig < SIGRTMIN) override_rlimit = (is_si_special(info) || info->si_code >= 0); else @@@ -1203,7 -1201,8 +1203,7 @@@ retry return error; }
-int -kill_proc_info(int sig, struct siginfo *info, pid_t pid) +int kill_proc_info(int sig, struct siginfo *info, pid_t pid) { int error; rcu_read_lock(); @@@ -1300,7 -1299,8 +1300,7 @@@ static int kill_something_info(int sig * These are for backward compatibility with the rest of the kernel source. */
-int -send_sig_info(int sig, struct siginfo *info, struct task_struct *p) +int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { /* * Make sure legacy kernel users don't send in bad values @@@ -1368,7 -1368,7 +1368,7 @@@ EXPORT_SYMBOL(kill_pid) * These functions support sending signals using preallocated sigqueue * structures. This is needed "because realtime applications cannot * afford to lose notifications of asynchronous events, like timer - * expirations or I/O completions". In the case of Posix Timers + * expirations or I/O completions". In the case of POSIX Timers * we allocate the sigqueue structure from the timer_create. If this * allocation fails we are able to report the failure to the application * with an EAGAIN error. @@@ -1553,7 -1553,7 +1553,7 @@@ static void do_notify_parent_cldstop(st info.si_signo = SIGCHLD; info.si_errno = 0; /* - * see comment in do_notify_parent() abot the following 3 lines + * see comment in do_notify_parent() about the following 4 lines */ rcu_read_lock(); info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); @@@ -1611,7 -1611,7 +1611,7 @@@ static inline int may_ptrace_stop(void }
/* - * Return nonzero if there is a SIGKILL that should be waking us up. + * Return non-zero if there is a SIGKILL that should be waking us up. * Called with the siglock held. */ static int sigkill_pending(struct task_struct *tsk) @@@ -1735,7 -1735,7 +1735,7 @@@ void ptrace_notify(int exit_code /* * This performs the stopping for SIGSTOP and other stop signals. * We have to stop all threads in the thread group. - * Returns nonzero if we've actually stopped and released the siglock. + * Returns non-zero if we've actually stopped and released the siglock. * Returns zero if we didn't stop and still hold the siglock. */ static int do_signal_stop(int signr) @@@ -1823,12 -1823,10 +1823,12 @@@ static int ptrace_signal(int signr, sig
current->exit_code = 0;
- /* Update the siginfo structure if the signal has - changed. If the debugger wanted something - specific in the siginfo structure then it should - have updated *info via PTRACE_SETSIGINFO. */ + /* + * Update the siginfo structure if the signal has + * changed. If the debugger wanted something + * specific in the siginfo structure then it should + * have updated *info via PTRACE_SETSIGINFO. + */ if (signr != info->si_signo) { info->si_signo = signr; info->si_errno = 0; @@@ -1887,7 -1885,7 +1887,7 @@@ relock for (;;) { struct k_sigaction *ka; /* - * Tracing can induce an artifical signal and choose sigaction. + * Tracing can induce an artificial signal and choose sigaction. * The return value in @signr determines the default action, * but @info->si_signo is the signal number we will report. */ @@@ -2036,8 -2034,7 +2036,8 @@@ void exit_signals(struct task_struct *t if (!signal_pending(tsk)) goto out;
- /* It could be that __group_complete_signal() choose us to + /* + * It could be that __group_complete_signal() choose us to * notify about group-wide signal. Another thread should be * woken now to take the signal since we will not. */ @@@ -2075,9 -2072,6 +2075,9 @@@ EXPORT_SYMBOL(unblock_all_signals) * System call entry points. */
+/** + * sys_restart_syscall - restart a system call + */ SYSCALL_DEFINE0(restart_syscall) { struct restart_block *restart = ¤t_thread_info()->restart_block; @@@ -2131,13 -2125,6 +2131,13 @@@ int sigprocmask(int how, sigset_t *set return error; }
+/** + * sys_rt_sigprocmask - change the list of currently blocked signals + * @how: whether to add, remove, or set signals + * @set: stores pending signals + * @oset: previous value of signal mask if non-null + * @sigsetsize: size of sigset_t type + */ SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set, sigset_t __user *, oset, size_t, sigsetsize) { @@@ -2196,14 -2183,8 +2196,14 @@@ long do_sigpending(void __user *set, un
out: return error; -} +}
+/** + * sys_rt_sigpending - examine a pending signal that has been raised + * while blocked + * @set: stores pending signals + * @sigsetsize: size of sigset_t type or larger + */ SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) { return do_sigpending(set, sigsetsize); @@@ -2252,9 -2233,9 +2252,9 @@@ int copy_siginfo_to_user(siginfo_t __us err |= __put_user(from->si_trapno, &to->si_trapno); #endif #ifdef BUS_MCEERR_AO - /* + /* * Other callers might not initialize the si_lsb field, - * so check explicitely for the right codes here. + * so check explicitly for the right codes here. */ if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); @@@ -2283,14 -2264,6 +2283,14 @@@
#endif
+/** + * sys_rt_sigtimedwait - synchronously wait for queued signals specified + * in @uthese + * @uthese: queued signals to wait for + * @uinfo: if non-null, the signal's siginfo is returned here + * @uts: upper bound on process time suspension + * @sigsetsize: size of sigset_t type + */ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, siginfo_t __user *, uinfo, const struct timespec __user *, uts, size_t, sigsetsize) @@@ -2307,7 -2280,7 +2307,7 @@@
if (copy_from_user(&these, uthese, sizeof(these))) return -EFAULT; - + /* * Invert the set of allowed signals to get those we * want to block. @@@ -2332,11 -2305,9 +2332,11 @@@ + (ts.tv_sec || ts.tv_nsec));
if (timeout) { - /* None ready -- temporarily unblock those we're + /* + * None ready -- temporarily unblock those we're * interested while we are sleeping in so that we'll - * be awakened when they arrive. */ + * be awakened when they arrive. + */ current->real_blocked = current->blocked; sigandsets(¤t->blocked, ¤t->blocked, &these); recalc_sigpending(); @@@ -2368,11 -2339,6 +2368,11 @@@ return ret; }
+/** + * sys_kill - send a signal to a process + * @pid: the PID of the process + * @sig: signal to be sent + */ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) { struct siginfo info; @@@ -2448,11 -2414,7 +2448,11 @@@ SYSCALL_DEFINE3(tgkill, pid_t, tgid, pi return do_tkill(tgid, pid, sig); }
-/* +/** + * sys_tkill - send signal to one specific task + * @pid: the PID of the task + * @sig: signal to be sent + * * Send a signal to only one task, even if it's a CLONE_THREAD task. */ SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) @@@ -2464,12 -2426,6 +2464,12 @@@ return do_tkill(0, pid, sig); }
+/** + * sys_rt_sigqueueinfo - send signal information to a signal + * @pid: the PID of the thread + * @sig: signal to be sent + * @uinfo: signal info to be sent + */ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t __user *, uinfo) { @@@ -2597,11 -2553,12 +2597,11 @@@ do_sigaltstack (const stack_t __user *u
error = -EINVAL; /* - * - * Note - this code used to test ss_flags incorrectly + * Note - this code used to test ss_flags incorrectly: * old code may have been written using ss_flags==0 * to mean ss_flags==SS_ONSTACK (as this was the only * way that worked) - this fix preserves that older - * mechanism + * mechanism. */ if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) goto out; @@@ -2635,10 -2592,6 +2635,10 @@@ out
#ifdef __ARCH_WANT_SYS_SIGPENDING
+/** + * sys_sigpending - examine pending signals + * @set: where mask of pending signal is returned + */ SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) { return do_sigpending(set, sizeof(*set)); @@@ -2647,15 -2600,8 +2647,15 @@@ #endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK -/* Some platforms have their own version with special arguments others - support only sys_rt_sigprocmask. */ +/** + * sys_sigprocmask - examine and change blocked signals + * @how: whether to add, remove, or set signals + * @set: signals to add or remove (if non-null) + * @oset: previous value of signal mask if non-null + * + * Some platforms have their own version with special arguments; + * others support only sys_rt_sigprocmask. + */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set, old_sigset_t __user *, oset) @@@ -2708,13 -2654,6 +2708,13 @@@ out #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION +/** + * sys_rt_sigaction - alter an action taken by a process + * @sig: signal to be sent + * @act: the thread group ID of the thread + * @oact: the PID of the thread + * @sigsetsize: size of sigset_t type + */ SYSCALL_DEFINE4(rt_sigaction, int, sig, const struct sigaction __user *, act, struct sigaction __user *, oact, @@@ -2801,12 -2740,6 +2801,12 @@@ SYSCALL_DEFINE0(pause #endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND +/** + * sys_rt_sigsuspend - replace the signal mask for a value with the + * @unewset value until a signal is received + * @unewset: new signal mask value + * @sigsetsize: size of sigset_t type + */ SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) { sigset_t newset; diff --combined net/bluetooth/hci_core.c index 2216620,42d5ff0..c83f618 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@@ -186,7 -186,6 +186,7 @@@ static void hci_reset_req(struct hci_de BT_DBG("%s %ld", hdev->name, opt);
/* Reset device */ + set_bit(HCI_RESET, &hdev->flags); hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); }
@@@ -214,10 -213,8 +214,10 @@@ static void hci_init_req(struct hci_de /* Mandatory initialization */
/* Reset */ - if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) + if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { + set_bit(HCI_RESET, &hdev->flags); hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); + }
/* Read Local Supported Features */ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); @@@ -587,9 -584,6 +587,9 @@@ static int hci_dev_do_close(struct hci_ hci_req_cancel(hdev, ENODEV); hci_req_lock(hdev);
+ /* Stop timer, it might be running */ + del_timer_sync(&hdev->cmd_timer); + if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { hci_req_unlock(hdev); return 0; @@@ -629,6 -623,7 +629,6 @@@
/* Drop last sent command */ if (hdev->sent_cmd) { - del_timer_sync(&hdev->cmd_timer); kfree_skb(hdev->sent_cmd); hdev->sent_cmd = NULL; } @@@ -1079,7 -1074,6 +1079,7 @@@ static void hci_cmd_timer(unsigned lon
BT_ERR("%s command tx timeout", hdev->name); atomic_set(&hdev->cmd_cnt, 1); + clear_bit(HCI_RESET, &hdev->flags); tasklet_schedule(&hdev->cmd_task); }
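The hci_core.c hunks above bracket the HCI reset with a flag bit: HCI_RESET is set before HCI_OP_RESET is queued and cleared again when the command timer fires, so a timed-out reset cannot leave the flag stuck. A sketch of that pattern; MYDEV_RESET, struct mydev and the function names are hypothetical, not the Bluetooth core's actual code.

	#include <linux/bitops.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	#define MYDEV_RESET	0	/* bit number in dev->flags */

	struct mydev {
		unsigned long flags;
	};

	static int mydev_send_reset(struct mydev *dev)
	{
		/* Refuse to queue a second reset while one is pending. */
		if (test_and_set_bit(MYDEV_RESET, &dev->flags))
			return -EBUSY;
		/* ... queue the actual reset command here ... */
		return 0;
	}

	static void mydev_cmd_timeout(struct mydev *dev)
	{
		/* A timed-out reset must not leave the flag stuck, or no
		 * further reset could ever be issued (cf. hci_cmd_timer). */
		clear_bit(MYDEV_RESET, &dev->flags);
	}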
@@@ -1883,7 -1877,7 +1883,7 @@@ static void hci_tx_task(unsigned long a read_unlock(&hci_task_lock); }
- /* ----- HCI RX task (incoming data proccessing) ----- */ + /* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) diff --combined net/bluetooth/l2cap_sock.c index f77308e,36b9c5d..299fe56 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@@ -679,7 -679,7 +679,7 @@@ static int l2cap_sock_setsockopt(struc
if (opt == BT_FLUSHABLE_OFF) { struct l2cap_conn *conn = l2cap_pi(sk)->conn; - /* proceed futher only when we have l2cap_conn and + /* proceed further only when we have l2cap_conn and No Flush support in the LM */ if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) { err = -EINVAL; @@@ -923,9 -923,8 +923,9 @@@ void __l2cap_sock_close(struct sock *sk rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); - } else - l2cap_chan_del(sk, reason); + } + + l2cap_chan_del(sk, reason); break;
case BT_CONNECT: diff --combined net/ceph/osd_client.c index 3b91d65,8d4ee7e..50af027 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@@ -837,7 -837,8 +837,7 @@@ static void __unregister_request(struc dout("moving osd to %p lru\n", req->r_osd); __move_osd_to_lru(osdc, req->r_osd); } - if (list_empty(&req->r_osd_item) && - list_empty(&req->r_linger_item)) + if (list_empty(&req->r_linger_item)) req->r_osd = NULL; }
@@@ -882,8 -883,7 +882,8 @@@ static void __unregister_linger_request dout("moving osd to %p lru\n", req->r_osd); __move_osd_to_lru(osdc, req->r_osd); } - req->r_osd = NULL; + if (list_empty(&req->r_osd_item)) + req->r_osd = NULL; } }
@@@ -917,7 -917,7 +917,7 @@@ EXPORT_SYMBOL(ceph_osdc_set_request_lin /* * Pick an osd (the first 'up' osd in the pg), allocate the osd struct * (as needed), and set the request r_osd appropriately. If there is - * no up osd, set r_osd to NULL. Move the request to the appropiate list + * no up osd, set r_osd to NULL. Move the request to the appropriate list * (unsent, homeless) or leave on in-flight lru. * * Return 0 if unchanged, 1 if changed, or negative on error. @@@ -1602,11 -1602,11 +1602,11 @@@ void handle_watch_notify(struct ceph_os cookie, ver, event); if (event) { event_work = kmalloc(sizeof(*event_work), GFP_NOIO); - INIT_WORK(&event_work->work, do_event_work); if (!event_work) { dout("ERROR: could not allocate event_work\n"); goto done_err; } + INIT_WORK(&event_work->work, do_event_work); event_work->event = event; event_work->ver = ver; event_work->notify_id = notify_id; @@@ -1672,7 -1672,7 +1672,7 @@@ int ceph_osdc_start_request(struct ceph if (req->r_sent == 0) { rc = __map_request(osdc, req); if (rc < 0) - return rc; + goto out_unlock; if (req->r_osd == NULL) { dout("send_request %p no up osds in pg\n", req); ceph_monc_request_next_osdmap(&osdc->client->monc); @@@ -1689,8 -1689,6 +1689,8 @@@ } } } + +out_unlock: mutex_unlock(&osdc->request_mutex); up_read(&osdc->map_sem); return rc; diff --combined net/core/dev.c index 3da9fb0,56c3e00..956d3b0 --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -1454,27 -1454,6 +1454,27 @@@ static inline void net_timestamp_check( __net_timestamp(skb); }
+static inline bool is_skb_forwardable(struct net_device *dev, + struct sk_buff *skb) +{ + unsigned int len; + + if (!(dev->flags & IFF_UP)) + return false; + + len = dev->mtu + dev->hard_header_len + VLAN_HLEN; + if (skb->len <= len) + return true; + + /* if TSO is enabled, we don't care about the length as the packet + * could be forwarded without being segmented before + */ + if (skb_is_gso(skb)) + return true; + + return false; +} + /** * dev_forward_skb - loopback an skb to another netif * @@@ -1498,7 -1477,8 +1498,7 @@@ int dev_forward_skb(struct net_device * skb_orphan(skb); nf_reset(skb);
- if (unlikely(!(dev->flags & IFF_UP) || - (skb->len > (dev->mtu + dev->hard_header_len + VLAN_HLEN)))) { + if (unlikely(!is_skb_forwardable(dev, skb))) { atomic_long_inc(&dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; @@@ -2091,7 -2071,7 +2091,7 @@@ int dev_hard_start_xmit(struct sk_buff u32 features;
/* - * If device doesnt need skb->dst, release it right now while + * If device doesn't need skb->dst, release it right now while * its hot in this cpu cache */ if (dev->priv_flags & IFF_XMIT_DST_RELEASE) @@@ -2151,7 -2131,7 +2151,7 @@@ gso nskb->next = NULL;
/* - * If device doesnt need nskb->dst, release it right now while + * If device doesn't need nskb->dst, release it right now while * its hot in this cpu cache */ if (dev->priv_flags & IFF_XMIT_DST_RELEASE) @@@ -2970,8 -2950,8 +2970,8 @@@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions * a compare and 2 stores extra right now if we dont have it on * but have CONFIG_NET_CLS_ACT - * NOTE: This doesnt stop any functionality; if you dont have - * the ingress scheduler, you just cant add policies on ingress. + * NOTE: This doesn't stop any functionality; if you dont have + * the ingress scheduler, you just can't add policies on ingress. * */ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) @@@ -3800,7 -3780,7 +3800,7 @@@ static void net_rx_action(struct softir * with netpoll's poll_napi(). Only the entity which * obtains the lock and sees NAPI_STATE_SCHED set will * actually make the ->poll() call. Therefore we avoid - * accidently calling ->poll() when NAPI is not scheduled. + * accidentally calling ->poll() when NAPI is not scheduled. */ work = 0; if (test_bit(NAPI_STATE_SCHED, &n->state)) { @@@ -6336,7 -6316,7 +6336,7 @@@ static void __net_exit default_device_e if (dev->rtnl_link_ops) continue;
- /* Push remaing network devices to init_net */ + /* Push remaining network devices to init_net */ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); err = dev_change_net_namespace(dev, &init_net, fb_name); if (err) { diff --combined net/ipv4/tcp_output.c index 8b0d016,64f30ec..17388c7 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@@ -73,7 -73,7 +73,7 @@@ static void tcp_event_new_data_sent(str tcp_advance_send_head(sk, skb); tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
- /* Don't override Nagle indefinately with F-RTO */ + /* Don't override Nagle indefinitely with F-RTO */ if (tp->frto_counter == 2) tp->frto_counter = 3;
@@@ -1003,8 -1003,7 +1003,8 @@@ int tcp_fragment(struct sock *sk, struc int nlen; u8 flags;
- BUG_ON(len > skb->len); + if (WARN_ON(len > skb->len)) + return -EINVAL;
nsize = skb_headlen(skb) - len; if (nsize < 0) diff --combined net/mac80211/rc80211_minstrel_ht.c index dbdebed,78e67d2..c06aa3a --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@@ -259,7 -259,7 +259,7 @@@ minstrel_ht_update_stats(struct minstre } }
- /* try to sample up to half of the availble rates during each interval */ + /* try to sample up to half of the available rates during each interval */ mi->sample_count *= 4;
cur_prob = 0; @@@ -659,14 -659,18 +659,14 @@@ minstrel_ht_update_caps(void *priv, str struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; struct ieee80211_local *local = hw_to_local(mp->hw); u16 sta_cap = sta->ht_cap.cap; + int n_supported = 0; int ack_dur; int stbc; int i;
/* fall back to the old minstrel for legacy stations */ - if (!sta->ht_cap.ht_supported) { - msp->is_ht = false; - memset(&msp->legacy, 0, sizeof(msp->legacy)); - msp->legacy.r = msp->ratelist; - msp->legacy.sample_table = msp->sample_table; - return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); - } + if (!sta->ht_cap.ht_supported) + goto use_legacy;
BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS); @@@ -721,22 -725,7 +721,22 @@@
mi->groups[i].supported = mcs->rx_mask[minstrel_mcs_groups[i].streams - 1]; + + if (mi->groups[i].supported) + n_supported++; } + + if (!n_supported) + goto use_legacy; + + return; + +use_legacy: + msp->is_ht = false; + memset(&msp->legacy, 0, sizeof(msp->legacy)); + msp->legacy.r = msp->ratelist; + msp->legacy.sample_table = msp->sample_table; + return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); }
static void diff --combined net/mac80211/rx.c index aa5cc37,c50b684..9d192d6 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@@ -381,7 -381,7 +381,7 @@@ static void ieee80211_parse_qos(struct * specs were sane enough this time around to require padding each A-MSDU * subframe to a length that is a multiple of four. * - * Padding like Atheros hardware adds which is inbetween the 802.11 header and + * Padding like Atheros hardware adds which is between the 802.11 header and * the payload is not supported, the driver is required to move the 802.11 * header to be directly in front of the payload in that case. */ @@@ -612,8 -612,7 +612,8 @@@ static void ieee80211_sta_reorder_relea skipped++; continue; } - if (!time_after(jiffies, tid_agg_rx->reorder_time[j] + + if (skipped && + !time_after(jiffies, tid_agg_rx->reorder_time[j] + HT_RX_REORDER_BUF_TIMEOUT)) goto set_release_timer;
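The rc80211_minstrel_ht hunks above (just before the mac80211/rx.c diff) collapse the fall-back to the legacy minstrel algorithm into a single use_legacy label and also take that path when a nominally HT station advertises no usable MCS rates at all. A small compilable sketch of the same shape; struct station and the function names are invented for illustration.

	#include <stdbool.h>

	#define N_GROUPS 4

	struct station {
		bool ht_supported;
		unsigned char rx_mask[N_GROUPS];
		struct { unsigned char supported; } groups[N_GROUPS];
	};

	static void setup_legacy_rates(struct station *sta) { /* ... */ }

	static void setup_rates(struct station *sta)
	{
		int n_supported = 0;
		int i;

		if (!sta->ht_supported)
			goto use_legacy;

		for (i = 0; i < N_GROUPS; i++) {
			sta->groups[i].supported = sta->rx_mask[i];
			if (sta->groups[i].supported)
				n_supported++;
		}

		/* HT capable on paper, but no usable rates advertised. */
		if (!n_supported)
			goto use_legacy;
		return;

	use_legacy:
		setup_legacy_rates(sta);
	}

Keeping one fall-back label avoids duplicating the legacy setup in every branch that can bail out.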
diff --combined sound/pci/hda/patch_realtek.c index 12c6f45,1e5a786..7e28a64 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@@ -549,7 -549,7 +549,7 @@@ static int alc_ch_mode_put(struct snd_k
/* * Control the mode of pin widget settings via the mixer. "pc" is used - * instead of "%" to avoid consequences of accidently treating the % as + * instead of "%" to avoid consequences of accidentally treating the % as * being part of a format specifier. Maximum allowed length of a value is * 63 characters plus NULL terminator. * @@@ -9836,7 -9836,7 +9836,7 @@@ static struct snd_pci_quirk alc882_cfg_
SND_PCI_QUIRK(0x1028, 0x020d, "Dell Inspiron 530", ALC888_6ST_DELL),
- SND_PCI_QUIRK(0x103c, 0x2a3d, "HP Pavillion", ALC883_6ST_DIG), + SND_PCI_QUIRK(0x103c, 0x2a3d, "HP Pavilion", ALC883_6ST_DIG), SND_PCI_QUIRK(0x103c, 0x2a4f, "HP Samba", ALC888_3ST_HP), SND_PCI_QUIRK(0x103c, 0x2a60, "HP Lucknow", ALC888_3ST_HP), SND_PCI_QUIRK(0x103c, 0x2a61, "HP Nettle", ALC883_6ST_DIG), @@@ -9863,6 -9863,7 +9863,6 @@@ SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD), SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL), SND_PCI_QUIRK(0x108e, 0x534d, NULL, ALC883_3ST_6ch), - SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG),
SND_PCI_QUIRK(0x1462, 0x0349, "MSI", ALC883_TARGA_2ch_DIG), SND_PCI_QUIRK(0x1462, 0x040d, "MSI", ALC883_TARGA_2ch_DIG), @@@ -10699,7 -10700,6 +10699,7 @@@ enum PINFIX_LENOVO_Y530, PINFIX_PB_M5210, PINFIX_ACER_ASPIRE_7736, + PINFIX_GIGABYTE_880GM, };
static const struct alc_fixup alc882_fixups[] = { @@@ -10731,13 -10731,6 +10731,13 @@@ .type = ALC_FIXUP_SKU, .v.sku = ALC_FIXUP_SKU_IGNORE, }, + [PINFIX_GIGABYTE_880GM] = { + .type = ALC_FIXUP_PINS, + .v.pins = (const struct alc_pincfg[]) { + { 0x14, 0x1114410 }, /* set as speaker */ + { } + } + }, };
static struct snd_pci_quirk alc882_fixup_tbl[] = { @@@ -10745,7 -10738,6 +10745,7 @@@ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", PINFIX_LENOVO_Y530), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX), SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", PINFIX_ACER_ASPIRE_7736), + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", PINFIX_GIGABYTE_880GM), {} };
@@@ -18782,6 -18774,8 +18782,6 @@@ static struct snd_pci_quirk alc662_cfg_ ALC662_3ST_6ch_DIG), SND_PCI_QUIRK(0x1179, 0xff6e, "Toshiba NB20x", ALC662_AUTO), SND_PCI_QUIRK(0x144d, 0xca00, "Samsung NC10", ALC272_SAMSUNG_NC10), - SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte 945GCM-S2L", - ALC662_3ST_6ch_DIG), SND_PCI_QUIRK(0x152d, 0x2304, "Quanta WH1", ALC663_ASUS_H13), SND_PCI_QUIRK(0x1565, 0x820f, "Biostar TA780G M2+", ALC662_3ST_6ch_DIG), SND_PCI_QUIRK(0x1631, 0xc10c, "PB RS65", ALC663_ASUS_M51VA), @@@ -19455,7 -19449,6 +19455,7 @@@ enum ALC662_FIXUP_IDEAPAD, ALC272_FIXUP_MARIO, ALC662_FIXUP_CZC_P10T, + ALC662_FIXUP_GIGABYTE, };
static const struct alc_fixup alc662_fixups[] = { @@@ -19484,20 -19477,12 +19484,20 @@@ {} } }, + [ALC662_FIXUP_GIGABYTE] = { + .type = ALC_FIXUP_PINS, + .v.pins = (const struct alc_pincfg[]) { + { 0x14, 0x1114410 }, /* set as speaker */ + { } + } + }, };
static struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", ALC662_FIXUP_GIGABYTE), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), diff --combined sound/soc/codecs/tlv320dac33.c index eb1a0b4,f01f141..082e9d5 --- a/sound/soc/codecs/tlv320dac33.c +++ b/sound/soc/codecs/tlv320dac33.c @@@ -324,10 -324,6 +324,10 @@@ static void dac33_init_chip(struct snd_ dac33_write(codec, DAC33_OUT_AMP_CTRL, dac33_read_reg_cache(codec, DAC33_OUT_AMP_CTRL));
+ dac33_write(codec, DAC33_LDAC_PWR_CTRL, + dac33_read_reg_cache(codec, DAC33_LDAC_PWR_CTRL)); + dac33_write(codec, DAC33_RDAC_PWR_CTRL, + dac33_read_reg_cache(codec, DAC33_RDAC_PWR_CTRL)); }
static inline int dac33_read_id(struct snd_soc_codec *codec) @@@ -674,7 -670,6 +674,7 @@@ static inline void dac33_prefill_handle { struct snd_soc_codec *codec = dac33->codec; unsigned int delay; + unsigned long flags;
switch (dac33->fifo_mode) { case DAC33_FIFO_MODE1: @@@ -682,10 -677,10 +682,10 @@@ DAC33_THRREG(dac33->nsample));
/* Take the timestamps */ - spin_lock_irq(&dac33->lock); + spin_lock_irqsave(&dac33->lock, flags); dac33->t_stamp2 = ktime_to_us(ktime_get()); dac33->t_stamp1 = dac33->t_stamp2; - spin_unlock_irq(&dac33->lock); + spin_unlock_irqrestore(&dac33->lock, flags);
dac33_write16(codec, DAC33_PREFILL_MSB, DAC33_THRREG(dac33->alarm_threshold)); @@@ -697,11 -692,11 +697,11 @@@ break; case DAC33_FIFO_MODE7: /* Take the timestamp */ - spin_lock_irq(&dac33->lock); + spin_lock_irqsave(&dac33->lock, flags); dac33->t_stamp1 = ktime_to_us(ktime_get()); /* Move back the timestamp with drain time */ dac33->t_stamp1 -= dac33->mode7_us_to_lthr; - spin_unlock_irq(&dac33->lock); + spin_unlock_irqrestore(&dac33->lock, flags);
dac33_write16(codec, DAC33_PREFILL_MSB, DAC33_THRREG(DAC33_MODE7_MARGIN)); @@@ -719,14 -714,13 +719,14 @@@ static inline void dac33_playback_handler(struct tlv320dac33_priv *dac33) { struct snd_soc_codec *codec = dac33->codec; + unsigned long flags;
switch (dac33->fifo_mode) { case DAC33_FIFO_MODE1: /* Take the timestamp */ - spin_lock_irq(&dac33->lock); + spin_lock_irqsave(&dac33->lock, flags); dac33->t_stamp2 = ktime_to_us(ktime_get()); - spin_unlock_irq(&dac33->lock); + spin_unlock_irqrestore(&dac33->lock, flags);
dac33_write16(codec, DAC33_NSAMPLE_MSB, DAC33_THRREG(dac33->nsample)); @@@ -779,11 -773,10 +779,11 @@@ static irqreturn_t dac33_interrupt_hand { struct snd_soc_codec *codec = dev; struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec); + unsigned long flags;
- spin_lock(&dac33->lock); + spin_lock_irqsave(&dac33->lock, flags); dac33->t_stamp1 = ktime_to_us(ktime_get()); - spin_unlock(&dac33->lock); + spin_unlock_irqrestore(&dac33->lock, flags);
/* Do not schedule the workqueue in Mode7 */ if (dac33->fifo_mode != DAC33_FIFO_MODE7) @@@ -1027,7 -1020,7 +1027,7 @@@ static int dac33_prepare_chip(struct sn /* * For FIFO bypass mode: * Enable the FIFO bypass (Disable the FIFO use) - * Set the BCLK as continous + * Set the BCLK as continuous */ fifoctrl_a |= DAC33_FBYPAS; aictrl_b |= DAC33_BCLKON; @@@ -1180,16 -1173,15 +1180,16 @@@ static snd_pcm_sframes_t dac33_dai_dela unsigned int time_delta, uthr; int samples_out, samples_in, samples; snd_pcm_sframes_t delay = 0; + unsigned long flags;
switch (dac33->fifo_mode) { case DAC33_FIFO_BYPASS: break; case DAC33_FIFO_MODE1: - spin_lock(&dac33->lock); + spin_lock_irqsave(&dac33->lock, flags); t0 = dac33->t_stamp1; t1 = dac33->t_stamp2; - spin_unlock(&dac33->lock); + spin_unlock_irqrestore(&dac33->lock, flags); t_now = ktime_to_us(ktime_get());
/* We have not started to fill the FIFO yet, delay is 0 */ @@@ -1254,10 -1246,10 +1254,10 @@@ } break; case DAC33_FIFO_MODE7: - spin_lock(&dac33->lock); + spin_lock_irqsave(&dac33->lock, flags); t0 = dac33->t_stamp1; uthr = dac33->uthr; - spin_unlock(&dac33->lock); + spin_unlock_irqrestore(&dac33->lock, flags); t_now = ktime_to_us(ktime_get());
/* We have not started to fill the FIFO yet, delay is 0 */
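The tlv320dac33 hunks above switch every acquisition of dac33->lock to spin_lock_irqsave()/spin_unlock_irqrestore(): the interrupt handler takes the same lock, and the other callers cannot assume a particular interrupt state, so plain spin_lock() risks deadlocking against the IRQ while spin_lock_irq() would unconditionally re-enable interrupts on unlock. A sketch of the pattern; struct my_priv and the function names are hypothetical, not the driver's real code.

	#include <linux/spinlock.h>
	#include <linux/interrupt.h>
	#include <linux/ktime.h>
	#include <linux/types.h>

	struct my_priv {
		spinlock_t lock;	/* spin_lock_init() at probe time */
		s64 t_stamp;
	};

	/* Process context: may run with interrupts on or off, so save and
	 * restore the interrupt state instead of assuming it. */
	static void my_take_timestamp(struct my_priv *priv)
	{
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		priv->t_stamp = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	/* Interrupt handler: takes the same lock, which is why plain
	 * spin_lock() in the other paths could deadlock against it. */
	static irqreturn_t my_irq_handler(int irq, void *dev_id)
	{
		struct my_priv *priv = dev_id;
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		priv->t_stamp = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&priv->lock, flags);

		return IRQ_HANDLED;
	}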