-rw-r--r--  MAINTAINERS                      |  2
-rw-r--r--  drivers/acpi/nfit/core.c         | 48
-rw-r--r--  drivers/mtd/nand/davinci_nand.c  |  3
-rw-r--r--  drivers/mtd/nand/omap2.c         |  2
-rw-r--r--  drivers/nvdimm/core.c            |  8
-rw-r--r--  drivers/nvdimm/nd.h              | 22
-rw-r--r--  drivers/nvdimm/region_devs.c     | 22
-rw-r--r--  include/linux/dma-mapping.h      |  2
-rw-r--r--  kernel/cgroup.c                  | 29
-rw-r--r--  kernel/cpuset.c                  | 19
-rw-r--r--  mm/ksm.c                         |  3
-rw-r--r--  mm/memory_hotplug.c              | 10
-rw-r--r--  scripts/recordmcount.c           |  1
-rwxr-xr-x  scripts/recordmcount.pl          |  1
-rw-r--r--  tools/testing/nvdimm/test/nfit.c |  3
15 files changed, 125 insertions(+), 50 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 01bff8ea28d8..b003d0ca6238 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8745,7 +8745,7 @@ F: drivers/oprofile/
 F: include/linux/oprofile.h
 
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
-M: Mark Fasheh <mfasheh@suse.com>
+M: Mark Fasheh <mfasheh@versity.com>
 M: Joel Becker <jlbec@evilplan.org>
 L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W: http://ocfs2.wiki.kernel.org
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 80cc7c089a15..e1d5ea6d5e40 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -94,54 +94,50 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
     return to_acpi_device(acpi_desc->dev);
 }
 
-static int xlat_status(void *buf, unsigned int cmd)
+static int xlat_status(void *buf, unsigned int cmd, u32 status)
 {
     struct nd_cmd_clear_error *clear_err;
     struct nd_cmd_ars_status *ars_status;
-    struct nd_cmd_ars_start *ars_start;
-    struct nd_cmd_ars_cap *ars_cap;
     u16 flags;
 
     switch (cmd) {
     case ND_CMD_ARS_CAP:
-        ars_cap = buf;
-        if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
+        if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
             return -ENOTTY;
 
         /* Command failed */
-        if (ars_cap->status & 0xffff)
+        if (status & 0xffff)
             return -EIO;
 
         /* No supported scan types for this range */
         flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
-        if ((ars_cap->status >> 16 & flags) == 0)
+        if ((status >> 16 & flags) == 0)
             return -ENOTTY;
         break;
     case ND_CMD_ARS_START:
-        ars_start = buf;
         /* ARS is in progress */
-        if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
+        if ((status & 0xffff) == NFIT_ARS_START_BUSY)
             return -EBUSY;
 
         /* Command failed */
-        if (ars_start->status & 0xffff)
+        if (status & 0xffff)
             return -EIO;
         break;
     case ND_CMD_ARS_STATUS:
        ars_status = buf;
        /* Command failed */
-        if (ars_status->status & 0xffff)
+        if (status & 0xffff)
            return -EIO;
        /* Check extended status (Upper two bytes) */
-        if (ars_status->status == NFIT_ARS_STATUS_DONE)
+        if (status == NFIT_ARS_STATUS_DONE)
            return 0;
 
        /* ARS is in progress */
-        if (ars_status->status == NFIT_ARS_STATUS_BUSY)
+        if (status == NFIT_ARS_STATUS_BUSY)
            return -EBUSY;
 
        /* No ARS performed for the current boot */
-        if (ars_status->status == NFIT_ARS_STATUS_NONE)
+        if (status == NFIT_ARS_STATUS_NONE)
            return -EAGAIN;
 
        /*
@@ -149,19 +145,19 @@ static int xlat_status(void *buf, unsigned int cmd)
         * agent wants the scan to stop.  If we didn't overflow
         * then just continue with the returned results.
         */
-        if (ars_status->status == NFIT_ARS_STATUS_INTR) {
+        if (status == NFIT_ARS_STATUS_INTR) {
            if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
                return -ENOSPC;
            return 0;
        }
 
        /* Unknown status */
-        if (ars_status->status >> 16)
+        if (status >> 16)
            return -EIO;
        break;
     case ND_CMD_CLEAR_ERROR:
        clear_err = buf;
-        if (clear_err->status & 0xffff)
+        if (status & 0xffff)
            return -EIO;
        if (!clear_err->cleared)
            return -EIO;
@@ -172,6 +168,9 @@ static int xlat_status(void *buf, unsigned int cmd)
        break;
     }
 
+    /* all other non-zero status results in an error */
+    if (status)
+        return -EIO;
     return 0;
 }
 
@@ -186,10 +185,10 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
     struct nd_cmd_pkg *call_pkg = NULL;
     const char *cmd_name, *dimm_name;
     unsigned long cmd_mask, dsm_mask;
+    u32 offset, fw_status = 0;
     acpi_handle handle;
     unsigned int func;
     const u8 *uuid;
-    u32 offset;
     int rc, i;
 
     func = cmd;
@@ -317,6 +316,15 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
                 out_obj->buffer.pointer + offset, out_size);
         offset += out_size;
     }
+
+    /*
+     * Set fw_status for all the commands with a known format to be
+     * later interpreted by xlat_status().
+     */
+    if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
+            || (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
+        fw_status = *(u32 *) out_obj->buffer.pointer;
+
     if (offset + in_buf.buffer.length < buf_len) {
         if (i >= 1) {
             /*
@@ -325,7 +333,7 @@
             */
            rc = buf_len - offset - in_buf.buffer.length;
            if (cmd_rc)
-                *cmd_rc = xlat_status(buf, cmd);
+                *cmd_rc = xlat_status(buf, cmd, fw_status);
        } else {
            dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
                    __func__, dimm_name, cmd_name, buf_len,
@@ -335,7 +343,7 @@
     } else {
        rc = 0;
        if (cmd_rc)
-            *cmd_rc = xlat_status(buf, cmd);
+            *cmd_rc = xlat_status(buf, cmd, fw_status);
     }
 
 out:
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index cc07ba0f044d..27fa8b87cd5f 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -240,6 +240,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
     unsigned long flags;
     u32 val;
 
+    /* Reset ECC hardware */
+    davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+
     spin_lock_irqsave(&davinci_nand_lock, flags);
 
     /* Start 4-bit ECC calculation for read/write */
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index a59361c36f40..5513bfd9cdc9 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -2169,7 +2169,7 @@ scan_tail:
     return 0;
 
 return_error:
-    if (info->dma)
+    if (!IS_ERR_OR_NULL(info->dma))
        dma_release_channel(info->dma);
     if (nand_chip->ecc.priv) {
        nand_bch_free(nand_chip->ecc.priv);
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 715583f69d28..4d7bbd2df5c0 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -99,8 +99,11 @@ static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
     nvdimm_map->size = size;
     kref_init(&nvdimm_map->kref);
 
-    if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev)))
+    if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
+        dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
+                &offset, size, dev_name(dev));
        goto err_request_region;
+    }
 
     if (flags)
        nvdimm_map->mem = memremap(offset, size, flags);
@@ -171,6 +174,9 @@ void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
        kref_get(&nvdimm_map->kref);
     nvdimm_bus_unlock(dev);
 
+    if (!nvdimm_map)
+        return NULL;
+
     if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
        return NULL;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 8024a0ef86d3..0b78a8211f4a 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -52,10 +52,28 @@ struct nvdimm_drvdata {
 struct nd_region_data {
     int ns_count;
     int ns_active;
-    unsigned int flush_mask;
-    void __iomem *flush_wpq[0][0];
+    unsigned int hints_shift;
+    void __iomem *flush_wpq[0];
 };
 
+static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
+        int dimm, int hint)
+{
+    unsigned int num = 1 << ndrd->hints_shift;
+    unsigned int mask = num - 1;
+
+    return ndrd->flush_wpq[dimm * num + (hint & mask)];
+}
+
+static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
+        int hint, void __iomem *flush)
+{
+    unsigned int num = 1 << ndrd->hints_shift;
+    unsigned int mask = num - 1;
+
+    ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
+}
+
 static inline struct nd_namespace_index *to_namespace_index(
        struct nvdimm_drvdata *ndd, int i)
 {
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index e8d5ba7b29af..4c0ac4abb629 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -38,7 +38,7 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
 
     dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
            nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
-    for (i = 0; i < nvdimm->num_flush; i++) {
+    for (i = 0; i < (1 << ndrd->hints_shift); i++) {
        struct resource *res = &nvdimm->flush_wpq[i];
        unsigned long pfn = PHYS_PFN(res->start);
        void __iomem *flush_page;
@@ -54,14 +54,15 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
 
        if (j < i)
            flush_page = (void __iomem *) ((unsigned long)
-                    ndrd->flush_wpq[dimm][j] & PAGE_MASK);
+                    ndrd_get_flush_wpq(ndrd, dimm, j)
+                    & PAGE_MASK);
        else
            flush_page = devm_nvdimm_ioremap(dev,
-                    PHYS_PFN(pfn), PAGE_SIZE);
+                    PFN_PHYS(pfn), PAGE_SIZE);
        if (!flush_page)
            return -ENXIO;
-        ndrd->flush_wpq[dimm][i] = flush_page
-                + (res->start & ~PAGE_MASK);
+        ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
+                + (res->start & ~PAGE_MASK));
     }
 
     return 0;
@@ -93,7 +94,10 @@ int nd_region_activate(struct nd_region *nd_region)
        return -ENOMEM;
     dev_set_drvdata(dev, ndrd);
 
-    ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;
+    if (!num_flush)
+        return 0;
+
+    ndrd->hints_shift = ilog2(num_flush);
     for (i = 0; i < nd_region->ndr_mappings; i++) {
        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
        struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -900,8 +904,8 @@ void nvdimm_flush(struct nd_region *nd_region)
     */
    wmb();
    for (i = 0; i < nd_region->ndr_mappings; i++)
-        if (ndrd->flush_wpq[i][0])
-            writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);
+        if (ndrd_get_flush_wpq(ndrd, i, 0))
+            writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
    wmb();
 }
 EXPORT_SYMBOL_GPL(nvdimm_flush);
@@ -925,7 +929,7 @@ int nvdimm_has_flush(struct nd_region *nd_region)
 
    for (i = 0; i < nd_region->ndr_mappings; i++)
        /* flush hints present, flushing required */
-        if (ndrd->flush_wpq[i][0])
+        if (ndrd_get_flush_wpq(ndrd, i, 0))
            return 1;
 
    /*
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 66533e18276c..dc69df04abc1 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -718,7 +718,7 @@ static inline int dma_mmap_wc(struct device *dev,
 #define dma_mmap_writecombine dma_mmap_wc
 #endif
 
-#ifdef CONFIG_NEED_DMA_MAP_STATE
+#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)  dma_addr_t ADDR_NAME
 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)    __u32 LEN_NAME
 #define dma_unmap_addr(PTR, ADDR_NAME)    ((PTR)->ADDR_NAME)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 5e8dab5bf9ad..d6b729beba49 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -3446,9 +3446,28 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
     * Except for the root, subtree_control must be zero for a cgroup
     * with tasks so that child cgroups don't compete against tasks.
     */
-    if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
-        ret = -EBUSY;
-        goto out_unlock;
+    if (enable && cgroup_parent(cgrp)) {
+        struct cgrp_cset_link *link;
+
+        /*
+         * Because namespaces pin csets too, @cgrp->cset_links
+         * might not be empty even when @cgrp is empty.  Walk and
+         * verify each cset.
+         */
+        spin_lock_irq(&css_set_lock);
+
+        ret = 0;
+        list_for_each_entry(link, &cgrp->cset_links, cset_link) {
+            if (css_set_populated(link->cset)) {
+                ret = -EBUSY;
+                break;
+            }
+        }
+
+        spin_unlock_irq(&css_set_lock);
+
+        if (ret)
+            goto out_unlock;
     }
 
     /* save and update control masks and prepare csses */
@@ -3899,7 +3918,9 @@ void cgroup_file_notify(struct cgroup_file *cfile)
  * cgroup_task_count - count the number of tasks in a cgroup.
  * @cgrp: the cgroup in question
  *
- * Return the number of tasks in the cgroup.
+ * Return the number of tasks in the cgroup.  The returned number can be
+ * higher than the actual number of tasks due to css_set references from
+ * namespace roots and temporary usages.
  */
 static int cgroup_task_count(const struct cgroup *cgrp)
 {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index c27e53326bef..2b4c20ab5bbe 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -325,8 +325,7 @@ static struct file_system_type cpuset_fs_type = {
 /*
  * Return in pmask the portion of a cpusets's cpus_allowed that
  * are online.  If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online cpus.  The top
- * cpuset always has some cpus online.
+ * until we find one that does have some online cpus.
  *
  * One way or another, we guarantee to return some non-empty subset
  * of cpu_online_mask.
@@ -335,8 +334,20 @@ static struct file_system_type cpuset_fs_type = {
  */
 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
 {
-    while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
+    while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
        cs = parent_cs(cs);
+        if (unlikely(!cs)) {
+            /*
+             * The top cpuset doesn't have any online cpu as a
+             * consequence of a race between cpuset_hotplug_work
+             * and cpu hotplug notifier.  But we know the top
+             * cpuset's effective_cpus is on its way to to be
+             * identical to cpu_online_mask.
+             */
+            cpumask_copy(pmask, cpu_online_mask);
+            return;
+        }
+    }
     cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
 }
 
@@ -2074,7 +2085,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
  * which could have been changed by cpuset just after it inherits the
  * state from the parent and before it sits on the cgroup's task list.
  */
-void cpuset_fork(struct task_struct *task)
+static void cpuset_fork(struct task_struct *task)
 {
     if (task_css_is_root(task, cpuset_cgrp_id))
        return;
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -283,7 +283,8 @@ static inline struct rmap_item *alloc_rmap_item(void)
 {
     struct rmap_item *rmap_item;
 
-    rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+    rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
+                        __GFP_NORETRY | __GFP_NOWARN);
     if (rmap_item)
        ksm_rmap_items++;
     return rmap_item;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b58906b6215c..9d29ba0f7192 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1555,8 +1555,8 @@ static struct page *new_node_page(struct page *page, unsigned long private,
 {
     gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
     int nid = page_to_nid(page);
-    nodemask_t nmask = node_online_map;
-    struct page *new_page;
+    nodemask_t nmask = node_states[N_MEMORY];
+    struct page *new_page = NULL;
 
     /*
     * TODO: allocate a destination hugepage from a nearest neighbor node,
@@ -1567,14 +1567,14 @@ static struct page *new_node_page(struct page *page, unsigned long private,
        return alloc_huge_page_node(page_hstate(compound_head(page)),
                    next_node_in(nid, nmask));
 
-    if (nid != next_node_in(nid, nmask))
-        node_clear(nid, nmask);
+    node_clear(nid, nmask);
 
     if (PageHighMem(page)
        || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
        gfp_mask |= __GFP_HIGHMEM;
 
-    new_page = __alloc_pages_nodemask(gfp_mask, 0,
+    if (!nodes_empty(nmask))
+        new_page = __alloc_pages_nodemask(gfp_mask, 0,
            node_zonelist(nid, gfp_mask), &nmask);
     if (!new_page)
        new_page = __alloc_pages(gfp_mask, 0,
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 42396a74405d..a68f03133df9 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -363,6 +363,7 @@ is_mcounted_section_name(char const *const txtname)
        strcmp(".sched.text", txtname) == 0 ||
        strcmp(".spinlock.text", txtname) == 0 ||
        strcmp(".irqentry.text", txtname) == 0 ||
+        strcmp(".softirqentry.text", txtname) == 0 ||
        strcmp(".kprobes.text", txtname) == 0 ||
        strcmp(".text.unlikely", txtname) == 0;
 }
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 96e2486a6fc4..2d48011bc362 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -134,6 +134,7 @@ my %text_sections = (
     ".sched.text" => 1,
     ".spinlock.text" => 1,
     ".irqentry.text" => 1,
+     ".softirqentry.text" => 1,
     ".kprobes.text" => 1,
     ".text.unlikely" => 1,
 );
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index dd48f421844c..f64c57bf1d4b 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -603,7 +603,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
            return -ENOMEM;
        sprintf(t->label[i], "label%d", i);
 
-        t->flush[i] = test_alloc(t, sizeof(u64) * NUM_HINTS,
+        t->flush[i] = test_alloc(t, max(PAGE_SIZE,
+                    sizeof(u64) * NUM_HINTS),
                &t->flush_dma[i]);
        if (!t->flush[i])
            return -ENOMEM;
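
A note for readers following the drivers/nvdimm/nd.h change above: the patch replaces the two-dimensional flush_wpq[0][0] array and flush_mask with a flat flush_wpq[] array plus a hints_shift, and all indexing goes through ndrd_get_flush_wpq()/ndrd_set_flush_wpq(). The sketch below is a minimal, stand-alone user-space illustration of that indexing scheme, not kernel code; the structure layout and helper bodies mirror the patch, while main(), the plain "void *" table, and the chosen sizes are illustrative assumptions.

/* Stand-alone sketch of the flat flush-hint table indexing from nd.h.
 * Plain pointers replace void __iomem * so this compiles in user space.
 */
#include <stdio.h>
#include <stdlib.h>

struct nd_region_data {
    unsigned int hints_shift;   /* log2 of flush hints per dimm, as in the patch */
    void *flush_wpq[];          /* flat: ndr_mappings * (1 << hints_shift) slots */
};

static void *ndrd_get_flush_wpq(struct nd_region_data *ndrd, int dimm, int hint)
{
    unsigned int num = 1 << ndrd->hints_shift;
    unsigned int mask = num - 1;

    return ndrd->flush_wpq[dimm * num + (hint & mask)];
}

static void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
        int hint, void *flush)
{
    unsigned int num = 1 << ndrd->hints_shift;
    unsigned int mask = num - 1;

    ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
}

int main(void)
{
    int dimms = 2, hints = 4;   /* illustrative sizes, not from the patch */
    static int dummy_hint;      /* stands in for a mapped flush page */
    struct nd_region_data *ndrd;

    ndrd = calloc(1, sizeof(*ndrd) + dimms * hints * sizeof(void *));
    if (!ndrd)
        return 1;
    ndrd->hints_shift = 2;      /* ilog2(4) */

    /* Populate dimm 1, hint 3, then read it back; hint 7 wraps to the same slot. */
    ndrd_set_flush_wpq(ndrd, 1, 3, &dummy_hint);
    printf("dimm 1 hint 3: %p\n", ndrd_get_flush_wpq(ndrd, 1, 3));
    printf("dimm 1 hint 7: %p\n", ndrd_get_flush_wpq(ndrd, 1, 7));

    free(ndrd);
    return 0;
}

Compared with the old flush_wpq[dimm][hint] layout, the runtime-sized flat array avoids the zero-length two-dimensional member, and nd_region_activate() can now skip the table entirely when a region has no flush hints (the new "if (!num_flush) return 0;" path in region_devs.c).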