mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-11-01 02:21:16 +00:00
Merge branch 'drm-intel-fixes-2' of ssh://master.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel into drm-fixes
* 'drm-intel-fixes-2' of ssh://master.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel: (30 commits)
  drm/i915: Prevent uninitialised reads during error state capture
  drm/i915: Use consistent mappings for OpRegion between ACPI and i915
  drm/i915: Handle the no-interrupts case for UMS by polling
  drm/i915: Disable high-precision vblank timestamping for UMS
  drm/i915: Increase the amount of defense before computing vblank timestamps
  drm/i915,agp/intel: Do not clear stolen entries
  Remove MAYBE_BUILD_BUG_ON
  BUILD_BUG_ON: make it handle more cases
  module: fix missing semicolons in MODULE macro usage
  param: add null statement to compiled-in module params
  module: fix linker error for MODULE_VERSION when !MODULE and CONFIG_SYSFS=n
  module: show version information for built-in modules in sysfs
  selinux: return -ENOMEM when memory allocation fails
  tpm: fix panic caused by "tpm: Autodetect itpm devices"
  TPM: Long default timeout fix
  trusted keys: Fix a memory leak in trusted_update().
  keys: add trusted and encrypted maintainers
  encrypted-keys: rename encrypted_defined files to encrypted
  trusted-keys: rename trusted_defined files to trusted
  drm/i915: Recognise non-VGA display devices
  ...
commit abb72c8288
38 changed files with 350 additions and 108 deletions
@@ -217,8 +217,8 @@ X!Isound/sound_firmware.c
 <chapter id="uart16x50">
 <title>16x50 UART Driver</title>
 !Iinclude/linux/serial_core.h
-!Edrivers/serial/serial_core.c
-!Edrivers/serial/8250.c
+!Edrivers/tty/serial/serial_core.c
+!Edrivers/tty/serial/8250.c
 </chapter>

 <chapter id="fbdev">

MAINTAINERS (22 changed lines)

@@ -3674,6 +3674,28 @@ F: include/linux/key-type.h
 F: include/keys/
 F: security/keys/
+
+KEYS-TRUSTED
+M: David Safford <safford@watson.ibm.com>
+M: Mimi Zohar <zohar@us.ibm.com>
+L: linux-security-module@vger.kernel.org
+L: keyrings@linux-nfs.org
+S: Supported
+F: Documentation/keys-trusted-encrypted.txt
+F: include/keys/trusted-type.h
+F: security/keys/trusted.c
+F: security/keys/trusted.h
+
+KEYS-ENCRYPTED
+M: Mimi Zohar <zohar@us.ibm.com>
+M: David Safford <safford@watson.ibm.com>
+L: linux-security-module@vger.kernel.org
+L: keyrings@linux-nfs.org
+S: Supported
+F: Documentation/keys-trusted-encrypted.txt
+F: include/keys/encrypted-type.h
+F: security/keys/encrypted.c
+F: security/keys/encrypted.h

 KGDB / KDB /debug_core
 M: Jason Wessel <jason.wessel@windriver.com>
 W: http://kgdb.wiki.kernel.org/

@@ -68,6 +68,7 @@ static struct _intel_private {
 phys_addr_t gma_bus_addr;
 u32 PGETBL_save;
 u32 __iomem *gtt; /* I915G */
+bool clear_fake_agp; /* on first access via agp, fill with scratch */
 int num_dcache_entries;
 union {
 void __iomem *i9xx_flush_page;

@@ -869,21 +870,12 @@ static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)

 static int intel_fake_agp_configure(void)
 {
-int i;
-
 if (!intel_enable_gtt())
 return -EIO;

+intel_private.clear_fake_agp = true;
 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

-for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
-intel_private.driver->write_entry(intel_private.scratch_page_dma,
-i, 0);
-}
-readl(intel_private.gtt+i-1); /* PCI Posting. */
-
-global_cache_flush();
-
 return 0;
 }

@@ -945,6 +937,13 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 {
 int ret = -EINVAL;

+if (intel_private.clear_fake_agp) {
+int start = intel_private.base.stolen_size / PAGE_SIZE;
+int end = intel_private.base.gtt_mappable_entries;
+intel_gtt_clear_range(start, end - start);
+intel_private.clear_fake_agp = false;
+}
+
 if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
 return i810_insert_dcache_entries(mem, pg_start, type);

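The deferred clear above only scrubs GTT entries the driver actually owns: it starts after the BIOS-reserved stolen region and stops at the end of the mappable aperture. A minimal standalone sketch of that range computation, using invented sizes (a 32 MiB stolen region and a 256 MiB mappable aperture) purely for illustration:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
        /* Illustrative values only; the real numbers come from the hardware. */
        unsigned int stolen_size = 32u << 20;                    /* bytes kept by the BIOS */
        unsigned int gtt_mappable_entries = (256u << 20) / PAGE_SIZE;

        unsigned int start = stolen_size / PAGE_SIZE;            /* first entry we may touch */
        unsigned int end = gtt_mappable_entries;                 /* one past the last entry */

        /* intel_gtt_clear_range(start, end - start) would scrub this many PTEs. */
        printf("clearing %u entries, leaving the first %u (stolen) untouched\n",
               end - start, start);
        return 0;
    }
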
@@ -364,12 +364,14 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
 tpm_protected_ordinal_duration[ordinal &
 TPM_PROTECTED_ORDINAL_MASK];

-if (duration_idx != TPM_UNDEFINED)
+if (duration_idx != TPM_UNDEFINED) {
 duration = chip->vendor.duration[duration_idx];
-if (duration <= 0)
+/* if duration is 0, it's because chip->vendor.duration wasn't */
+/* filled yet, so we set the lowest timeout just to give enough */
+/* time for tpm_get_timeouts() to succeed */
+return (duration <= 0 ? HZ : duration);
+} else
 return 2 * 60 * HZ;
-else
-return duration;
 }
 EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);

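The comments capture the intent of the fix: a duration of zero means tpm_get_timeouts() has not yet populated chip->vendor.duration, so the code now waits HZ jiffies (one second) instead of falling back to the old two-minute default. A small standalone illustration of that selection logic, with invented values:

    #include <stdio.h>

    #define HZ 250   /* illustrative tick rate; the real value is a kernel config choice */

    /* Mirrors the fixed selection: a non-positive duration means "not measured yet". */
    static unsigned long pick_timeout(long duration)
    {
        return duration <= 0 ? HZ : duration;
    }

    int main(void)
    {
        printf("unfilled duration -> %lu jiffies (one second)\n", pick_timeout(0));
        printf("measured duration -> %lu jiffies\n", pick_timeout(3 * HZ));
        return 0;
    }
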
@@ -493,9 +493,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

-if (is_itpm(to_pnp_dev(dev)))
-itpm = 1;
-
 if (itpm)
 dev_info(dev, "Intel iTPM workaround enabled\n");

@@ -637,6 +634,9 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
 else
 interrupts = 0;

+if (is_itpm(pnp_dev))
+itpm = 1;
+
 return tpm_tis_init(&pnp_dev->dev, start, len, irq);
 }

@@ -100,7 +100,10 @@ config DRM_I830
 config DRM_I915
 tristate "i915 driver"
 depends on AGP_INTEL
+# we need shmfs for the swappable backing store, and in particular
+# the shmem_readpage() which depends upon tmpfs
 select SHMEM
+select TMPFS
 select DRM_KMS_HELPER
 select FB_CFB_FILLRECT
 select FB_CFB_COPYAREA

@@ -152,7 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
 drm_i915_private_t *dev_priv = dev->dev_private;
 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-struct intel_ring_buffer *ring = LP_RING(dev_priv);
+int ret;

 master_priv->sarea = drm_getsarea(dev);
 if (master_priv->sarea) {

@@ -163,33 +163,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 }

 if (init->ring_size != 0) {
-if (ring->obj != NULL) {
+if (LP_RING(dev_priv)->obj != NULL) {
 i915_dma_cleanup(dev);
 DRM_ERROR("Client tried to initialize ringbuffer in "
 "GEM mode\n");
 return -EINVAL;
 }

-ring->size = init->ring_size;
-
-ring->map.offset = init->ring_start;
-ring->map.size = init->ring_size;
-ring->map.type = 0;
-ring->map.flags = 0;
-ring->map.mtrr = 0;
-
-drm_core_ioremap_wc(&ring->map, dev);
-
-if (ring->map.handle == NULL) {
+ret = intel_render_ring_init_dri(dev,
+init->ring_start,
+init->ring_size);
+if (ret) {
 i915_dma_cleanup(dev);
-DRM_ERROR("can not ioremap virtual address for"
-" ring buffer\n");
-return -ENOMEM;
+return ret;
 }
 }

-ring->virtual_start = ring->map.handle;
-
 dev_priv->cpp = init->cpp;
 dev_priv->back_offset = init->back_offset;
 dev_priv->front_offset = init->front_offset;

@@ -1226,9 +1215,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
 if (ret)
 DRM_INFO("failed to find VBIOS tables\n");

-/* if we have > 1 VGA cards, then disable the radeon VGA resources */
+/* If we have > 1 VGA cards, then we need to arbitrate access
+* to the common VGA resources.
+*
+* If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+* then we do not take part in VGA arbitration and the
+* vga_client_register() fails with -ENODEV.
+*/
 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-if (ret)
+if (ret && ret != -ENODEV)
 goto cleanup_ringbuffer;

 intel_register_dsm_handler();

@@ -60,7 +60,7 @@ extern int intel_agp_enabled;

 #define INTEL_VGA_DEVICE(id, info) { \
 .class = PCI_CLASS_DISPLAY_VGA << 8, \
-.class_mask = 0xffff00, \
+.class_mask = 0xff0000, \
 .vendor = 0x8086, \
 .device = id, \
 .subvendor = PCI_ANY_ID, \

@@ -752,6 +752,9 @@ static int __init i915_init(void)
 driver.driver_features &= ~DRIVER_MODESET;
 #endif

+if (!(driver.driver_features & DRIVER_MODESET))
+driver.get_vblank_timestamp = NULL;
+
 return drm_init(&driver);
 }

@@ -543,8 +543,11 @@ typedef struct drm_i915_private {
 /** List of all objects in gtt_space. Used to restore gtt
 * mappings on resume */
 struct list_head gtt_list;
-/** End of mappable part of GTT */
+
+/** Usable portion of the GTT for GEM */
+unsigned long gtt_start;
 unsigned long gtt_mappable_end;
+unsigned long gtt_end;

 struct io_mapping *gtt_mapping;
 int gtt_mtrr;

@@ -140,12 +140,16 @@ void i915_gem_do_init(struct drm_device *dev,
 {
 drm_i915_private_t *dev_priv = dev->dev_private;

-drm_mm_init(&dev_priv->mm.gtt_space, start,
-end - start);
+drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);

+dev_priv->mm.gtt_start = start;
+dev_priv->mm.gtt_mappable_end = mappable_end;
+dev_priv->mm.gtt_end = end;
 dev_priv->mm.gtt_total = end - start;
 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-dev_priv->mm.gtt_mappable_end = mappable_end;
+
+/* Take over this portion of the GTT */
+intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
 }

 int

@@ -1857,7 +1861,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,

 seqno = ring->get_seqno(ring);

-for (i = 0; i < I915_NUM_RINGS; i++)
+for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
 if (seqno >= ring->sync_seqno[i])
 ring->sync_seqno[i] = 0;

@@ -1175,7 +1175,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 goto err;

 seqno = i915_gem_next_request_seqno(dev, ring);
-for (i = 0; i < I915_NUM_RINGS-1; i++) {
+for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
 if (seqno < ring->sync_seqno[i]) {
 /* The GPU can not handle its semaphore value wrapping,
 * so every billion or so execbuffers, we need to stall

@@ -34,6 +34,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 struct drm_i915_private *dev_priv = dev->dev_private;
 struct drm_i915_gem_object *obj;

+/* First fill our portion of the GTT with scratch pages */
+intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+
 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 i915_gem_clflush_object(obj);

@@ -274,24 +274,35 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 return ret;
 }

-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 int *max_error,
 struct timeval *vblank_time,
 unsigned flags)
 {
-struct drm_crtc *drmcrtc;
+struct drm_i915_private *dev_priv = dev->dev_private;
+struct drm_crtc *crtc;

-if (crtc < 0 || crtc >= dev->num_crtcs) {
-DRM_ERROR("Invalid crtc %d\n", crtc);
+if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+DRM_ERROR("Invalid crtc %d\n", pipe);
 return -EINVAL;
 }

 /* Get drm_crtc to timestamp: */
-drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+crtc = intel_get_crtc_for_pipe(dev, pipe);
+if (crtc == NULL) {
+DRM_ERROR("Invalid crtc %d\n", pipe);
+return -EINVAL;
+}
+
+if (!crtc->enabled) {
+DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+return -EBUSY;
+}

 /* Helper routine in DRM core does all the work: */
-return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
-vblank_time, flags, drmcrtc);
+return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+vblank_time, flags,
+crtc);
 }

 /*

@@ -348,8 +359,12 @@ static void notify_ring(struct drm_device *dev,
 struct intel_ring_buffer *ring)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
-u32 seqno = ring->get_seqno(ring);
+u32 seqno;

+if (ring->obj == NULL)
+return;
+
+seqno = ring->get_seqno(ring);
 trace_i915_gem_request_complete(dev, seqno);

 ring->irq_seqno = seqno;

@@ -831,6 +846,8 @@ static void i915_capture_error_state(struct drm_device *dev)
 i++;
 error->pinned_bo_count = i - error->active_bo_count;

+error->active_bo = NULL;
+error->pinned_bo = NULL;
 if (i) {
 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
 GFP_ATOMIC);

@@ -1278,12 +1295,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 if (master_priv->sarea_priv)
 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

-ret = -ENODEV;
 if (ring->irq_get(ring)) {
 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
 READ_BREADCRUMB(dev_priv) >= irq_nr);
 ring->irq_put(ring);
-}
+} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+ret = -EBUSY;

 if (ret == -EBUSY) {
 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",

@@ -513,6 +513,10 @@
 #define GEN6_BLITTER_SYNC_STATUS (1 << 24)
 #define GEN6_BLITTER_USER_INTERRUPT (1 << 22)

+#define GEN6_BLITTER_ECOSKPD 0x221d0
+#define GEN6_BLITTER_LOCK_SHIFT 16
+#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)

@@ -2626,6 +2630,8 @@
 #define DISPLAY_PORT_PLL_BIOS_2 0x46014

 #define PCH_DSPCLK_GATE_D 0x42020
+# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
 # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
 # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)

@@ -1213,6 +1213,26 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }

+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+struct drm_i915_private *dev_priv = dev->dev_private;
+u32 blt_ecoskpd;
+
+/* Make sure blitter notifies FBC of writes */
+__gen6_force_wake_get(dev_priv);
+blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+GEN6_BLITTER_LOCK_SHIFT;
+I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+GEN6_BLITTER_LOCK_SHIFT);
+I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+POSTING_READ(GEN6_BLITTER_ECOSKPD);
+__gen6_force_wake_put(dev_priv);
+}
+
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
 struct drm_device *dev = crtc->dev;

@@ -1266,6 +1286,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 I915_WRITE(SNB_DPFC_CTL_SA,
 SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+sandybridge_blit_fbc_update(dev);
 }

 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);

@@ -6286,7 +6307,9 @@ void intel_enable_clock_gating(struct drm_device *dev)

 if (IS_GEN5(dev)) {
 /* Required for FBC */
-dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+DPFCRUNIT_CLOCK_GATE_DISABLE |
+DPFDUNIT_CLOCK_GATE_DISABLE;
 /* Required for CxSR */
 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

@@ -26,6 +26,7 @@
 */

 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <acpi/video.h>

 #include "drmP.h"

@@ -476,7 +477,7 @@ int intel_opregion_setup(struct drm_device *dev)
 return -ENOTSUPP;
 }

-base = ioremap(asls, OPREGION_SIZE);
+base = acpi_os_ioremap(asls, OPREGION_SIZE);
 if (!base)
 return -ENOMEM;

@@ -34,6 +34,14 @@
 #include "i915_trace.h"
 #include "intel_drv.h"

+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+if (space < 0)
+space += ring->size;
+return space;
+}
+
 static u32 i915_gem_get_seqno(struct drm_device *dev)
 {
 drm_i915_private_t *dev_priv = dev->dev_private;

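ring_space() folds the previously open-coded free-space computation into one helper: free space is the gap between where the GPU is reading (head) and where the CPU will write next (tail plus an 8-byte guard), wrapping around the ring size when tail has run ahead of head. A standalone sketch of the same arithmetic with invented sizes, just to show the wraparound:

    #include <stdio.h>

    /* Toy model of the ring bookkeeping; field names mirror the driver, values are made up. */
    struct toy_ring {
        unsigned int head;   /* GPU read offset, in bytes */
        unsigned int tail;   /* CPU write offset, in bytes */
        unsigned int size;   /* total ring size, in bytes */
    };

    static int toy_ring_space(const struct toy_ring *ring)
    {
        int space = (int)ring->head - (int)(ring->tail + 8);
        if (space < 0)
            space += ring->size;   /* tail has wrapped past head */
        return space;
    }

    int main(void)
    {
        struct toy_ring r = { .head = 256, .tail = 4000, .size = 4096 };
        printf("free bytes: %d\n", toy_ring_space(&r));   /* 256 - 4008 + 4096 = 344 */
        return 0;
    }
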
@@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 i915_kernel_lost_context(ring->dev);
 else {
-ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ring->head = I915_READ_HEAD(ring);
 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-ring->space = ring->head - (ring->tail + 8);
-if (ring->space < 0)
-ring->space += ring->size;
+ring->space = ring_space(ring);
 }

 return 0;

@@ -921,32 +927,34 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 }

 ring->tail = 0;
-ring->space = ring->head - 8;
+ring->space = ring_space(ring);

 return 0;
 }

 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
-int reread = 0;
 struct drm_device *dev = ring->dev;
 struct drm_i915_private *dev_priv = dev->dev_private;
 unsigned long end;
 u32 head;

+/* If the reported head position has wrapped or hasn't advanced,
+* fallback to the slow and accurate path.
+*/
+head = intel_read_status_page(ring, 4);
+if (head > ring->head) {
+ring->head = head;
+ring->space = ring_space(ring);
+if (ring->space >= n)
+return 0;
+}
+
 trace_i915_ring_wait_begin (dev);
 end = jiffies + 3 * HZ;
 do {
-/* If the reported head position has wrapped or hasn't advanced,
-* fallback to the slow and accurate path.
-*/
-head = intel_read_status_page(ring, 4);
-if (reread)
-head = I915_READ_HEAD(ring);
-ring->head = head & HEAD_ADDR;
-ring->space = ring->head - (ring->tail + 8);
-if (ring->space < 0)
-ring->space += ring->size;
+ring->head = I915_READ_HEAD(ring);
+ring->space = ring_space(ring);
 if (ring->space >= n) {
 trace_i915_ring_wait_end(dev);
 return 0;

@@ -961,7 +969,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 msleep(1);
 if (atomic_read(&dev_priv->mm.wedged))
 return -EAGAIN;
-reread = 1;
 } while (!time_after(jiffies, end));
 trace_i915_ring_wait_end (dev);
 return -EBUSY;

@@ -1292,6 +1299,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 return intel_init_ring_buffer(dev, ring);
 }

+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+drm_i915_private_t *dev_priv = dev->dev_private;
+struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+*ring = render_ring;
+if (INTEL_INFO(dev)->gen >= 6) {
+ring->add_request = gen6_add_request;
+ring->irq_get = gen6_render_ring_get_irq;
+ring->irq_put = gen6_render_ring_put_irq;
+} else if (IS_GEN5(dev)) {
+ring->add_request = pc_render_add_request;
+ring->get_seqno = pc_render_get_seqno;
+}
+
+ring->dev = dev;
+INIT_LIST_HEAD(&ring->active_list);
+INIT_LIST_HEAD(&ring->request_list);
+INIT_LIST_HEAD(&ring->gpu_write_list);
+
+ring->size = size;
+ring->effective_size = ring->size;
+if (IS_I830(ring->dev))
+ring->effective_size -= 128;
+
+ring->map.offset = start;
+ring->map.size = size;
+ring->map.type = 0;
+ring->map.flags = 0;
+ring->map.mtrr = 0;
+
+drm_core_ioremap_wc(&ring->map, dev);
+if (ring->map.handle == NULL) {
+DRM_ERROR("can not ioremap virtual address for"
+" ring buffer\n");
+return -ENOMEM;
+}
+
+ring->virtual_start = (void __force __iomem *)ring->map.handle;
+return 0;
+}
+
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 drm_i915_private_t *dev_priv = dev->dev_private;

@@ -166,4 +166,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

 #endif /* _INTEL_RINGBUFFER_H_ */

@@ -636,7 +636,7 @@ int vga_client_register(struct pci_dev *pdev, void *cookie,
 void (*irq_set_state)(void *cookie, bool state),
 unsigned int (*set_vga_decode)(void *cookie, bool decode))
 {
-int ret = -1;
+int ret = -ENODEV;
 struct vga_device *vgadev;
 unsigned long flags;

@@ -1644,7 +1644,7 @@ ks8695_cleanup(void)
 module_init(ks8695_init);
 module_exit(ks8695_cleanup);

-MODULE_AUTHOR("Simtec Electronics")
+MODULE_AUTHOR("Simtec Electronics");
 MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" MODULENAME);

@@ -943,6 +943,8 @@ static int rio_enum_complete(struct rio_mport *port)
 * @port: Master port to send transactions
 * @destid: Current destination ID in network
 * @hopcount: Number of hops into the network
+* @prev: previous rio_dev
+* @prev_port: previous port number
 *
 * Recursively discovers a RIO network. Transactions are sent via the
 * master port passed in @port.

@@ -176,6 +176,7 @@ static void d_free(struct dentry *dentry)

 /**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
+* @dentry: the target dentry
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).

@@ -281,6 +282,7 @@ static void dentry_lru_move_tail(struct dentry *dentry)
 /**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
+* @parent: parent dentry
 *
 * The dentry must already be unhashed and removed from the LRU.
 *

@@ -1973,7 +1975,7 @@ out:
 /**
 * d_validate - verify dentry provided from insecure source (deprecated)
 * @dentry: The dentry alleged to be valid child of @dparent
-* @parent: The parent dentry (known to be valid)
+* @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.

@@ -364,6 +364,13 @@
 VMLINUX_SYMBOL(__start___param) = .; \
 *(__param) \
 VMLINUX_SYMBOL(__stop___param) = .; \
+} \
+\
+/* Built-in module versions. */ \
+__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
+VMLINUX_SYMBOL(__start___modver) = .; \
+*(__modver) \
+VMLINUX_SYMBOL(__stop___modver) = .; \
 . = ALIGN((align)); \
 VMLINUX_SYMBOL(__end_rodata) = .; \
 } \

@@ -249,7 +249,7 @@ static inline enum zone_type gfp_zone(gfp_t flags)
 ((1 << ZONES_SHIFT) - 1);

 if (__builtin_constant_p(bit))
-MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
 else {
 #ifdef CONFIG_DEBUG_VM
 BUG_ON((GFP_ZONE_BAD >> bit) & 1);

@@ -575,12 +575,6 @@ struct sysinfo {
 char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */
 };

-/* Force a compilation error if condition is true */
-#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
-
-/* Force a compilation error if condition is constant and true */
-#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
-
 /* Force a compilation error if a constant expression is not a power of 2 */
 #define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
 BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))

@@ -592,6 +586,32 @@ struct sysinfo {
 #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
 #define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))

+/**
+* BUILD_BUG_ON - break compile if a condition is true.
+* @cond: the condition which the compiler should know is false.
+*
+* If you have some code which relies on certain constants being equal, or
+* other compile-time-evaluated condition, you should use BUILD_BUG_ON to
+* detect if someone changes it.
+*
+* The implementation uses gcc's reluctance to create a negative array, but
+* gcc (as of 4.4) only emits that error for obvious cases (eg. not arguments
+* to inline functions). So as a fallback we use the optimizer; if it can't
+* prove the condition is false, it will cause a link error on the undefined
+* "__build_bug_on_failed". This error message can be harder to track down
+* though, hence the two different methods.
+*/
+#ifndef __OPTIMIZE__
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else
+extern int __build_bug_on_failed;
+#define BUILD_BUG_ON(condition) \
+do { \
+((void)sizeof(char[1 - 2*!!(condition)])); \
+if (condition) __build_bug_on_failed = 1; \
+} while(0)
+#endif
+
 /* Trap pasters of __FUNCTION__ at compile-time */
 #define __FUNCTION__ (__func__)

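The new comment describes the two mechanisms: a negative-sized array for conditions the compiler can evaluate directly, and an undefined symbol (__build_bug_on_failed) that produces a link error if the optimizer cannot prove the condition false. A hedged usage sketch, using a local copy of the simple (non-optimized) form so it stands alone; the struct and its expected size are invented for illustration:

    /* Simplified local stand-in for BUILD_BUG_ON, illustration only. */
    #define MY_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))

    struct wire_header {
        unsigned int magic;
        unsigned short version;
        unsigned short length;
    };

    static void check_layout(void)
    {
        /* Fails the build if the on-wire header ever changes size. */
        MY_BUILD_BUG_ON(sizeof(struct wire_header) != 8);
    }
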
@@ -76,7 +76,7 @@ bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
 \
 _n = (long) &((ptr)->name##_end) \
 - (long) &((ptr)->name##_begin); \
-MAYBE_BUILD_BUG_ON(_n < 0); \
+BUILD_BUG_ON(_n < 0); \
 \
 kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
 } while (0)

@@ -58,6 +58,12 @@ struct module_attribute {
 void (*free)(struct module *);
 };

+struct module_version_attribute {
+struct module_attribute mattr;
+const char *module_name;
+const char *version;
+};
+
 struct module_kobject
 {
 struct kobject kobj;

@@ -161,7 +167,28 @@ extern struct module __this_module;
 Using this automatically adds a checksum of the .c files and the
 local headers in "srcversion".
 */
+
+#if defined(MODULE) || !defined(CONFIG_SYSFS)
 #define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#else
+#define MODULE_VERSION(_version) \
+extern ssize_t __modver_version_show(struct module_attribute *, \
+struct module *, char *); \
+static struct module_version_attribute __modver_version_attr \
+__used \
+__attribute__ ((__section__ ("__modver"),aligned(sizeof(void *)))) \
+= { \
+.mattr = { \
+.attr = { \
+.name = "version", \
+.mode = S_IRUGO, \
+}, \
+.show = __modver_version_show, \
+}, \
+.module_name = KBUILD_MODNAME, \
+.version = _version, \
+}
+#endif

 /* Optional firmware file (or files) needed by the module
 * format is simply firmware file name. Multiple firmware

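With this change MODULE_VERSION() no longer compiles away for built-in code: when CONFIG_SYSFS is enabled it emits a struct module_version_attribute into the new __modver section, which version_sysfs_builtin() later walks to create /sys/module/<name>/version. A minimal usage sketch for a hypothetical driver (the name and version string are invented; the macros are the standard module ones shown in this diff):

    #include <linux/module.h>
    #include <linux/init.h>

    static int __init toydrv_init(void)
    {
        pr_info("toydrv loaded\n");
        return 0;
    }

    static void __exit toydrv_exit(void)
    {
    }

    module_init(toydrv_init);
    module_exit(toydrv_exit);

    MODULE_DESCRIPTION("Toy driver illustrating MODULE_VERSION");
    MODULE_LICENSE("GPL");
    /* Appears as /sys/module/toydrv/version whether built in or loaded as a module. */
    MODULE_VERSION("1.0");
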
@@ -16,15 +16,17 @@
 /* Chosen so that structs with an unsigned long line up. */
 #define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))

-#ifdef MODULE
 #define ___module_cat(a,b) __mod_ ## a ## b
 #define __module_cat(a,b) ___module_cat(a,b)
+#ifdef MODULE
 #define __MODULE_INFO(tag, name, info) \
 static const char __module_cat(name,__LINE__)[] \
 __used __attribute__((section(".modinfo"), unused, aligned(1))) \
 = __stringify(tag) "=" info
 #else /* !MODULE */
-#define __MODULE_INFO(tag, name, info)
+/* This struct is here for syntactic coherency, it is not used */
+#define __MODULE_INFO(tag, name, info) \
+struct __module_cat(name,__LINE__) {}
 #endif
 #define __MODULE_PARM_TYPE(name, _type) \
 __MODULE_INFO(parmtype, name##type, #name ":" _type)

@@ -109,7 +109,10 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
 unsigned int fbit)
 {
 /* Did you forget to fix assumptions on max features? */
-MAYBE_BUILD_BUG_ON(fbit >= 32);
+if (__builtin_constant_p(fbit))
+BUILD_BUG_ON(fbit >= 32);
+else
+BUG_ON(fbit >= 32);

 if (fbit < VIRTIO_TRANSPORT_F_START)
 virtio_check_driver_offered_feature(vdev, fbit);

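This hunk shows the pattern used where the argument may or may not be a compile-time constant: if the compiler can see the value, the check costs nothing and fails the build; otherwise it degrades to a runtime assertion. A standalone sketch of the same split with local stand-in macros (the names are invented; the kernel versions are BUILD_BUG_ON and BUG_ON):

    #include <assert.h>

    /* Simplified stand-ins, for illustration only. */
    #define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2*!!(cond)]))
    #define MY_BUG_ON(cond)       assert(!(cond))

    /* With optimization, constant arguments propagate through inlining and are
     * rejected at compile time; variable arguments fall through to the assert. */
    static inline void check_feature_bit(unsigned int fbit)
    {
        if (__builtin_constant_p(fbit))
            MY_BUILD_BUG_ON(fbit >= 32);
        else
            MY_BUG_ON(fbit >= 32);
    }

    int main(void)
    {
        check_feature_bit(5);   /* in range, passes either way */
        return 0;
    }
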
@@ -719,9 +719,7 @@ void destroy_params(const struct kernel_param *params, unsigned num)
 params[i].ops->free(params[i].arg);
 }

-static void __init kernel_add_sysfs_param(const char *name,
-struct kernel_param *kparam,
-unsigned int name_skip)
+static struct module_kobject * __init locate_module_kobject(const char *name)
 {
 struct module_kobject *mk;
 struct kobject *kobj;

@@ -729,10 +727,7 @@ static void __init kernel_add_sysfs_param(const char *name,

 kobj = kset_find_obj(module_kset, name);
 if (kobj) {
-/* We already have one. Remove params so we can add more. */
 mk = to_module_kobject(kobj);
-/* We need to remove it before adding parameters. */
-sysfs_remove_group(&mk->kobj, &mk->mp->grp);
 } else {
 mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
 BUG_ON(!mk);

@@ -743,15 +738,36 @@ static void __init kernel_add_sysfs_param(const char *name,
 "%s", name);
 if (err) {
 kobject_put(&mk->kobj);
-printk(KERN_ERR "Module '%s' failed add to sysfs, "
-"error number %d\n", name, err);
-printk(KERN_ERR "The system will be unstable now.\n");
-return;
+printk(KERN_ERR
+"Module '%s' failed add to sysfs, error number %d\n",
+name, err);
+printk(KERN_ERR
+"The system will be unstable now.\n");
+return NULL;
 }
-/* So that exit path is even. */
+
+/* So that we hold reference in both cases. */
 kobject_get(&mk->kobj);
 }

+return mk;
+}
+
+static void __init kernel_add_sysfs_param(const char *name,
+struct kernel_param *kparam,
+unsigned int name_skip)
+{
+struct module_kobject *mk;
+int err;
+
+mk = locate_module_kobject(name);
+if (!mk)
+return;
+
+/* We need to remove old parameters before adding more. */
+if (mk->mp)
+sysfs_remove_group(&mk->kobj, &mk->mp->grp);
+
 /* These should not fail at boot. */
 err = add_sysfs_param(mk, kparam, kparam->name + name_skip);
 BUG_ON(err);

@@ -796,6 +812,32 @@ static void __init param_sysfs_builtin(void)
 }
 }

+ssize_t __modver_version_show(struct module_attribute *mattr,
+struct module *mod, char *buf)
+{
+struct module_version_attribute *vattr =
+container_of(mattr, struct module_version_attribute, mattr);
+
+return sprintf(buf, "%s\n", vattr->version);
+}
+
+extern struct module_version_attribute __start___modver[], __stop___modver[];
+
+static void __init version_sysfs_builtin(void)
+{
+const struct module_version_attribute *vattr;
+struct module_kobject *mk;
+int err;
+
+for (vattr = __start___modver; vattr < __stop___modver; vattr++) {
+mk = locate_module_kobject(vattr->module_name);
+if (mk) {
+err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
+kobject_uevent(&mk->kobj, KOBJ_ADD);
+kobject_put(&mk->kobj);
+}
+}
+}
+
 /* module-related sysfs stuff */

@@ -875,6 +917,7 @@ static int __init param_sysfs_init(void)
 }
 module_sysfs_initialized = 1;

+version_sysfs_builtin();
 param_sysfs_builtin();

 return 0;

@@ -428,7 +428,7 @@ static void __exit dsa_cleanup_module(void)
 }
 module_exit(dsa_cleanup_module);

-MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>")
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
 MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:dsa");

@@ -13,8 +13,8 @@ obj-y := \
 request_key_auth.o \
 user_defined.o

-obj-$(CONFIG_TRUSTED_KEYS) += trusted_defined.o
-obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted_defined.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o
 obj-$(CONFIG_KEYS_COMPAT) += compat.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSCTL) += sysctl.o

@@ -30,7 +30,7 @@
 #include <crypto/sha.h>
 #include <crypto/aes.h>

-#include "encrypted_defined.h"
+#include "encrypted.h"

 static const char KEY_TRUSTED_PREFIX[] = "trusted:";
 static const char KEY_USER_PREFIX[] = "user:";

@@ -888,6 +888,7 @@ static int __init init_encrypted(void)
 out:
 encrypted_shash_release();
 return ret;
+
 }

 static void __exit cleanup_encrypted(void)

@@ -29,7 +29,7 @@
 #include <linux/tpm.h>
 #include <linux/tpm_command.h>

-#include "trusted_defined.h"
+#include "trusted.h"

 static const char hmac_alg[] = "hmac(sha1)";
 static const char hash_alg[] = "sha1";

@@ -1032,6 +1032,7 @@ static int trusted_update(struct key *key, const void *data, size_t datalen)
 ret = datablob_parse(datablob, new_p, new_o);
 if (ret != Opt_update) {
 ret = -EINVAL;
+kfree(new_p);
 goto out;
 }
 /* copy old key values, and reseal with new pcrs */

@@ -178,7 +178,7 @@ int cond_init_bool_indexes(struct policydb *p)
 p->bool_val_to_struct = (struct cond_bool_datum **)
 kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL);
 if (!p->bool_val_to_struct)
-return -1;
+return -ENOMEM;
 return 0;
 }

@@ -501,8 +501,8 @@ static int policydb_index(struct policydb *p)
 if (rc)
 goto out;

-rc = -ENOMEM;
-if (cond_init_bool_indexes(p))
+rc = cond_init_bool_indexes(p);
+if (rc)
 goto out;

 for (i = 0; i < SYM_NUM; i++) {
