Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig | 36
-rw-r--r--  drivers/char/agp/amd-k7-agp.c | 24
-rw-r--r--  drivers/char/agp/amd64-agp.c | 6
-rw-r--r--  drivers/char/agp/ati-agp.c | 22
-rw-r--r--  drivers/char/agp/efficeon-agp.c | 16
-rw-r--r--  drivers/char/agp/intel-agp.c | 11
-rw-r--r--  drivers/char/agp/nvidia-agp.c | 24
-rw-r--r--  drivers/char/agp/sis-agp.c | 7
-rw-r--r--  drivers/char/agp/via-agp.c | 6
-rw-r--r--  drivers/char/hw_random/amd-rng.c | 18
-rw-r--r--  drivers/char/hw_random/cavium-rng-vf.c | 1
-rw-r--r--  drivers/char/hw_random/cn10k-rng.c | 1
-rw-r--r--  drivers/char/hw_random/core.c | 17
-rw-r--r--  drivers/char/hw_random/geode-rng.c | 36
-rw-r--r--  drivers/char/hw_random/mpfs-rng.c | 1
-rw-r--r--  drivers/char/hw_random/mtk-rng.c | 5
-rw-r--r--  drivers/char/hw_random/npcm-rng.c | 15
-rw-r--r--  drivers/char/hw_random/s390-trng.c | 1
-rw-r--r--  drivers/char/hw_random/stm32-rng.c | 8
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c | 2
-rw-r--r--  drivers/char/hw_random/virtio-rng.c | 1
-rw-r--r--  drivers/char/ipmi/Kconfig | 10
-rw-r--r--  drivers/char/ipmi/Makefile | 1
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c | 16
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 14
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 27
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 4
-rw-r--r--  drivers/char/ipmi/kcs_bmc_aspeed.c | 24
-rw-r--r--  drivers/char/ipmi/ssif_bmc.c | 873
-rw-r--r--  drivers/char/random.c | 265
-rw-r--r--  drivers/char/sonypi.c | 3
-rw-r--r--  drivers/char/tpm/eventlog/acpi.c | 12
-rw-r--r--  drivers/char/tpm/st33zp24/i2c.c | 142
-rw-r--r--  drivers/char/tpm/st33zp24/spi.c | 145
-rw-r--r--  drivers/char/tpm/st33zp24/st33zp24.c | 39
-rw-r--r--  drivers/char/tpm/st33zp24/st33zp24.h | 7
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 7
-rw-r--r--  drivers/char/tpm/tpm-dev-common.c | 4
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 5
-rw-r--r--  drivers/char/tpm/tpm_crb.c | 35
-rw-r--r--  drivers/char/tpm/tpm_ftpm_tee.c | 8
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 9
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 20
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h | 1
-rw-r--r--  drivers/char/tpm/tpm_tis_i2c.c | 3
46 files changed, 1334 insertions, 600 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 0f378d29dab0..30fe9848dac1 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -423,40 +423,4 @@ config ADI
and SSM (Silicon Secured Memory). Intended consumers of this
driver include crash and makedumpfile.
-config RANDOM_TRUST_CPU
- bool "Initialize RNG using CPU RNG instructions"
- default y
- help
- Initialize the RNG using random numbers supplied by the CPU's
- RNG instructions (e.g. RDRAND), if supported and available. These
- random numbers are never used directly, but are rather hashed into
- the main input pool, and this happens regardless of whether or not
- this option is enabled. Instead, this option controls whether the
- they are credited and hence can initialize the RNG. Additionally,
- other sources of randomness are always used, regardless of this
- setting. Enabling this implies trusting that the CPU can supply high
- quality and non-backdoored random numbers.
-
- Say Y here unless you have reason to mistrust your CPU or believe
- its RNG facilities may be faulty. This may also be configured at
- boot time with "random.trust_cpu=on/off".
-
-config RANDOM_TRUST_BOOTLOADER
- bool "Initialize RNG using bootloader-supplied seed"
- default y
- help
- Initialize the RNG using a seed supplied by the bootloader or boot
- environment (e.g. EFI or a bootloader-generated device tree). This
- seed is not used directly, but is rather hashed into the main input
- pool, and this happens regardless of whether or not this option is
- enabled. Instead, this option controls whether the seed is credited
- and hence can initialize the RNG. Additionally, other sources of
- randomness are always used, regardless of this setting. Enabling
- this implies trusting that the bootloader can supply high quality and
- non-backdoored seeds.
-
- Say Y here unless you have reason to mistrust your bootloader or
- believe its RNG facilities may be faulty. This may also be configured
- at boot time with "random.trust_bootloader=on/off".
-
endmenu
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 2b2095542816..55397ba765d2 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -488,26 +488,11 @@ static void agp_amdk7_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-#ifdef CONFIG_PM
-
-static int agp_amdk7_suspend(struct pci_dev *pdev, pm_message_t state)
+static int agp_amdk7_resume(struct device *dev)
{
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-static int agp_amdk7_resume(struct pci_dev *pdev)
-{
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
return amd_irongate_driver.configure();
}
-#endif /* CONFIG_PM */
-
/* must be the same order as name table above */
static const struct pci_device_id agp_amdk7_pci_table[] = {
{
@@ -539,15 +524,14 @@ static const struct pci_device_id agp_amdk7_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_amdk7_pm_ops, NULL, agp_amdk7_resume);
+
static struct pci_driver agp_amdk7_pci_driver = {
.name = "agpgart-amdk7",
.id_table = agp_amdk7_pci_table,
.probe = agp_amdk7_probe,
.remove = agp_amdk7_remove,
-#ifdef CONFIG_PM
- .suspend = agp_amdk7_suspend,
- .resume = agp_amdk7_resume,
-#endif
+ .driver.pm = &agp_amdk7_pm_ops,
};
static int __init agp_amdk7_init(void)
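The AGP changes in this series all follow the same conversion: the legacy PCI .suspend/.resume hooks (and their #ifdef CONFIG_PM guards) are replaced by a struct dev_pm_ops built with DEFINE_SIMPLE_DEV_PM_OPS() and wired up through .driver.pm. Because the PCI core saves and restores config space and power state around dev_pm_ops callbacks, the explicit pci_save_state()/pci_set_power_state() calls can be dropped and only the device-specific reprogramming remains. A minimal sketch of the pattern for a hypothetical "foo" driver (all foo_* names are illustrative, not from the patch):

static int foo_resume(struct device *dev)
{
    /* The PCI core has already restored config space and power state;
     * only chip-specific reconfiguration is left to do here.
     */
    return foo_configure();
}

/* NULL suspend hook: nothing to do beyond what the PCI core already does. */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, NULL, foo_resume);

static struct pci_driver foo_pci_driver = {
    .name      = "foo",
    .id_table  = foo_pci_table,
    .probe     = foo_probe,
    .remove    = foo_remove,
    .driver.pm = &foo_pm_ops,
};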
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 84a4aa9312cf..ce8651436609 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -588,9 +588,7 @@ static void agp_amd64_remove(struct pci_dev *pdev)
agp_bridges_found--;
}
-#define agp_amd64_suspend NULL
-
-static int __maybe_unused agp_amd64_resume(struct device *dev)
+static int agp_amd64_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -727,7 +725,7 @@ static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
{ }
};
-static SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, agp_amd64_suspend, agp_amd64_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, NULL, agp_amd64_resume);
static struct pci_driver agp_amd64_pci_driver = {
.name = "agpgart-amd64",
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 6f5530482d83..3c1fce48aabe 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -238,23 +238,10 @@ static int ati_configure(void)
}
-#ifdef CONFIG_PM
-static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state)
+static int agp_ati_resume(struct device *dev)
{
- pci_save_state(dev);
- pci_set_power_state(dev, PCI_D3hot);
-
- return 0;
-}
-
-static int agp_ati_resume(struct pci_dev *dev)
-{
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
-
return ati_configure();
}
-#endif
/*
*Since we don't need contiguous memory we just try
@@ -559,15 +546,14 @@ static const struct pci_device_id agp_ati_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_ati_pci_table);
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_ati_pm_ops, NULL, agp_ati_resume);
+
static struct pci_driver agp_ati_pci_driver = {
.name = "agpgart-ati",
.id_table = agp_ati_pci_table,
.probe = agp_ati_probe,
.remove = agp_ati_remove,
-#ifdef CONFIG_PM
- .suspend = agp_ati_suspend,
- .resume = agp_ati_resume,
-#endif
+ .driver.pm = &agp_ati_pm_ops,
};
static int __init agp_ati_init(void)
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index c53f0f9ef5b0..f28d42319269 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -412,18 +412,11 @@ static void agp_efficeon_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-#ifdef CONFIG_PM
-static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state)
-{
- return 0;
-}
-
-static int agp_efficeon_resume(struct pci_dev *pdev)
+static int agp_efficeon_resume(struct device *dev)
{
printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
return efficeon_configure();
}
-#endif
static const struct pci_device_id agp_efficeon_pci_table[] = {
{
@@ -437,6 +430,8 @@ static const struct pci_device_id agp_efficeon_pci_table[] = {
{ }
};
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_efficeon_pm_ops, NULL, agp_efficeon_resume);
+
MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table);
static struct pci_driver agp_efficeon_pci_driver = {
@@ -444,10 +439,7 @@ static struct pci_driver agp_efficeon_pci_driver = {
.id_table = agp_efficeon_pci_table,
.probe = agp_efficeon_probe,
.remove = agp_efficeon_remove,
-#ifdef CONFIG_PM
- .suspend = agp_efficeon_suspend,
- .resume = agp_efficeon_resume,
-#endif
+ .driver.pm = &agp_efficeon_pm_ops,
};
static int __init agp_efficeon_init(void)
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 9e4f27a6cb5a..c518b3a9db04 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -817,16 +817,15 @@ static void agp_intel_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-#ifdef CONFIG_PM
-static int agp_intel_resume(struct pci_dev *pdev)
+static int agp_intel_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
bridge->driver->configure();
return 0;
}
-#endif
static const struct pci_device_id agp_intel_pci_table[] = {
#define ID(x) \
@@ -895,14 +894,14 @@ static const struct pci_device_id agp_intel_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_intel_pm_ops, NULL, agp_intel_resume);
+
static struct pci_driver agp_intel_pci_driver = {
.name = "agpgart-intel",
.id_table = agp_intel_pci_table,
.probe = agp_intel_probe,
.remove = agp_intel_remove,
-#ifdef CONFIG_PM
- .resume = agp_intel_resume,
-#endif
+ .driver.pm = &agp_intel_pm_ops,
};
static int __init agp_intel_init(void)
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 826dbd06f6bb..dbcbc06cc202 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -404,28 +404,13 @@ static void agp_nvidia_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-#ifdef CONFIG_PM
-static int agp_nvidia_suspend(struct pci_dev *pdev, pm_message_t state)
+static int agp_nvidia_resume(struct device *dev)
{
- pci_save_state(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
- return 0;
-}
-
-static int agp_nvidia_resume(struct pci_dev *pdev)
-{
- /* set power state 0 and restore PCI space */
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
/* reconfigure AGP hardware again */
nvidia_configure();
return 0;
}
-#endif
-
static const struct pci_device_id agp_nvidia_pci_table[] = {
{
@@ -449,15 +434,14 @@ static const struct pci_device_id agp_nvidia_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table);
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_nvidia_pm_ops, NULL, agp_nvidia_resume);
+
static struct pci_driver agp_nvidia_pci_driver = {
.name = "agpgart-nvidia",
.id_table = agp_nvidia_pci_table,
.probe = agp_nvidia_probe,
.remove = agp_nvidia_remove,
-#ifdef CONFIG_PM
- .suspend = agp_nvidia_suspend,
- .resume = agp_nvidia_resume,
-#endif
+ .driver.pm = &agp_nvidia_pm_ops,
};
static int __init agp_nvidia_init(void)
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index f8a02f4bef1b..484bb101c53b 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -217,10 +217,7 @@ static void agp_sis_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-#define agp_sis_suspend NULL
-
-static int __maybe_unused agp_sis_resume(
- __attribute__((unused)) struct device *dev)
+static int agp_sis_resume(__attribute__((unused)) struct device *dev)
{
return sis_driver.configure();
}
@@ -407,7 +404,7 @@ static const struct pci_device_id agp_sis_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
-static SIMPLE_DEV_PM_OPS(agp_sis_pm_ops, agp_sis_suspend, agp_sis_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_sis_pm_ops, NULL, agp_sis_resume);
static struct pci_driver agp_sis_pci_driver = {
.name = "agpgart-sis",
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index b2f484f527fb..bc5140af2dcb 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -489,9 +489,7 @@ static void agp_via_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-#define agp_via_suspend NULL
-
-static int __maybe_unused agp_via_resume(struct device *dev)
+static int agp_via_resume(struct device *dev)
{
struct agp_bridge_data *bridge = dev_get_drvdata(dev);
@@ -551,7 +549,7 @@ static const struct pci_device_id agp_via_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_via_pci_table);
-static SIMPLE_DEV_PM_OPS(agp_via_pm_ops, agp_via_suspend, agp_via_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_via_pm_ops, NULL, agp_via_resume);
static struct pci_driver agp_via_pci_driver = {
.name = "agpgart-via",
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index c22d4184bb61..0555e3838bce 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -143,15 +143,19 @@ static int __init amd_rng_mod_init(void)
found:
err = pci_read_config_dword(pdev, 0x58, &pmbase);
if (err)
- return err;
+ goto put_dev;
pmbase &= 0x0000FF00;
- if (pmbase == 0)
- return -EIO;
+ if (pmbase == 0) {
+ err = -EIO;
+ goto put_dev;
+ }
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (!priv) {
+ err = -ENOMEM;
+ goto put_dev;
+ }
if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
@@ -185,6 +189,8 @@ err_iomap:
release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
out:
kfree(priv);
+put_dev:
+ pci_dev_put(pdev);
return err;
}
@@ -200,6 +206,8 @@ static void __exit amd_rng_mod_exit(void)
release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+ pci_dev_put(priv->pcidev);
+
kfree(priv);
}
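The amd-rng fix above addresses a device reference leak: for_each_pci_dev() takes a reference on each device it visits, so jumping out of the loop at the found: label leaves a reference held on pdev, and every subsequent error path (as well as module exit) must balance it with pci_dev_put(). A condensed sketch of the pattern, with a hypothetical setup helper:

    for_each_pci_dev(pdev) {
        ent = pci_match_id(pci_tbl, pdev);
        if (ent)
            goto found;        /* loop exited early: reference on pdev still held */
    }
    return -ENODEV;            /* loop ran to completion: nothing to release */

found:
    err = foo_setup(pdev);     /* hypothetical helper that may fail */
    if (err)
        goto put_dev;
    return 0;

put_dev:
    pci_dev_put(pdev);         /* balance the reference taken by for_each_pci_dev() */
    return err;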
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index 7c55f4cf4a8b..c99c54cd99c6 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -225,7 +225,6 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
return -ENOMEM;
rng->ops.read = cavium_rng_read;
- rng->ops.quality = 1000;
pci_set_drvdata(pdev, rng);
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
index a01e9307737c..c1193f85982c 100644
--- a/drivers/char/hw_random/cn10k-rng.c
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -145,7 +145,6 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
rng->ops.read = cn10k_rng_read;
- rng->ops.quality = 1000;
rng->ops.priv = (unsigned long)rng;
reset_rng_health_state(rng);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index cc002b0c2f0c..f34d356fe2c0 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -41,14 +41,14 @@ static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
-static unsigned short default_quality; /* = 0; default to "off" */
+static unsigned short default_quality = 1024; /* default to maximum */
module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
"current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
- "default entropy content of hwrng per 1024 bits of input");
+ "default maximum entropy content of hwrng per 1024 bits of input");
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
@@ -69,8 +69,10 @@ static void add_early_randomness(struct hwrng *rng)
mutex_lock(&reading_mutex);
bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
mutex_unlock(&reading_mutex);
- if (bytes_read > 0)
- add_device_randomness(rng_fillbuf, bytes_read);
+ if (bytes_read > 0) {
+ size_t entropy = bytes_read * 8 * rng->quality / 1024;
+ add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false);
+ }
}
static inline void cleanup_rng(struct kref *kref)
@@ -170,10 +172,7 @@ static int hwrng_init(struct hwrng *rng)
reinit_completion(&rng->cleanup_done);
skip_init:
- if (!rng->quality)
- rng->quality = default_quality;
- if (rng->quality > 1024)
- rng->quality = 1024;
+ rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
current_quality = rng->quality; /* obsolete */
return 0;
@@ -528,7 +527,7 @@ static int hwrng_fillfn(void *unused)
/* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
- entropy >> 10);
+ entropy >> 10, true);
}
hwrng_fill = NULL;
return 0;
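In the hw_random core, the per-device quality is now treated as a cap rather than a value to be overridden: default_quality starts at 1024 (full credit), a driver-supplied quality of 0 is taken as 1024, and the effective quality is the minimum of the two, clamped to 1024. Early randomness is credited through add_hwgenerator_randomness() on the same scale, where quality is entropy per 1024 bits of input, so a read credits bytes_read * 8 * quality / 1024 bits. A worked example (illustrative values, not code from the patch):

    /* 32-byte early read from a device claiming quality 700/1024 */
    size_t bytes_read = 32;
    unsigned short quality = 700;
    size_t entropy = bytes_read * 8 * quality / 1024;   /* 256 * 700 / 1024 = 175 bits */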
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 138ce434f86b..12fbe8091831 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -51,6 +51,10 @@ static const struct pci_device_id pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
+struct amd_geode_priv {
+ struct pci_dev *pcidev;
+ void __iomem *membase;
+};
static int geode_rng_data_read(struct hwrng *rng, u32 *data)
{
@@ -90,6 +94,7 @@ static int __init geode_rng_init(void)
const struct pci_device_id *ent;
void __iomem *mem;
unsigned long rng_base;
+ struct amd_geode_priv *priv;
for_each_pci_dev(pdev) {
ent = pci_match_id(pci_tbl, pdev);
@@ -97,17 +102,26 @@ static int __init geode_rng_init(void)
goto found;
}
/* Device not found. */
- goto out;
+ return err;
found:
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto put_dev;
+ }
+
rng_base = pci_resource_start(pdev, 0);
if (rng_base == 0)
- goto out;
+ goto free_priv;
err = -ENOMEM;
mem = ioremap(rng_base, 0x58);
if (!mem)
- goto out;
- geode_rng.priv = (unsigned long)mem;
+ goto free_priv;
+
+ geode_rng.priv = (unsigned long)priv;
+ priv->membase = mem;
+ priv->pcidev = pdev;
pr_info("AMD Geode RNG detected\n");
err = hwrng_register(&geode_rng);
@@ -116,20 +130,26 @@ found:
err);
goto err_unmap;
}
-out:
return err;
err_unmap:
iounmap(mem);
- goto out;
+free_priv:
+ kfree(priv);
+put_dev:
+ pci_dev_put(pdev);
+ return err;
}
static void __exit geode_rng_exit(void)
{
- void __iomem *mem = (void __iomem *)geode_rng.priv;
+ struct amd_geode_priv *priv;
+ priv = (struct amd_geode_priv *)geode_rng.priv;
hwrng_unregister(&geode_rng);
- iounmap(mem);
+ iounmap(priv->membase);
+ pci_dev_put(priv->pcidev);
+ kfree(priv);
}
module_init(geode_rng_init);
diff --git a/drivers/char/hw_random/mpfs-rng.c b/drivers/char/hw_random/mpfs-rng.c
index 5813da617a48..c6972734ae62 100644
--- a/drivers/char/hw_random/mpfs-rng.c
+++ b/drivers/char/hw_random/mpfs-rng.c
@@ -78,7 +78,6 @@ static int mpfs_rng_probe(struct platform_device *pdev)
rng_priv->rng.read = mpfs_rng_read;
rng_priv->rng.name = pdev->name;
- rng_priv->rng.quality = 1024;
platform_set_drvdata(pdev, rng_priv);
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 6c00ea008555..aa993753ab12 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -22,7 +22,7 @@
#define RNG_AUTOSUSPEND_TIMEOUT 100
#define USEC_POLL 2
-#define TIMEOUT_POLL 20
+#define TIMEOUT_POLL 60
#define RNG_CTRL 0x00
#define RNG_EN BIT(0)
@@ -77,7 +77,7 @@ static bool mtk_rng_wait_ready(struct hwrng *rng, bool wait)
readl_poll_timeout_atomic(priv->base + RNG_CTRL, ready,
ready & RNG_READY, USEC_POLL,
TIMEOUT_POLL);
- return !!ready;
+ return !!(ready & RNG_READY);
}
static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -179,6 +179,7 @@ static const struct dev_pm_ops mtk_rng_pm_ops = {
#endif /* CONFIG_PM */
static const struct of_device_id mtk_rng_match[] = {
+ { .compatible = "mediatek,mt7986-rng" },
{ .compatible = "mediatek,mt7623-rng" },
{},
};
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index 1ec5f267a656..9903d0357e06 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -13,11 +13,13 @@
#include <linux/delay.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
#define NPCM_RNGCS_REG 0x00 /* Control and status register */
#define NPCM_RNGD_REG 0x04 /* Data register */
#define NPCM_RNGMODE_REG 0x08 /* Mode register */
+#define NPCM_RNG_CLK_SET_62_5MHZ BIT(2) /* 60-80 MHz */
#define NPCM_RNG_CLK_SET_25MHZ GENMASK(4, 3) /* 20-25 MHz */
#define NPCM_RNG_DATA_VALID BIT(1)
#define NPCM_RNG_ENABLE BIT(0)
@@ -31,14 +33,14 @@
struct npcm_rng {
void __iomem *base;
struct hwrng rng;
+ u32 clkp;
};
static int npcm_rng_init(struct hwrng *rng)
{
struct npcm_rng *priv = to_npcm_rng(rng);
- writel(NPCM_RNG_CLK_SET_25MHZ | NPCM_RNG_ENABLE,
- priv->base + NPCM_RNGCS_REG);
+ writel(priv->clkp | NPCM_RNG_ENABLE, priv->base + NPCM_RNGCS_REG);
return 0;
}
@@ -47,7 +49,7 @@ static void npcm_rng_cleanup(struct hwrng *rng)
{
struct npcm_rng *priv = to_npcm_rng(rng);
- writel(NPCM_RNG_CLK_SET_25MHZ, priv->base + NPCM_RNGCS_REG);
+ writel(priv->clkp, priv->base + NPCM_RNGCS_REG);
}
static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -109,7 +111,7 @@ static int npcm_rng_probe(struct platform_device *pdev)
priv->rng.name = pdev->name;
priv->rng.read = npcm_rng_read;
priv->rng.priv = (unsigned long)&pdev->dev;
- priv->rng.quality = 1000;
+ priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev);
writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG);
@@ -162,7 +164,10 @@ static const struct dev_pm_ops npcm_rng_pm_ops = {
};
static const struct of_device_id rng_dt_id[] __maybe_unused = {
- { .compatible = "nuvoton,npcm750-rng", },
+ { .compatible = "nuvoton,npcm750-rng",
+ .data = (void *)NPCM_RNG_CLK_SET_25MHZ },
+ { .compatible = "nuvoton,npcm845-rng",
+ .data = (void *)NPCM_RNG_CLK_SET_62_5MHZ },
{},
};
MODULE_DEVICE_TABLE(of, rng_dt_id);
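The npcm-rng change selects the RNG clock prescaler from the matched compatible string rather than hard-coding it, using the .data pointer of the of_device_id table and of_device_get_match_data(). A minimal sketch of the idiom with hypothetical names (integer settings carried through the void * cast, as in the patch):

static const struct of_device_id foo_dt_ids[] = {
    { .compatible = "vendor,foo-v1", .data = (void *)FOO_V1_CLK_SETTING },
    { .compatible = "vendor,foo-v2", .data = (void *)FOO_V2_CLK_SETTING },
    { }
};

static int foo_probe(struct platform_device *pdev)
{
    /* .data of the of_device_id entry that matched this device, or NULL */
    u32 setting = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev);

    /* ... program the prescaler with 'setting' ... */
    return 0;
}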
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
index 795853dfc46b..cffa326ddc8d 100644
--- a/drivers/char/hw_random/s390-trng.c
+++ b/drivers/char/hw_random/s390-trng.c
@@ -191,7 +191,6 @@ static struct hwrng trng_hwrng_dev = {
.name = "s390-trng",
.data_read = trng_hwrng_data_read,
.read = trng_hwrng_read,
- .quality = 1024,
};
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index bc22178f83e8..a6731cf0627a 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -44,16 +44,18 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
pm_runtime_get_sync((struct device *) priv->rng.priv);
- while (max > sizeof(u32)) {
+ while (max >= sizeof(u32)) {
sr = readl_relaxed(priv->base + RNG_SR);
/* Manage timeout which is based on timer and take */
/* care of initial delay time when enabling rng */
if (!sr && wait) {
- retval = readl_relaxed_poll_timeout_atomic(priv->base
+ int err;
+
+ err = readl_relaxed_poll_timeout_atomic(priv->base
+ RNG_SR,
sr, sr,
10, 50000);
- if (retval)
+ if (err)
dev_err((struct device *)priv->rng.priv,
"%s: timeout %x!\n", __func__, sr);
}
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 8ea1fc831eb7..26f322d19a88 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -145,8 +145,6 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
if (!of_property_read_u32(pdev->dev.of_node,
"quality", &i))
priv->rng_ops.quality = i;
- else
- priv->rng_ops.quality = 0;
} else {
period = pdata->period;
priv->rng_ops.quality = pdata->quality;
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index a6f3a8a2aca6..f7690e0f92ed 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -148,7 +148,6 @@ static int probe_common(struct virtio_device *vdev)
.cleanup = virtio_cleanup,
.priv = (unsigned long)vi,
.name = vi->name,
- .quality = 1000,
};
vdev->priv = vi;
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 39565cf74b2c..b6c0d35fc1a5 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -169,6 +169,16 @@ config ASPEED_BT_IPMI_BMC
found on Aspeed SOCs (AST2400 and AST2500). The driver
implements the BMC side of the BT interface.
+config SSIF_IPMI_BMC
+ tristate "SSIF IPMI BMC driver"
+ depends on I2C && I2C_SLAVE
+ help
+ This enables the IPMI SMBus system interface (SSIF) at the
+ management (BMC) side.
+
+ The driver implements the BMC side of the SMBus system
+ interface (SSIF).
+
config IPMB_DEVICE_INTERFACE
tristate 'IPMB Interface handler'
depends on I2C
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 7ce790efad92..cb6138b8ded9 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
obj-$(CONFIG_ASPEED_KCS_IPMI_BMC) += kcs_bmc_aspeed.o
obj-$(CONFIG_NPCM7XX_KCS_IPMI_BMC) += kcs_bmc_npcm7xx.o
obj-$(CONFIG_IPMB_DEVICE_INTERFACE) += ipmb_dev_int.o
+obj-$(CONFIG_SSIF_IPMI_BMC) += ssif_bmc.o
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index efda90dcf5b3..ecfcb50302f6 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -122,10 +122,10 @@ struct si_sm_data {
unsigned long error0_timeout;
};
-static unsigned int init_kcs_data(struct si_sm_data *kcs,
- struct si_sm_io *io)
+static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
+ struct si_sm_io *io, enum kcs_states state)
{
- kcs->state = KCS_IDLE;
+ kcs->state = state;
kcs->io = io;
kcs->write_pos = 0;
kcs->write_count = 0;
@@ -140,6 +140,12 @@ static unsigned int init_kcs_data(struct si_sm_data *kcs,
return 2;
}
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+ struct si_sm_io *io)
+{
+ return init_kcs_data_with_state(kcs, io, KCS_IDLE);
+}
+
static inline unsigned char read_status(struct si_sm_data *kcs)
{
return kcs->io->inputb(kcs->io, 1);
@@ -270,7 +276,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
if (size > MAX_KCS_WRITE_SIZE)
return IPMI_REQ_LEN_EXCEEDED_ERR;
- if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
+ if (kcs->state != KCS_IDLE) {
dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state);
return IPMI_NOT_IN_MY_STATE_ERR;
}
@@ -495,7 +501,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
}
if (kcs->state == KCS_HOSED) {
- init_kcs_data(kcs, kcs->io);
+ init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0);
return SI_SM_HOSED;
}

diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 49a1707693c9..186f1fee7534 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -614,7 +614,7 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
-/**
+/*
* The driver model view of the IPMI messaging driver.
*/
static struct platform_driver ipmidriver = {
@@ -1330,6 +1330,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
+ struct module *owner;
if (!acquire_ipmi_user(user, &i)) {
/*
@@ -1392,8 +1393,9 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
kfree(rcvr);
}
+ owner = intf->owner;
kref_put(&intf->refcount, intf_free);
- module_put(intf->owner);
+ module_put(owner);
}
int ipmi_destroy_user(struct ipmi_user *user)
@@ -3704,12 +3706,16 @@ static void deliver_smi_err_response(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg,
unsigned char err)
{
+ int rv;
msg->rsp[0] = msg->data[0] | 4;
msg->rsp[1] = msg->data[1];
msg->rsp[2] = err;
msg->rsp_size = 3;
- /* It's an error, so it will never requeue, no need to check return. */
- handle_one_recv_msg(intf, msg);
+
+ /* This will never requeue, but it may ask us to free the message. */
+ rv = handle_one_recv_msg(intf, msg);
+ if (rv == 0)
+ ipmi_free_smi_msg(msg);
}
static void cleanup_smi_msgs(struct ipmi_smi *intf)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 6e357ad76f2e..abddd7e43a9a 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2153,6 +2153,20 @@ skip_fallback_noirq:
}
module_init(init_ipmi_si);
+static void wait_msg_processed(struct smi_info *smi_info)
+{
+ unsigned long jiffies_now;
+ long time_diff;
+
+ while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
+ jiffies_now = jiffies;
+ time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
+ * SI_USEC_PER_JIFFY);
+ smi_event_handler(smi_info, time_diff);
+ schedule_timeout_uninterruptible(1);
+ }
+}
+
static void shutdown_smi(void *send_info)
{
struct smi_info *smi_info = send_info;
@@ -2187,16 +2201,13 @@ static void shutdown_smi(void *send_info)
* in the BMC. Note that timers and CPU interrupts are off,
* so no need for locks.
*/
- while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
- poll(smi_info);
- schedule_timeout_uninterruptible(1);
- }
+ wait_msg_processed(smi_info);
+
if (smi_info->handlers)
disable_si_irq(smi_info);
- while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
- poll(smi_info);
- schedule_timeout_uninterruptible(1);
- }
+
+ wait_msg_processed(smi_info);
+
if (smi_info->handlers)
smi_info->handlers->cleanup(smi_info->si_sm);
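The wait_msg_processed() helper folds the two flush loops in shutdown_smi() into one place and, instead of calling poll(), drives smi_event_handler() with the real elapsed time: the jiffies delta since the last timeout is scaled by SI_USEC_PER_JIFFY (1000000 / HZ in this driver) to get microseconds. A worked example (values illustrative):

    /* With HZ = 250, SI_USEC_PER_JIFFY = 1000000 / 250 = 4000, so a delta
     * of 3 jiffies is passed to smi_event_handler() as 3 * 4000 = 12000 usec.
     */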
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index e1072809fe31..4bfd1e306616 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -74,7 +74,7 @@
/*
* Timer values
*/
-#define SSIF_MSG_USEC 20000 /* 20ms between message tries. */
+#define SSIF_MSG_USEC 60000 /* 60ms between message tries. */
#define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
/* How many times to we retry sending/receiving the message. */
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 5b4e677929ca..0d4a8dcacfd4 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kdebug.h>
+#include <linux/kstrtox.h>
#include <linux/rwsem.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
@@ -212,8 +213,7 @@ static int set_param_str(const char *val, const struct kernel_param *kp)
char valcp[16];
char *s;
- strncpy(valcp, val, 15);
- valcp[15] = '\0';
+ strscpy(valcp, val, 16);
s = strstrip(valcp);
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index 19c32bf50e0e..2dea8cd5a09a 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -406,13 +406,31 @@ static void aspeed_kcs_check_obe(struct timer_list *timer)
static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
{
struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ int rc;
+ u8 str;
/* We don't have an OBE IRQ, emulate it */
if (mask & KCS_BMC_EVENT_TYPE_OBE) {
- if (KCS_BMC_EVENT_TYPE_OBE & state)
- mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
- else
+ if (KCS_BMC_EVENT_TYPE_OBE & state) {
+ /*
+ * Given we don't have an OBE IRQ, delay by polling briefly to see if we can
+ * observe such an event before returning to the caller. This is not
+ * incorrect because OBF may have already become clear before enabling the
+ * IRQ if we had one, under which circumstance no event will be propagated
+ * anyway.
+ *
+ * The onus is on the client to perform a race-free check that it hasn't
+ * missed the event.
+ */
+ rc = read_poll_timeout_atomic(aspeed_kcs_inb, str,
+ !(str & KCS_BMC_STR_OBF), 1, 100, false,
+ &priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
+ /* Time for the slow path? */
+ if (rc == -ETIMEDOUT)
+ mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
+ } else {
del_timer(&priv->obe.timer);
+ }
}
if (mask & KCS_BMC_EVENT_TYPE_IBF) {
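The Aspeed KCS change emulates the missing OBE interrupt by briefly polling the status register before falling back to the timer, using read_poll_timeout_atomic() from linux/iopoll.h: its arguments are the read operation, a variable that receives each reading, the exit condition, the delay and timeout in microseconds, whether to delay before the first read, and then the arguments forwarded to the read operation; it returns 0 once the condition holds, or -ETIMEDOUT. A generic sketch with placeholder names:

    u8 status;
    int rc;

    /* Poll my_read_status(dev) every 1 us, for at most 100 us, with no
     * initial delay, until the hypothetical BUSY bit clears.
     */
    rc = read_poll_timeout_atomic(my_read_status, status,
                                  !(status & MY_BUSY), 1, 100, false, dev);
    if (rc == -ETIMEDOUT)
        my_schedule_slow_path(dev);    /* placeholder fallback */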
diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c
new file mode 100644
index 000000000000..caee848261e9
--- /dev/null
+++ b/drivers/char/ipmi/ssif_bmc.c
@@ -0,0 +1,873 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The driver for the BMC side of the SSIF interface
+ *
+ * Copyright (c) 2022, Ampere Computing LLC
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/ipmi_ssif_bmc.h>
+
+#define DEVICE_NAME "ipmi-ssif-host"
+
+#define GET_8BIT_ADDR(addr_7bit) (((addr_7bit) << 1) & 0xff)
+
+/* A standard SMBus Transaction is limited to 32 data bytes */
+#define MAX_PAYLOAD_PER_TRANSACTION 32
+/* Transaction includes the address, the command, the length and the PEC byte */
+#define MAX_TRANSACTION (MAX_PAYLOAD_PER_TRANSACTION + 4)
+
+#define MAX_IPMI_DATA_PER_START_TRANSACTION 30
+#define MAX_IPMI_DATA_PER_MIDDLE_TRANSACTION 31
+
+#define SSIF_IPMI_SINGLEPART_WRITE 0x2
+#define SSIF_IPMI_SINGLEPART_READ 0x3
+#define SSIF_IPMI_MULTIPART_WRITE_START 0x6
+#define SSIF_IPMI_MULTIPART_WRITE_MIDDLE 0x7
+#define SSIF_IPMI_MULTIPART_WRITE_END 0x8
+#define SSIF_IPMI_MULTIPART_READ_START 0x3
+#define SSIF_IPMI_MULTIPART_READ_MIDDLE 0x9
+
+/*
+ * IPMI 2.0 Spec, section 12.7 SSIF Timing,
+ * Request-to-Response Time is T6max(250ms) - T1max(20ms) - 3ms = 227ms
+ * Recover ssif_bmc from busy state if it takes up to 500ms
+ */
+#define RESPONSE_TIMEOUT 500 /* ms */
+
+struct ssif_part_buffer {
+ u8 address;
+ u8 smbus_cmd;
+ u8 length;
+ u8 payload[MAX_PAYLOAD_PER_TRANSACTION];
+ u8 pec;
+ u8 index;
+};
+
+/*
+ * SSIF internal states:
+ * SSIF_READY 0x00 : Ready state
+ * SSIF_START 0x01 : Start smbus transaction
+ * SSIF_SMBUS_CMD 0x02 : Received SMBus command
+ * SSIF_REQ_RECVING 0x03 : Receiving request
+ * SSIF_RES_SENDING 0x04 : Sending response
+ * SSIF_ABORTING 0x05 : Aborting state
+ */
+enum ssif_state {
+ SSIF_READY,
+ SSIF_START,
+ SSIF_SMBUS_CMD,
+ SSIF_REQ_RECVING,
+ SSIF_RES_SENDING,
+ SSIF_ABORTING,
+ SSIF_STATE_MAX
+};
+
+struct ssif_bmc_ctx {
+ struct i2c_client *client;
+ struct miscdevice miscdev;
+ int msg_idx;
+ bool pec_support;
+ /* ssif bmc spinlock */
+ spinlock_t lock;
+ wait_queue_head_t wait_queue;
+ u8 running;
+ enum ssif_state state;
+ /* Timeout waiting for response */
+ struct timer_list response_timer;
+ bool response_timer_inited;
+ /* Flag to identify a Multi-part Read Transaction */
+ bool is_singlepart_read;
+ u8 nbytes_processed;
+ u8 remain_len;
+ u8 recv_len;
+ /* Block Number of a Multi-part Read Transaction */
+ u8 block_num;
+ bool request_available;
+ bool response_in_progress;
+ bool busy;
+ bool aborting;
+ /* Buffer for SSIF Transaction part*/
+ struct ssif_part_buffer part_buf;
+ struct ipmi_ssif_msg response;
+ struct ipmi_ssif_msg request;
+};
+
+static inline struct ssif_bmc_ctx *to_ssif_bmc(struct file *file)
+{
+ return container_of(file->private_data, struct ssif_bmc_ctx, miscdev);
+}
+
+static const char *state_to_string(enum ssif_state state)
+{
+ switch (state) {
+ case SSIF_READY:
+ return "SSIF_READY";
+ case SSIF_START:
+ return "SSIF_START";
+ case SSIF_SMBUS_CMD:
+ return "SSIF_SMBUS_CMD";
+ case SSIF_REQ_RECVING:
+ return "SSIF_REQ_RECVING";
+ case SSIF_RES_SENDING:
+ return "SSIF_RES_SENDING";
+ case SSIF_ABORTING:
+ return "SSIF_ABORTING";
+ default:
+ return "SSIF_STATE_UNKNOWN";
+ }
+}
+
+/* Handle SSIF message that will be sent to user */
+static ssize_t ssif_bmc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file);
+ struct ipmi_ssif_msg msg;
+ unsigned long flags;
+ ssize_t ret;
+
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
+ while (!ssif_bmc->request_available) {
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_event_interruptible(ssif_bmc->wait_queue,
+ ssif_bmc->request_available);
+ if (ret)
+ return ret;
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
+ }
+
+ if (count < min_t(ssize_t,
+ sizeof_field(struct ipmi_ssif_msg, len) + ssif_bmc->request.len,
+ sizeof(struct ipmi_ssif_msg))) {
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+ ret = -EINVAL;
+ } else {
+ count = min_t(ssize_t,
+ sizeof_field(struct ipmi_ssif_msg, len) + ssif_bmc->request.len,
+ sizeof(struct ipmi_ssif_msg));
+ memcpy(&msg, &ssif_bmc->request, count);
+ ssif_bmc->request_available = false;
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+
+ ret = copy_to_user(buf, &msg, count);
+ }
+
+ return (ret < 0) ? ret : count;
+}
+
+/* Handle SSIF message that is written by user */
+static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file);
+ struct ipmi_ssif_msg msg;
+ unsigned long flags;
+ ssize_t ret;
+
+ if (count > sizeof(struct ipmi_ssif_msg))
+ return -EINVAL;
+
+ if (copy_from_user(&msg, buf, count))
+ return -EFAULT;
+
+ if (!msg.len || count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
+ while (ssif_bmc->response_in_progress) {
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_event_interruptible(ssif_bmc->wait_queue,
+ !ssif_bmc->response_in_progress);
+ if (ret)
+ return ret;
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
+ }
+
+ /*
+ * The write must complete before the response timeout fires; otherwise
+ * the response is aborted and the driver waits for the next request.
+ * Return -EINVAL if the response was aborted.
+ */
+ ret = (ssif_bmc->response_timer_inited) ? 0 : -EINVAL;
+ if (ret)
+ goto exit;
+
+ del_timer(&ssif_bmc->response_timer);
+ ssif_bmc->response_timer_inited = false;
+
+ memcpy(&ssif_bmc->response, &msg, count);
+ ssif_bmc->is_singlepart_read = (msg.len <= MAX_PAYLOAD_PER_TRANSACTION);
+
+ ssif_bmc->response_in_progress = true;
+
+ /* ssif_bmc not busy */
+ ssif_bmc->busy = false;
+
+ /* Clean old request buffer */
+ memset(&ssif_bmc->request, 0, sizeof(struct ipmi_ssif_msg));
+exit:
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+
+ return (ret < 0) ? ret : count;
+}
+
+static int ssif_bmc_open(struct inode *inode, struct file *file)
+{
+ struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file);
+ int ret = 0;
+
+ spin_lock_irq(&ssif_bmc->lock);
+ if (!ssif_bmc->running)
+ ssif_bmc->running = 1;
+ else
+ ret = -EBUSY;
+ spin_unlock_irq(&ssif_bmc->lock);
+
+ return ret;
+}
+
+static __poll_t ssif_bmc_poll(struct file *file, poll_table *wait)
+{
+ struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file);
+ __poll_t mask = 0;
+
+ poll_wait(file, &ssif_bmc->wait_queue, wait);
+
+ spin_lock_irq(&ssif_bmc->lock);
+ /* The request is available, userspace application can get the request */
+ if (ssif_bmc->request_available)
+ mask |= EPOLLIN;
+
+ spin_unlock_irq(&ssif_bmc->lock);
+
+ return mask;
+}
+
+static int ssif_bmc_release(struct inode *inode, struct file *file)
+{
+ struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file);
+
+ spin_lock_irq(&ssif_bmc->lock);
+ ssif_bmc->running = 0;
+ spin_unlock_irq(&ssif_bmc->lock);
+
+ return 0;
+}
+
+/*
+ * System calls to device interface for user apps
+ */
+static const struct file_operations ssif_bmc_fops = {
+ .owner = THIS_MODULE,
+ .open = ssif_bmc_open,
+ .read = ssif_bmc_read,
+ .write = ssif_bmc_write,
+ .release = ssif_bmc_release,
+ .poll = ssif_bmc_poll,
+};
+
+/* Called with ssif_bmc->lock held. */
+static void complete_response(struct ssif_bmc_ctx *ssif_bmc)
+{
+ /* Invalidate response in buffer to denote it having been sent. */
+ ssif_bmc->response.len = 0;
+ ssif_bmc->response_in_progress = false;
+ ssif_bmc->nbytes_processed = 0;
+ ssif_bmc->remain_len = 0;
+ ssif_bmc->busy = false;
+ memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
+ wake_up_all(&ssif_bmc->wait_queue);
+}
+
+static void response_timeout(struct timer_list *t)
+{
+ struct ssif_bmc_ctx *ssif_bmc = from_timer(ssif_bmc, t, response_timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
+
+ /* Do nothing if the response is in progress */
+ if (!ssif_bmc->response_in_progress) {
+ /* Recover ssif_bmc from busy */
+ ssif_bmc->busy = false;
+ ssif_bmc->response_timer_inited = false;
+ /* Set aborting flag */
+ ssif_bmc->aborting = true;
+ }
+
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+}
+
+/* Called with ssif_bmc->lock held. */
+static void handle_request(struct ssif_bmc_ctx *ssif_bmc)
+{
+ /* set ssif_bmc to busy waiting for response */
+ ssif_bmc->busy = true;
+ /* Request message is available to process */
+ ssif_bmc->request_available = true;
+ /* Clean old response buffer */
+ memset(&ssif_bmc->response, 0, sizeof(struct ipmi_ssif_msg));
+ /* This is the new READ request.*/
+ wake_up_all(&ssif_bmc->wait_queue);
+
+ /* Arm a timer to recover the slave from the busy state if no response arrives */
+ if (!ssif_bmc->response_timer_inited) {
+ timer_setup(&ssif_bmc->response_timer, response_timeout, 0);
+ ssif_bmc->response_timer_inited = true;
+ }
+ mod_timer(&ssif_bmc->response_timer, jiffies + msecs_to_jiffies(RESPONSE_TIMEOUT));
+}
+
+static void calculate_response_part_pec(struct ssif_part_buffer *part)
+{
+ u8 addr = part->address;
+
+ /* PEC - Start Read Address */
+ part->pec = i2c_smbus_pec(0, &addr, 1);
+ /* PEC - SSIF Command */
+ part->pec = i2c_smbus_pec(part->pec, &part->smbus_cmd, 1);
+ /* PEC - Restart Write Address */
+ addr = addr | 0x01;
+ part->pec = i2c_smbus_pec(part->pec, &addr, 1);
+ part->pec = i2c_smbus_pec(part->pec, &part->length, 1);
+ if (part->length)
+ part->pec = i2c_smbus_pec(part->pec, part->payload, part->length);
+}
+
+static void set_singlepart_response_buffer(struct ssif_bmc_ctx *ssif_bmc)
+{
+ struct ssif_part_buffer *part = &ssif_bmc->part_buf;
+
+ part->address = GET_8BIT_ADDR(ssif_bmc->client->addr);
+ part->length = (u8)ssif_bmc->response.len;
+
+ /* Clear the rest to 0 */
+ memset(part->payload + part->length, 0, MAX_PAYLOAD_PER_TRANSACTION - part->length);
+ memcpy(&part->payload[0], &ssif_bmc->response.payload[0], part->length);
+}
+
+static void set_multipart_response_buffer(struct ssif_bmc_ctx *ssif_bmc)
+{
+ struct ssif_part_buffer *part = &ssif_bmc->part_buf;
+ u8 part_len = 0;
+
+ part->address = GET_8BIT_ADDR(ssif_bmc->client->addr);
+ switch (part->smbus_cmd) {
+ case SSIF_IPMI_MULTIPART_READ_START:
+ /*
+ * The Read Start length is 32 bytes.
+ * Read Start transfers the first 30 bytes of the IPMI response
+ * and the two special codes 0x00, 0x01.
+ */
+ ssif_bmc->nbytes_processed = 0;
+ ssif_bmc->block_num = 0;
+ part->length = MAX_PAYLOAD_PER_TRANSACTION;
+ part_len = MAX_IPMI_DATA_PER_START_TRANSACTION;
+ ssif_bmc->remain_len = ssif_bmc->response.len - part_len;
+
+ part->payload[0] = 0x00; /* Start Flag */
+ part->payload[1] = 0x01; /* Start Flag */
+
+ memcpy(&part->payload[2], &ssif_bmc->response.payload[0], part_len);
+ break;
+
+ case SSIF_IPMI_MULTIPART_READ_MIDDLE:
+ /*
+ * IPMI READ Middle and READ End messages can carry up to 31 bytes
+ * of IPMI data plus a block number byte.
+ */
+ if (ssif_bmc->remain_len <= MAX_IPMI_DATA_PER_MIDDLE_TRANSACTION) {
+ /*
+ * This is READ End message
+ * Return length is the remaining response data length
+ * plus block number
+ * Block number 0xFF is to indicate this is last message
+ *
+ */
+ /* Clean the buffer */
+ memset(&part->payload[0], 0, MAX_PAYLOAD_PER_TRANSACTION);
+ part->length = ssif_bmc->remain_len + 1;
+ part_len = ssif_bmc->remain_len;
+ ssif_bmc->block_num = 0xFF;
+ part->payload[0] = ssif_bmc->block_num;
+ } else {
+ /*
+ * This is READ Middle message
+ * Response length is the maximum SMBUS transfer length
+ * Block number byte is incremented
+ * Return length is maximum SMBUS transfer length
+ */
+ part->length = MAX_PAYLOAD_PER_TRANSACTION;
+ part_len = MAX_IPMI_DATA_PER_MIDDLE_TRANSACTION;
+ part->payload[0] = ssif_bmc->block_num;
+ ssif_bmc->block_num++;
+ }
+
+ ssif_bmc->remain_len -= part_len;
+ memcpy(&part->payload[1], ssif_bmc->response.payload + ssif_bmc->nbytes_processed,
+ part_len);
+ break;
+
+ default:
+ /* Do not expect to go to this case */
+ dev_err(&ssif_bmc->client->dev, "%s: Unexpected SMBus command 0x%x\n",
+ __func__, part->smbus_cmd);
+ break;
+ }
+
+ ssif_bmc->nbytes_processed += part_len;
+}
+
+static bool supported_read_cmd(u8 cmd)
+{
+ if (cmd == SSIF_IPMI_SINGLEPART_READ ||
+ cmd == SSIF_IPMI_MULTIPART_READ_START ||
+ cmd == SSIF_IPMI_MULTIPART_READ_MIDDLE)
+ return true;
+
+ return false;
+}
+
+static bool supported_write_cmd(u8 cmd)
+{
+ if (cmd == SSIF_IPMI_SINGLEPART_WRITE ||
+ cmd == SSIF_IPMI_MULTIPART_WRITE_START ||
+ cmd == SSIF_IPMI_MULTIPART_WRITE_MIDDLE ||
+ cmd == SSIF_IPMI_MULTIPART_WRITE_END)
+ return true;
+
+ return false;
+}
+
+/* Process the IPMI response that will be read by master */
+static void handle_read_processed(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ struct ssif_part_buffer *part = &ssif_bmc->part_buf;
+
+ /* msg_idx starts from 0 */
+ if (part->index < part->length)
+ *val = part->payload[part->index];
+ else if (part->index == part->length && ssif_bmc->pec_support)
+ *val = part->pec;
+ else
+ *val = 0;
+
+ part->index++;
+}
+
+static void handle_write_received(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ /*
+ * The msg_idx must be 1 when we first enter the SSIF_REQ_RECVING state,
+ * and it can never exceed 36 bytes: the 32-byte maximum payload plus
+ * the address, the command, the length and the PEC.
+ */
+ if (ssif_bmc->msg_idx < 1 || ssif_bmc->msg_idx > MAX_TRANSACTION)
+ return;
+
+ if (ssif_bmc->msg_idx == 1) {
+ ssif_bmc->part_buf.length = *val;
+ ssif_bmc->part_buf.index = 0;
+ } else {
+ ssif_bmc->part_buf.payload[ssif_bmc->part_buf.index++] = *val;
+ }
+
+ ssif_bmc->msg_idx++;
+}
+
+static bool validate_request_part(struct ssif_bmc_ctx *ssif_bmc)
+{
+ struct ssif_part_buffer *part = &ssif_bmc->part_buf;
+ bool ret = true;
+ u8 cpec;
+ u8 addr;
+
+ if (part->index == part->length) {
+ /* PEC is not included */
+ ssif_bmc->pec_support = false;
+ ret = true;
+ goto exit;
+ }
+
+ if (part->index != part->length + 1) {
+ ret = false;
+ goto exit;
+ }
+
+ /* PEC is included */
+ ssif_bmc->pec_support = true;
+ part->pec = part->payload[part->length];
+ addr = GET_8BIT_ADDR(ssif_bmc->client->addr);
+ cpec = i2c_smbus_pec(0, &addr, 1);
+ cpec = i2c_smbus_pec(cpec, &part->smbus_cmd, 1);
+ cpec = i2c_smbus_pec(cpec, &part->length, 1);
+ /*
+ * The SMBus specification does not allow the length
+ * (byte count) in the Write-Block protocol to be zero.
+ * It is therefore illegal for the last Middle
+ * transaction in the sequence to carry 32 bytes while
+ * the End transaction has a length of 0.
+ * Some users may still attempt this, so guard the
+ * ssif_bmc driver against breaking in that case.
+ */
+ if (part->length)
+ cpec = i2c_smbus_pec(cpec, part->payload, part->length);
+
+ if (cpec != part->pec)
+ ret = false;
+
+exit:
+ return ret;
+}
+
+static void process_request_part(struct ssif_bmc_ctx *ssif_bmc)
+{
+ struct ssif_part_buffer *part = &ssif_bmc->part_buf;
+ unsigned int len;
+
+ switch (part->smbus_cmd) {
+ case SSIF_IPMI_SINGLEPART_WRITE:
+ /* save the whole part to request*/
+ ssif_bmc->request.len = part->length;
+ memcpy(ssif_bmc->request.payload, part->payload, part->length);
+
+ break;
+ case SSIF_IPMI_MULTIPART_WRITE_START:
+ ssif_bmc->request.len = 0;
+
+ fallthrough;
+ case SSIF_IPMI_MULTIPART_WRITE_MIDDLE:
+ case SSIF_IPMI_MULTIPART_WRITE_END:
+ len = ssif_bmc->request.len + part->length;
+ /* Do the bound check here, not allow the request len exceed 254 bytes */
+ if (len > IPMI_SSIF_PAYLOAD_MAX) {
+ dev_warn(&ssif_bmc->client->dev,
+ "Warn: Request exceeded 254 bytes, aborting");
+ /* Request too long, aborting */
+ ssif_bmc->aborting = true;
+ } else {
+ memcpy(ssif_bmc->request.payload + ssif_bmc->request.len,
+ part->payload, part->length);
+ ssif_bmc->request.len += part->length;
+ }
+ break;
+ default:
+ /* Do not expect to go to this case */
+ dev_err(&ssif_bmc->client->dev, "%s: Unexpected SMBus command 0x%x\n",
+ __func__, part->smbus_cmd);
+ break;
+ }
+}
+
+static void process_smbus_cmd(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ /* SMBUS command can vary (single or multi-part) */
+ ssif_bmc->part_buf.smbus_cmd = *val;
+ ssif_bmc->msg_idx = 1;
+ memset(&ssif_bmc->part_buf.payload[0], 0, MAX_PAYLOAD_PER_TRANSACTION);
+
+ if (*val == SSIF_IPMI_SINGLEPART_WRITE || *val == SSIF_IPMI_MULTIPART_WRITE_START) {
+ /*
+ * The response may not arrive in time, causing the host SSIF driver
+ * to time out and resend a new request. In that case, check for a
+ * pending response and clear it.
+ */
+ if (ssif_bmc->response_in_progress)
+ complete_response(ssif_bmc);
+
+ /* This is new request, flip aborting flag if set */
+ if (ssif_bmc->aborting)
+ ssif_bmc->aborting = false;
+ }
+}
+
+static void on_read_requested_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ if (ssif_bmc->state == SSIF_READY ||
+ ssif_bmc->state == SSIF_START ||
+ ssif_bmc->state == SSIF_REQ_RECVING ||
+ ssif_bmc->state == SSIF_RES_SENDING) {
+ dev_warn(&ssif_bmc->client->dev,
+ "Warn: %s unexpected READ REQUESTED in state=%s\n",
+ __func__, state_to_string(ssif_bmc->state));
+ ssif_bmc->state = SSIF_ABORTING;
+ *val = 0;
+ return;
+
+ } else if (ssif_bmc->state == SSIF_SMBUS_CMD) {
+ if (!supported_read_cmd(ssif_bmc->part_buf.smbus_cmd)) {
+ dev_warn(&ssif_bmc->client->dev, "Warn: Unknown SMBus read command=0x%x",
+ ssif_bmc->part_buf.smbus_cmd);
+ ssif_bmc->aborting = true;
+ }
+
+ if (ssif_bmc->aborting)
+ ssif_bmc->state = SSIF_ABORTING;
+ else
+ ssif_bmc->state = SSIF_RES_SENDING;
+ }
+
+ ssif_bmc->msg_idx = 0;
+
+ /* Send 0 if there is nothing to send */
+ if (!ssif_bmc->response_in_progress || ssif_bmc->state == SSIF_ABORTING) {
+ *val = 0;
+ return;
+ }
+
+ if (ssif_bmc->is_singlepart_read)
+ set_singlepart_response_buffer(ssif_bmc);
+ else
+ set_multipart_response_buffer(ssif_bmc);
+
+ calculate_response_part_pec(&ssif_bmc->part_buf);
+ ssif_bmc->part_buf.index = 0;
+ *val = ssif_bmc->part_buf.length;
+}
+
+static void on_read_processed_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ if (ssif_bmc->state == SSIF_READY ||
+ ssif_bmc->state == SSIF_START ||
+ ssif_bmc->state == SSIF_REQ_RECVING ||
+ ssif_bmc->state == SSIF_SMBUS_CMD) {
+ dev_warn(&ssif_bmc->client->dev,
+ "Warn: %s unexpected READ PROCESSED in state=%s\n",
+ __func__, state_to_string(ssif_bmc->state));
+ ssif_bmc->state = SSIF_ABORTING;
+ *val = 0;
+ return;
+ }
+
+ /* Send 0 if there is nothing to send */
+ if (!ssif_bmc->response_in_progress || ssif_bmc->state == SSIF_ABORTING) {
+ *val = 0;
+ return;
+ }
+
+ handle_read_processed(ssif_bmc, val);
+}
+
+static void on_write_requested_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ if (ssif_bmc->state == SSIF_READY || ssif_bmc->state == SSIF_SMBUS_CMD) {
+ ssif_bmc->state = SSIF_START;
+
+ } else if (ssif_bmc->state == SSIF_START ||
+ ssif_bmc->state == SSIF_REQ_RECVING ||
+ ssif_bmc->state == SSIF_RES_SENDING) {
+ dev_warn(&ssif_bmc->client->dev,
+ "Warn: %s unexpected WRITE REQUEST in state=%s\n",
+ __func__, state_to_string(ssif_bmc->state));
+ ssif_bmc->state = SSIF_ABORTING;
+ return;
+ }
+
+ ssif_bmc->msg_idx = 0;
+ ssif_bmc->part_buf.address = *val;
+}
+
+static void on_write_received_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ if (ssif_bmc->state == SSIF_READY ||
+ ssif_bmc->state == SSIF_RES_SENDING) {
+ dev_warn(&ssif_bmc->client->dev,
+ "Warn: %s unexpected WRITE RECEIVED in state=%s\n",
+ __func__, state_to_string(ssif_bmc->state));
+ ssif_bmc->state = SSIF_ABORTING;
+
+ } else if (ssif_bmc->state == SSIF_START) {
+ ssif_bmc->state = SSIF_SMBUS_CMD;
+
+ } else if (ssif_bmc->state == SSIF_SMBUS_CMD) {
+ if (!supported_write_cmd(ssif_bmc->part_buf.smbus_cmd)) {
+ dev_warn(&ssif_bmc->client->dev, "Warn: Unknown SMBus write command=0x%x",
+ ssif_bmc->part_buf.smbus_cmd);
+ ssif_bmc->aborting = true;
+ }
+
+ if (ssif_bmc->aborting)
+ ssif_bmc->state = SSIF_ABORTING;
+ else
+ ssif_bmc->state = SSIF_REQ_RECVING;
+ }
+
+ /* This is response sending state */
+ if (ssif_bmc->state == SSIF_REQ_RECVING)
+ handle_write_received(ssif_bmc, val);
+ else if (ssif_bmc->state == SSIF_SMBUS_CMD)
+ process_smbus_cmd(ssif_bmc, val);
+}
+
+static void on_stop_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ if (ssif_bmc->state == SSIF_READY ||
+ ssif_bmc->state == SSIF_START ||
+ ssif_bmc->state == SSIF_SMBUS_CMD ||
+ ssif_bmc->state == SSIF_ABORTING) {
+ dev_warn(&ssif_bmc->client->dev,
+ "Warn: %s unexpected SLAVE STOP in state=%s\n",
+ __func__, state_to_string(ssif_bmc->state));
+ ssif_bmc->state = SSIF_READY;
+
+ } else if (ssif_bmc->state == SSIF_REQ_RECVING) {
+ if (validate_request_part(ssif_bmc)) {
+ process_request_part(ssif_bmc);
+ if (ssif_bmc->part_buf.smbus_cmd == SSIF_IPMI_SINGLEPART_WRITE ||
+ ssif_bmc->part_buf.smbus_cmd == SSIF_IPMI_MULTIPART_WRITE_END)
+ handle_request(ssif_bmc);
+ ssif_bmc->state = SSIF_READY;
+ } else {
+ /*
+ * A BMC that receives an invalid request drops the data for the write
+ * transaction and any further transactions (read or write) until
+ * the next valid read or write Start transaction is received.
+ */
+ dev_err(&ssif_bmc->client->dev, "Error: invalid pec\n");
+ ssif_bmc->aborting = true;
+ }
+ } else if (ssif_bmc->state == SSIF_RES_SENDING) {
+ if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF)
+ /* Invalidate response buffer to denote it is sent */
+ complete_response(ssif_bmc);
+ ssif_bmc->state = SSIF_READY;
+ }
+
+ /* Reset message index */
+ ssif_bmc->msg_idx = 0;
+}
+
+/*
+ * Callback function to handle I2C slave events
+ */
+static int ssif_bmc_cb(struct i2c_client *client, enum i2c_slave_event event, u8 *val)
+{
+ unsigned long flags;
+ struct ssif_bmc_ctx *ssif_bmc = i2c_get_clientdata(client);
+ int ret = 0;
+
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
+
+ switch (event) {
+ case I2C_SLAVE_READ_REQUESTED:
+ on_read_requested_event(ssif_bmc, val);
+ break;
+
+ case I2C_SLAVE_WRITE_REQUESTED:
+ on_write_requested_event(ssif_bmc, val);
+ break;
+
+ case I2C_SLAVE_READ_PROCESSED:
+ on_read_processed_event(ssif_bmc, val);
+ break;
+
+ case I2C_SLAVE_WRITE_RECEIVED:
+ on_write_received_event(ssif_bmc, val);
+ break;
+
+ case I2C_SLAVE_STOP:
+ on_stop_event(ssif_bmc, val);
+ break;
+
+ default:
+ dev_warn(&ssif_bmc->client->dev, "Warn: Unknown i2c slave event\n");
+ break;
+ }
+
+ if (!ssif_bmc->aborting && ssif_bmc->busy)
+ ret = -EBUSY;
+
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+
+ return ret;
+}
+
+static int ssif_bmc_probe(struct i2c_client *client)
+{
+ struct ssif_bmc_ctx *ssif_bmc;
+ int ret;
+
+ ssif_bmc = devm_kzalloc(&client->dev, sizeof(*ssif_bmc), GFP_KERNEL);
+ if (!ssif_bmc)
+ return -ENOMEM;
+
+ spin_lock_init(&ssif_bmc->lock);
+
+ init_waitqueue_head(&ssif_bmc->wait_queue);
+ ssif_bmc->request_available = false;
+ ssif_bmc->response_in_progress = false;
+ ssif_bmc->busy = false;
+ ssif_bmc->response_timer_inited = false;
+
+ /* Register misc device interface */
+ ssif_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
+ ssif_bmc->miscdev.name = DEVICE_NAME;
+ ssif_bmc->miscdev.fops = &ssif_bmc_fops;
+ ssif_bmc->miscdev.parent = &client->dev;
+ ret = misc_register(&ssif_bmc->miscdev);
+ if (ret)
+ return ret;
+
+ ssif_bmc->client = client;
+ ssif_bmc->client->flags |= I2C_CLIENT_SLAVE;
+
+ /* Register I2C slave */
+ i2c_set_clientdata(client, ssif_bmc);
+ ret = i2c_slave_register(client, ssif_bmc_cb);
+ if (ret)
+ misc_deregister(&ssif_bmc->miscdev);
+
+ return ret;
+}
+
+static void ssif_bmc_remove(struct i2c_client *client)
+{
+ struct ssif_bmc_ctx *ssif_bmc = i2c_get_clientdata(client);
+
+ i2c_slave_unregister(client);
+ misc_deregister(&ssif_bmc->miscdev);
+}
+
+static const struct of_device_id ssif_bmc_match[] = {
+ { .compatible = "ssif-bmc" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ssif_bmc_match);
+
+static const struct i2c_device_id ssif_bmc_id[] = {
+ { DEVICE_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, ssif_bmc_id);
+
+static struct i2c_driver ssif_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = ssif_bmc_match,
+ },
+ .probe_new = ssif_bmc_probe,
+ .remove = ssif_bmc_remove,
+ .id_table = ssif_bmc_id,
+};
+
+module_i2c_driver(ssif_bmc_driver);
+
+MODULE_AUTHOR("Quan Nguyen <quan@os.amperecomputing.com>");
+MODULE_AUTHOR("Chuong Tran <chuong@os.amperecomputing.com>");
+MODULE_DESCRIPTION("Linux device driver of the BMC IPMI SSIF interface.");
+MODULE_LICENSE("GPL");
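The new ssif_bmc driver exposes the BMC side of SSIF to userspace as the misc device /dev/ipmi-ssif-host: read() (or poll() for EPOLLIN) returns a complete IPMI request once it has been received over I2C, and the response must then be written back before the 500 ms response timer expires. A rough userspace sketch, assuming struct ipmi_ssif_msg from <linux/ipmi_ssif_bmc.h> is a length field followed by the payload bytes:

#include <fcntl.h>
#include <unistd.h>
#include <linux/ipmi_ssif_bmc.h>

int main(void)
{
    struct ipmi_ssif_msg req = { 0 }, rsp = { 0 };
    int fd = open("/dev/ipmi-ssif-host", O_RDWR);

    if (fd < 0)
        return 1;

    /* Blocks until a complete IPMI request has arrived from the host. */
    if (read(fd, &req, sizeof(req)) <= 0)
        return 1;

    /* Minimal response: request netfn+1 (| 0x04 in byte 0), same command,
     * completion code 0x00. A real daemon would dispatch on netfn/cmd.
     */
    rsp.payload[0] = req.payload[0] | 0x04;
    rsp.payload[1] = req.payload[1];
    rsp.payload[2] = 0x00;
    rsp.len = 3;

    /* Must land before the driver's response timeout fires. */
    write(fd, &rsp, sizeof(rsp.len) + rsp.len);
    return 0;
}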
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 69754155300e..5885ed574c6a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -53,6 +53,7 @@
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <linux/siphash.h>
+#include <linux/sched/isolation.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
@@ -84,6 +85,7 @@ static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
+static ATOMIC_NOTIFIER_HEAD(random_ready_notifier);
/* Control how we warn userspace. */
static struct ratelimit_state urandom_warning =
@@ -120,7 +122,7 @@ static void try_to_generate_entropy(void);
* Wait for the input pool to be seeded and thus guaranteed to supply
* cryptographically secure random numbers. This applies to: the /dev/urandom
* device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
- * int,long} family of functions. Using any of these functions without first
+ * long} family of functions. Using any of these functions without first
* calling this function forfeits the guarantee of security.
*
* Returns: 0 if the input pool has been seeded.
@@ -140,6 +142,26 @@ int wait_for_random_bytes(void)
}
EXPORT_SYMBOL(wait_for_random_bytes);
+/*
+ * Add a callback function that will be invoked when the crng is initialised,
+ * or immediately if it already has been. Only use this is you are absolutely
+ * sure it is required. Most users should instead be able to test
+ * `rng_is_initialized()` on demand, or make use of `get_random_bytes_wait()`.
+ */
+int __cold execute_with_initialized_rng(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&random_ready_notifier.lock, flags);
+ if (crng_ready())
+ nb->notifier_call(nb, 0, NULL);
+ else
+ ret = raw_notifier_chain_register((struct raw_notifier_head *)&random_ready_notifier.head, nb);
+ spin_unlock_irqrestore(&random_ready_notifier.lock, flags);
+ return ret;
+}
+
#define warn_unseeded_randomness() \
if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
@@ -160,6 +182,9 @@ EXPORT_SYMBOL(wait_for_random_bytes);
* u8 get_random_u8()
* u16 get_random_u16()
* u32 get_random_u32()
+ * u32 get_random_u32_below(u32 ceil)
+ * u32 get_random_u32_above(u32 floor)
+ * u32 get_random_u32_inclusive(u32 floor, u32 ceil)
* u64 get_random_u64()
* unsigned long get_random_long()
*
@@ -179,7 +204,6 @@ enum {
static struct {
u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
- unsigned long birth;
unsigned long generation;
spinlock_t lock;
} base_crng = {
@@ -197,16 +221,41 @@ static DEFINE_PER_CPU(struct crng, crngs) = {
.lock = INIT_LOCAL_LOCK(crngs.lock),
};
+/*
+ * Return the interval until the next reseeding, which is normally
+ * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
+ * proportional to the uptime.
+ */
+static unsigned int crng_reseed_interval(void)
+{
+ static bool early_boot = true;
+
+ if (unlikely(READ_ONCE(early_boot))) {
+ time64_t uptime = ktime_get_seconds();
+ if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
+ WRITE_ONCE(early_boot, false);
+ else
+ return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
+ (unsigned int)uptime / 2 * HZ);
+ }
+ return CRNG_RESEED_INTERVAL;
+}
+
/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
static void extract_entropy(void *buf, size_t len);
/* This extracts a new crng key from the input pool. */
-static void crng_reseed(void)
+static void crng_reseed(struct work_struct *work)
{
+ static DECLARE_DELAYED_WORK(next_reseed, crng_reseed);
unsigned long flags;
unsigned long next_gen;
u8 key[CHACHA_KEY_SIZE];
+ /* Immediately schedule the next reseeding, so that it fires sooner rather than later. */
+ if (likely(system_unbound_wq))
+ queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval());
+
extract_entropy(key, sizeof(key));
/*
@@ -221,7 +270,6 @@ static void crng_reseed(void)
if (next_gen == ULONG_MAX)
++next_gen;
WRITE_ONCE(base_crng.generation, next_gen);
- WRITE_ONCE(base_crng.birth, jiffies);
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
spin_unlock_irqrestore(&base_crng.lock, flags);
@@ -261,26 +309,6 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
}
/*
- * Return the interval until the next reseeding, which is normally
- * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
- * proportional to the uptime.
- */
-static unsigned int crng_reseed_interval(void)
-{
- static bool early_boot = true;
-
- if (unlikely(READ_ONCE(early_boot))) {
- time64_t uptime = ktime_get_seconds();
- if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
- WRITE_ONCE(early_boot, false);
- else
- return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
- (unsigned int)uptime / 2 * HZ);
- }
- return CRNG_RESEED_INTERVAL;
-}
-
-/*
* This function returns a ChaCha state that you may use for generating
* random data. It also returns up to 32 bytes on its own of random data
* that may be used; random_data_len may not be greater than 32.
@@ -315,13 +343,6 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
return;
}
- /*
- * If the base_crng is old enough, we reseed, which in turn bumps the
- * generation counter that we check below.
- */
- if (unlikely(time_is_before_jiffies(READ_ONCE(base_crng.birth) + crng_reseed_interval())))
- crng_reseed();
-
local_lock_irqsave(&crngs.lock, flags);
crng = raw_cpu_ptr(&crngs);
@@ -383,11 +404,11 @@ static void _get_random_bytes(void *buf, size_t len)
}
/*
- * This function is the exported kernel interface. It returns some number of
- * good random numbers, suitable for key generation, seeding TCP sequence
- * numbers, etc. In order to ensure that the randomness returned by this
- * function is okay, the function wait_for_random_bytes() should be called and
- * return 0 at least once at any point prior.
+ * This returns random bytes in arbitrary quantities. The quality of the
+ * random bytes is as good as that of /dev/urandom. In order to ensure that the
+ * randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
*/
void get_random_bytes(void *buf, size_t len)
{
@@ -510,6 +531,41 @@ DEFINE_BATCHED_ENTROPY(u16)
DEFINE_BATCHED_ENTROPY(u32)
DEFINE_BATCHED_ENTROPY(u64)
+u32 __get_random_u32_below(u32 ceil)
+{
+ /*
+ * This is the slow path for variable ceil. It is still fast, most of
+ * the time, by doing traditional reciprocal multiplication and
+ * opportunistically comparing the lower half to ceil itself, before
+ * falling back to computing a larger bound, and then rejecting samples
+ * whose lower half would indicate a range indivisible by ceil. The use
+ * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable
+ * in 32-bits.
+ */
+ u32 rand = get_random_u32();
+ u64 mult;
+
+ /*
+ * This function is technically undefined for ceil == 0, and in fact
+	 * for the non-underscored constant version in the header, we trigger a
+	 * build bug on that. But for the non-constant case, it's convenient to
+	 * have it evaluate to a straight call to get_random_u32(), so that
+ * get_random_u32_inclusive() can work over its whole range without
+ * undefined behavior.
+ */
+ if (unlikely(!ceil))
+ return rand;
+
+ mult = (u64)ceil * rand;
+ if (unlikely((u32)mult < ceil)) {
+ u32 bound = -ceil % ceil;
+ while (unlikely((u32)mult < bound))
+ mult = (u64)ceil * get_random_u32();
+ }
+ return mult >> 32;
+}
+EXPORT_SYMBOL(__get_random_u32_below);
+
#ifdef CONFIG_SMP
/*
* This function is called when the CPU is coming up, with entry
@@ -660,9 +716,10 @@ static void __cold _credit_init_bits(size_t bits)
} while (!try_cmpxchg(&input_pool.init_bits, &orig, new));
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
- crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
+ crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
if (static_key_initialized)
execute_in_process_context(crng_set_ready, &set_ready);
+ atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
@@ -689,7 +746,7 @@ static void __cold _credit_init_bits(size_t bits)
* the above entropy accumulation routines:
*
* void add_device_randomness(const void *buf, size_t len);
- * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
+ * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
* void add_bootloader_randomness(const void *buf, size_t len);
* void add_vmfork_randomness(const void *unique_vm_id, size_t len);
* void add_interrupt_randomness(int irq);
@@ -710,7 +767,7 @@ static void __cold _credit_init_bits(size_t bits)
*
* add_bootloader_randomness() is called by bootloader drivers, such as EFI
* and device tree, and credits its input depending on whether or not the
- * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ * command line option 'random.trust_bootloader' is set.
*
* add_vmfork_randomness() adds a unique (but not necessarily secret) ID
* representing the current instance of a VM to the pool, without crediting,
@@ -736,8 +793,8 @@ static void __cold _credit_init_bits(size_t bits)
*
**********************************************************************/
-static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
-static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
+static bool trust_cpu __initdata = true;
+static bool trust_bootloader __initdata = true;
static int __init parse_trust_cpu(char *arg)
{
return kstrtobool(arg, &trust_cpu);
@@ -768,7 +825,7 @@ static int random_pm_notification(struct notifier_block *nb, unsigned long actio
if (crng_ready() && (action == PM_RESTORE_PREPARE ||
(action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
!IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
- crng_reseed();
+ crng_reseed(NULL);
pr_notice("crng reseeded on system resumption\n");
}
return 0;
@@ -791,13 +848,13 @@ void __init random_init_early(const char *command_line)
#endif
for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
- longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
if (longs) {
_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
i += longs;
continue;
}
- longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
if (longs) {
_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
i += longs;
@@ -812,7 +869,7 @@ void __init random_init_early(const char *command_line)
/* Reseed if already seeded by earlier phases. */
if (crng_ready())
- crng_reseed();
+ crng_reseed(NULL);
else if (trust_cpu)
_credit_init_bits(arch_bits);
}
@@ -840,7 +897,7 @@ void __init random_init(void)
/* Reseed if already seeded by earlier phases. */
if (crng_ready())
- crng_reseed();
+ crng_reseed(NULL);
WARN_ON(register_pm_notifier(&pm_notifier));
@@ -869,11 +926,11 @@ void add_device_randomness(const void *buf, size_t len)
EXPORT_SYMBOL(add_device_randomness);
/*
- * Interface for in-kernel drivers of true hardware RNGs.
- * Those devices may produce endless random bits and will be throttled
- * when our pool is full.
+ * Interface for in-kernel drivers of true hardware RNGs. Those devices
+ * may produce endless random bits, so this function will sleep for
+ * some amount of time afterwards if the sleep_after parameter is true.
*/
-void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after)
{
mix_pool_bytes(buf, len);
credit_init_bits(entropy);
@@ -882,14 +939,14 @@ void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
* Throttle writing to once every reseed interval, unless we're not yet
* initialized or no entropy is credited.
*/
- if (!kthread_should_stop() && (crng_ready() || !entropy))
+ if (sleep_after && !kthread_should_stop() && (crng_ready() || !entropy))
schedule_timeout_interruptible(crng_reseed_interval());
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
/*
- * Handle random seed passed by bootloader, and credit it if
- * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ * Handle random seed passed by bootloader, and credit it depending
+ * on the command line option 'random.trust_bootloader'.
*/
void __init add_bootloader_randomness(const void *buf, size_t len)
{
@@ -910,7 +967,7 @@ void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
{
add_device_randomness(unique_vm_id, len);
if (crng_ready()) {
- crng_reseed();
+ crng_reseed(NULL);
pr_notice("crng reseeded due to virtual machine fork\n");
}
blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
@@ -1176,66 +1233,102 @@ void __cold rand_initialize_disk(struct gendisk *disk)
struct entropy_timer_state {
unsigned long entropy;
struct timer_list timer;
- unsigned int samples, samples_per_bit;
+ atomic_t samples;
+ unsigned int samples_per_bit;
};
/*
- * Each time the timer fires, we expect that we got an unpredictable
- * jump in the cycle counter. Even if the timer is running on another
- * CPU, the timer activity will be touching the stack of the CPU that is
- * generating entropy..
+ * Each time the timer fires, we expect that we got an unpredictable jump in
+ * the cycle counter. Even if the timer is running on another CPU, the timer
+ * activity will be touching the stack of the CPU that is generating entropy.
*
- * Note that we don't re-arm the timer in the timer itself - we are
- * happy to be scheduled away, since that just makes the load more
- * complex, but we do not want the timer to keep ticking unless the
- * entropy loop is running.
+ * Note that we don't re-arm the timer in the timer itself - we are happy to be
+ * scheduled away, since that just makes the load more complex, but we do not
+ * want the timer to keep ticking unless the entropy loop is running.
*
* So the re-arming always happens in the entropy loop itself.
*/
static void __cold entropy_timer(struct timer_list *timer)
{
struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
+ unsigned long entropy = random_get_entropy();
- if (++state->samples == state->samples_per_bit) {
+ mix_pool_bytes(&entropy, sizeof(entropy));
+ if (atomic_inc_return(&state->samples) % state->samples_per_bit == 0)
credit_init_bits(1);
- state->samples = 0;
- }
}
/*
- * If we have an actual cycle counter, see if we can
- * generate enough entropy with timing noise
+ * If we have an actual cycle counter, see if we can generate enough entropy
+ * with timing noise.
*/
static void __cold try_to_generate_entropy(void)
{
enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
- struct entropy_timer_state stack;
+ u8 stack_bytes[sizeof(struct entropy_timer_state) + SMP_CACHE_BYTES - 1];
+ struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
unsigned int i, num_different = 0;
unsigned long last = random_get_entropy();
+ int cpu = -1;
for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
- stack.entropy = random_get_entropy();
- if (stack.entropy != last)
+ stack->entropy = random_get_entropy();
+ if (stack->entropy != last)
++num_different;
- last = stack.entropy;
+ last = stack->entropy;
}
- stack.samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
- if (stack.samples_per_bit > MAX_SAMPLES_PER_BIT)
+ stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
+ if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT)
return;
- stack.samples = 0;
- timer_setup_on_stack(&stack.timer, entropy_timer, 0);
+ atomic_set(&stack->samples, 0);
+ timer_setup_on_stack(&stack->timer, entropy_timer, 0);
while (!crng_ready() && !signal_pending(current)) {
- if (!timer_pending(&stack.timer))
- mod_timer(&stack.timer, jiffies);
- mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
+ /*
+ * Check !timer_pending() and then ensure that any previous callback has finished
+ * executing by checking try_to_del_timer_sync(), before queueing the next one.
+ */
+ if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
+ struct cpumask timer_cpus;
+ unsigned int num_cpus;
+
+ /*
+ * Preemption must be disabled here, both to read the current CPU number
+ * and to avoid scheduling a timer on a dead CPU.
+ */
+ preempt_disable();
+
+ /* Only schedule callbacks on timer CPUs that are online. */
+ cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
+ num_cpus = cpumask_weight(&timer_cpus);
+			/* In the very bizarre case of misconfiguration, fall back to all online CPUs. */
+ if (unlikely(num_cpus == 0)) {
+ timer_cpus = *cpu_online_mask;
+ num_cpus = cpumask_weight(&timer_cpus);
+ }
+
+ /* Basic CPU round-robin, which avoids the current CPU. */
+ do {
+ cpu = cpumask_next(cpu, &timer_cpus);
+ if (cpu == nr_cpumask_bits)
+ cpu = cpumask_first(&timer_cpus);
+ } while (cpu == smp_processor_id() && num_cpus > 1);
+
+ /* Expiring the timer at `jiffies` means it's the next tick. */
+ stack->timer.expires = jiffies;
+
+ add_timer_on(&stack->timer, cpu);
+
+ preempt_enable();
+ }
+ mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
schedule();
- stack.entropy = random_get_entropy();
+ stack->entropy = random_get_entropy();
}
+ mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
- del_timer_sync(&stack.timer);
- destroy_timer_on_stack(&stack.timer);
- mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
+ del_timer_sync(&stack->timer);
+ destroy_timer_on_stack(&stack->timer);
}
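[Editor's note] The CPU selection in the hunk above is easy to miss inside the larger sampling loop. Below is an isolated sketch of just that round-robin over online housekeeping CPUs, under the same assumption as the original that a stack-allocated cpumask is acceptable; the demo_* name is invented for illustration.

    #include <linux/cpumask.h>
    #include <linux/sched/isolation.h>

    /* Pick the next online housekeeping CPU after @prev, avoiding @self. */
    static int demo_next_timer_cpu(int prev, int self)
    {
            struct cpumask cpus;
            int cpu = prev;

            cpumask_and(&cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
            if (cpumask_empty(&cpus))
                    cpumask_copy(&cpus, cpu_online_mask);

            do {
                    cpu = cpumask_next(cpu, &cpus);
                    if (cpu >= nr_cpu_ids)
                            cpu = cpumask_first(&cpus);
            } while (cpu == self && cpumask_weight(&cpus) > 1);

            return cpu;
    }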
@@ -1291,7 +1384,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags
return ret;
}
- ret = import_single_range(READ, ubuf, len, &iov, &iter);
+ ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter);
if (unlikely(ret))
return ret;
return get_random_bytes_user(&iter);
@@ -1409,7 +1502,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EINVAL;
if (get_user(len, p++))
return -EFAULT;
- ret = import_single_range(WRITE, p, len, &iov, &iter);
+ ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter);
if (unlikely(ret))
return ret;
ret = write_pool_user(&iter);
@@ -1432,7 +1525,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (!crng_ready())
return -ENODATA;
- crng_reseed();
+ crng_reseed(NULL);
return 0;
default:
return -EINVAL;
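[Editor's note] For callers, the helpers introduced above behave as follows: get_random_u32_below(ceil) returns a uniform value in [0, ceil) via rejection sampling with bound -ceil % ceil; for ceil = 10 that bound is (2^32 - 10) % 10 = 6, so only about 6 in 2^32 samples are retried to avoid bias. A hedged usage sketch follows; the demo_* names are invented, while get_random_u32_below(), get_random_u32_inclusive() and execute_with_initialized_rng() are the interfaces added or documented in the diff above.

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>
    #include <linux/random.h>

    static int demo_rng_ready(struct notifier_block *nb,
                              unsigned long action, void *data)
    {
            /* Runs once the crng is ready, or immediately if it already is. */
            pr_info("demo: dice roll %u\n", get_random_u32_inclusive(1, 6));
            return NOTIFY_DONE;
    }

    static struct notifier_block demo_rng_nb = {
            .notifier_call = demo_rng_ready,
    };

    static int __init demo_init(void)
    {
            u32 idx = get_random_u32_below(10);     /* uniform in [0, 9] */

            pr_info("demo: picked index %u\n", idx);
            return execute_with_initialized_rng(&demo_rng_nb);
    }
    module_init(demo_init);
    MODULE_LICENSE("GPL");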
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 27e301a6bb7a..9211531689b2 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1123,10 +1123,9 @@ static int sonypi_acpi_add(struct acpi_device *device)
return 0;
}
-static int sonypi_acpi_remove(struct acpi_device *device)
+static void sonypi_acpi_remove(struct acpi_device *device)
{
sonypi_acpi_device = NULL;
- return 0;
}
static const struct acpi_device_id sonypi_device_ids[] = {
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 1b18ce5ebab1..0913d3eb8d51 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -90,16 +90,21 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
return -ENODEV;
if (tbl->header.length <
- sizeof(*tbl) + sizeof(struct acpi_tpm2_phy))
+ sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) {
+ acpi_put_table((struct acpi_table_header *)tbl);
return -ENODEV;
+ }
tpm2_phy = (void *)tbl + sizeof(*tbl);
len = tpm2_phy->log_area_minimum_length;
start = tpm2_phy->log_area_start_address;
- if (!start || !len)
+ if (!start || !len) {
+ acpi_put_table((struct acpi_table_header *)tbl);
return -ENODEV;
+ }
+ acpi_put_table((struct acpi_table_header *)tbl);
format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
} else {
/* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
@@ -120,8 +125,10 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
break;
}
+ acpi_put_table((struct acpi_table_header *)buff);
format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
}
+
if (!len) {
dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
return -EIO;
@@ -156,5 +163,4 @@ err:
kfree(log->bios_event_log);
log->bios_event_log = NULL;
return ret;
-
}
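[Editor's note] The eventlog fix above is about reference balance: every successful acpi_get_table() must be paired with acpi_put_table() on every exit path, including the error ones. A minimal sketch of that pairing, with the demo_* name invented for illustration:

    #include <linux/acpi.h>
    #include <linux/errno.h>

    static int demo_check_tpm2_table(void)
    {
            struct acpi_table_header *tbl;
            acpi_status st;
            int ret = 0;

            st = acpi_get_table(ACPI_SIG_TPM2, 1, &tbl);
            if (ACPI_FAILURE(st))
                    return -ENODEV;

            if (tbl->length < sizeof(struct acpi_table_tpm2))
                    ret = -EINVAL;

            /* Drop the table reference on every exit path. */
            acpi_put_table(tbl);
            return ret;
    }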
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index a3aa411389e7..8156bb2af78c 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -6,13 +6,9 @@
#include <linux/module.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/tpm.h>
-#include <linux/platform_data/st33zp24.h>
#include "../tpm.h"
#include "st33zp24.h"
@@ -22,7 +18,6 @@
struct st33zp24_i2c_phy {
struct i2c_client *client;
u8 buf[ST33ZP24_BUFSIZE + 1];
- int io_lpcpd;
};
/*
@@ -99,115 +94,6 @@ static const struct st33zp24_phy_ops i2c_phy_ops = {
.recv = st33zp24_i2c_recv,
};
-static const struct acpi_gpio_params lpcpd_gpios = { 1, 0, false };
-
-static const struct acpi_gpio_mapping acpi_st33zp24_gpios[] = {
- { "lpcpd-gpios", &lpcpd_gpios, 1 },
- {},
-};
-
-static int st33zp24_i2c_acpi_request_resources(struct i2c_client *client)
-{
- struct tpm_chip *chip = i2c_get_clientdata(client);
- struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
- struct st33zp24_i2c_phy *phy = tpm_dev->phy_id;
- struct gpio_desc *gpiod_lpcpd;
- struct device *dev = &client->dev;
- int ret;
-
- ret = devm_acpi_dev_add_driver_gpios(dev, acpi_st33zp24_gpios);
- if (ret)
- return ret;
-
- /* Get LPCPD GPIO from ACPI */
- gpiod_lpcpd = devm_gpiod_get(dev, "lpcpd", GPIOD_OUT_HIGH);
- if (IS_ERR(gpiod_lpcpd)) {
- dev_err(&client->dev,
- "Failed to retrieve lpcpd-gpios from acpi.\n");
- phy->io_lpcpd = -1;
- /*
- * lpcpd pin is not specified. This is not an issue as
- * power management can be also managed by TPM specific
- * commands. So leave with a success status code.
- */
- return 0;
- }
-
- phy->io_lpcpd = desc_to_gpio(gpiod_lpcpd);
-
- return 0;
-}
-
-static int st33zp24_i2c_of_request_resources(struct i2c_client *client)
-{
- struct tpm_chip *chip = i2c_get_clientdata(client);
- struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
- struct st33zp24_i2c_phy *phy = tpm_dev->phy_id;
- struct device_node *pp;
- int gpio;
- int ret;
-
- pp = client->dev.of_node;
- if (!pp) {
- dev_err(&client->dev, "No platform data\n");
- return -ENODEV;
- }
-
- /* Get GPIO from device tree */
- gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0);
- if (gpio < 0) {
- dev_err(&client->dev,
- "Failed to retrieve lpcpd-gpios from dts.\n");
- phy->io_lpcpd = -1;
- /*
- * lpcpd pin is not specified. This is not an issue as
- * power management can be also managed by TPM specific
- * commands. So leave with a success status code.
- */
- return 0;
- }
- /* GPIO request and configuration */
- ret = devm_gpio_request_one(&client->dev, gpio,
- GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD");
- if (ret) {
- dev_err(&client->dev, "Failed to request lpcpd pin\n");
- return -ENODEV;
- }
- phy->io_lpcpd = gpio;
-
- return 0;
-}
-
-static int st33zp24_i2c_request_resources(struct i2c_client *client)
-{
- struct tpm_chip *chip = i2c_get_clientdata(client);
- struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
- struct st33zp24_i2c_phy *phy = tpm_dev->phy_id;
- struct st33zp24_platform_data *pdata;
- int ret;
-
- pdata = client->dev.platform_data;
- if (!pdata) {
- dev_err(&client->dev, "No platform data\n");
- return -ENODEV;
- }
-
- /* store for late use */
- phy->io_lpcpd = pdata->io_lpcpd;
-
- if (gpio_is_valid(pdata->io_lpcpd)) {
- ret = devm_gpio_request_one(&client->dev,
- pdata->io_lpcpd, GPIOF_OUT_INIT_HIGH,
- "TPM IO_LPCPD");
- if (ret) {
- dev_err(&client->dev, "Failed to request lpcpd pin\n");
- return ret;
- }
- }
-
- return 0;
-}
-
/*
* st33zp24_i2c_probe initialize the TPM device
* @param: client, the i2c_client description (TPM I2C description).
@@ -218,16 +104,8 @@ static int st33zp24_i2c_request_resources(struct i2c_client *client)
static int st33zp24_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int ret;
- struct st33zp24_platform_data *pdata;
struct st33zp24_i2c_phy *phy;
- if (!client) {
- pr_info("%s: i2c client is NULL. Device not accessible.\n",
- __func__);
- return -ENODEV;
- }
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_info(&client->dev, "client not i2c capable\n");
return -ENODEV;
@@ -240,23 +118,7 @@ static int st33zp24_i2c_probe(struct i2c_client *client,
phy->client = client;
- pdata = client->dev.platform_data;
- if (!pdata && client->dev.of_node) {
- ret = st33zp24_i2c_of_request_resources(client);
- if (ret)
- return ret;
- } else if (pdata) {
- ret = st33zp24_i2c_request_resources(client);
- if (ret)
- return ret;
- } else if (ACPI_HANDLE(&client->dev)) {
- ret = st33zp24_i2c_acpi_request_resources(client);
- if (ret)
- return ret;
- }
-
- return st33zp24_probe(phy, &i2c_phy_ops, &client->dev, client->irq,
- phy->io_lpcpd);
+ return st33zp24_probe(phy, &i2c_phy_ops, &client->dev, client->irq);
}
/*
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index 22d184884694..2154059f0235 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -6,13 +6,9 @@
#include <linux/module.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/tpm.h>
-#include <linux/platform_data/st33zp24.h>
#include "../tpm.h"
#include "st33zp24.h"
@@ -61,7 +57,6 @@ struct st33zp24_spi_phy {
u8 tx_buf[ST33ZP24_SPI_BUFFER_SIZE];
u8 rx_buf[ST33ZP24_SPI_BUFFER_SIZE];
- int io_lpcpd;
int latency;
};
@@ -218,115 +213,6 @@ static const struct st33zp24_phy_ops spi_phy_ops = {
.recv = st33zp24_spi_recv,
};
-static const struct acpi_gpio_params lpcpd_gpios = { 1, 0, false };
-
-static const struct acpi_gpio_mapping acpi_st33zp24_gpios[] = {
- { "lpcpd-gpios", &lpcpd_gpios, 1 },
- {},
-};
-
-static int st33zp24_spi_acpi_request_resources(struct spi_device *spi_dev)
-{
- struct tpm_chip *chip = spi_get_drvdata(spi_dev);
- struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
- struct st33zp24_spi_phy *phy = tpm_dev->phy_id;
- struct gpio_desc *gpiod_lpcpd;
- struct device *dev = &spi_dev->dev;
- int ret;
-
- ret = devm_acpi_dev_add_driver_gpios(dev, acpi_st33zp24_gpios);
- if (ret)
- return ret;
-
- /* Get LPCPD GPIO from ACPI */
- gpiod_lpcpd = devm_gpiod_get(dev, "lpcpd", GPIOD_OUT_HIGH);
- if (IS_ERR(gpiod_lpcpd)) {
- dev_err(dev, "Failed to retrieve lpcpd-gpios from acpi.\n");
- phy->io_lpcpd = -1;
- /*
- * lpcpd pin is not specified. This is not an issue as
- * power management can be also managed by TPM specific
- * commands. So leave with a success status code.
- */
- return 0;
- }
-
- phy->io_lpcpd = desc_to_gpio(gpiod_lpcpd);
-
- return 0;
-}
-
-static int st33zp24_spi_of_request_resources(struct spi_device *spi_dev)
-{
- struct tpm_chip *chip = spi_get_drvdata(spi_dev);
- struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
- struct st33zp24_spi_phy *phy = tpm_dev->phy_id;
- struct device_node *pp;
- int gpio;
- int ret;
-
- pp = spi_dev->dev.of_node;
- if (!pp) {
- dev_err(&spi_dev->dev, "No platform data\n");
- return -ENODEV;
- }
-
- /* Get GPIO from device tree */
- gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0);
- if (gpio < 0) {
- dev_err(&spi_dev->dev,
- "Failed to retrieve lpcpd-gpios from dts.\n");
- phy->io_lpcpd = -1;
- /*
- * lpcpd pin is not specified. This is not an issue as
- * power management can be also managed by TPM specific
- * commands. So leave with a success status code.
- */
- return 0;
- }
- /* GPIO request and configuration */
- ret = devm_gpio_request_one(&spi_dev->dev, gpio,
- GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD");
- if (ret) {
- dev_err(&spi_dev->dev, "Failed to request lpcpd pin\n");
- return -ENODEV;
- }
- phy->io_lpcpd = gpio;
-
- return 0;
-}
-
-static int st33zp24_spi_request_resources(struct spi_device *dev)
-{
- struct tpm_chip *chip = spi_get_drvdata(dev);
- struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
- struct st33zp24_spi_phy *phy = tpm_dev->phy_id;
- struct st33zp24_platform_data *pdata;
- int ret;
-
- pdata = dev->dev.platform_data;
- if (!pdata) {
- dev_err(&dev->dev, "No platform data\n");
- return -ENODEV;
- }
-
- /* store for late use */
- phy->io_lpcpd = pdata->io_lpcpd;
-
- if (gpio_is_valid(pdata->io_lpcpd)) {
- ret = devm_gpio_request_one(&dev->dev,
- pdata->io_lpcpd, GPIOF_OUT_INIT_HIGH,
- "TPM IO_LPCPD");
- if (ret) {
- dev_err(&dev->dev, "%s : reset gpio_request failed\n",
- __FILE__);
- return ret;
- }
- }
-
- return 0;
-}
-
/*
* st33zp24_spi_probe initialize the TPM device
* @param: dev, the spi_device description (TPM SPI description).
@@ -335,17 +221,8 @@ static int st33zp24_spi_request_resources(struct spi_device *dev)
*/
static int st33zp24_spi_probe(struct spi_device *dev)
{
- int ret;
- struct st33zp24_platform_data *pdata;
struct st33zp24_spi_phy *phy;
- /* Check SPI platform functionnalities */
- if (!dev) {
- pr_info("%s: dev is NULL. Device is not accessible.\n",
- __func__);
- return -ENODEV;
- }
-
phy = devm_kzalloc(&dev->dev, sizeof(struct st33zp24_spi_phy),
GFP_KERNEL);
if (!phy)
@@ -353,27 +230,11 @@ static int st33zp24_spi_probe(struct spi_device *dev)
phy->spi_device = dev;
- pdata = dev->dev.platform_data;
- if (!pdata && dev->dev.of_node) {
- ret = st33zp24_spi_of_request_resources(dev);
- if (ret)
- return ret;
- } else if (pdata) {
- ret = st33zp24_spi_request_resources(dev);
- if (ret)
- return ret;
- } else if (ACPI_HANDLE(&dev->dev)) {
- ret = st33zp24_spi_acpi_request_resources(dev);
- if (ret)
- return ret;
- }
-
phy->latency = st33zp24_spi_evaluate_latency(phy);
if (phy->latency <= 0)
return -ENODEV;
- return st33zp24_probe(phy, &spi_phy_ops, &dev->dev, dev->irq,
- phy->io_lpcpd);
+ return st33zp24_probe(phy, &spi_phy_ops, &dev->dev, dev->irq);
}
/*
@@ -411,7 +272,7 @@ static SIMPLE_DEV_PM_OPS(st33zp24_spi_ops, st33zp24_pm_suspend,
static struct spi_driver st33zp24_spi_driver = {
.driver = {
- .name = TPM_ST33_SPI,
+ .name = "st33zp24-spi",
.pm = &st33zp24_spi_ops,
.of_match_table = of_match_ptr(of_st33zp24_spi_match),
.acpi_match_table = ACPI_PTR(st33zp24_spi_acpi_match),
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 15b393e92c8e..a5b554cd4778 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -4,6 +4,7 @@
* Copyright (C) 2009 - 2016 STMicroelectronics
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
@@ -12,7 +13,7 @@
#include <linux/freezer.h>
#include <linux/string.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/io.h>
@@ -432,11 +433,18 @@ static const struct tpm_class_ops st33zp24_tpm = {
.req_canceled = st33zp24_req_canceled,
};
+static const struct acpi_gpio_params lpcpd_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_st33zp24_gpios[] = {
+ { "lpcpd-gpios", &lpcpd_gpios, 1 },
+ { },
+};
+
/*
* initialize the TPM device
*/
int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
- struct device *dev, int irq, int io_lpcpd)
+ struct device *dev, int irq)
{
int ret;
u8 intmask = 0;
@@ -463,6 +471,25 @@ int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
tpm_dev->locality = LOCALITY0;
+ if (ACPI_COMPANION(dev)) {
+ ret = devm_acpi_dev_add_driver_gpios(dev, acpi_st33zp24_gpios);
+ if (ret)
+ return ret;
+ }
+
+ /*
+	 * Get the LPCPD GPIO. If the lpcpd pin is not specified, this is not an
+	 * issue, as power management can also be managed by TPM-specific
+	 * commands.
+ */
+ tpm_dev->io_lpcpd = devm_gpiod_get_optional(dev, "lpcpd",
+ GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(tpm_dev->io_lpcpd);
+ if (ret) {
+ dev_err(dev, "failed to request lpcpd gpio: %d\n", ret);
+ return ret;
+ }
+
if (irq) {
/* INTERRUPT Setup */
init_waitqueue_head(&tpm_dev->read_queue);
@@ -525,8 +552,8 @@ int st33zp24_pm_suspend(struct device *dev)
int ret = 0;
- if (gpio_is_valid(tpm_dev->io_lpcpd))
- gpio_set_value(tpm_dev->io_lpcpd, 0);
+ if (tpm_dev->io_lpcpd)
+ gpiod_set_value_cansleep(tpm_dev->io_lpcpd, 0);
else
ret = tpm_pm_suspend(dev);
@@ -540,8 +567,8 @@ int st33zp24_pm_resume(struct device *dev)
struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
int ret = 0;
- if (gpio_is_valid(tpm_dev->io_lpcpd)) {
- gpio_set_value(tpm_dev->io_lpcpd, 1);
+ if (tpm_dev->io_lpcpd) {
+ gpiod_set_value_cansleep(tpm_dev->io_lpcpd, 1);
ret = wait_for_stat(chip,
TPM_STS_VALID, chip->timeout_b,
&tpm_dev->read_queue, false);
diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
index b387a476c555..5acc85f711e6 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.h
+++ b/drivers/char/tpm/st33zp24/st33zp24.h
@@ -7,6 +7,9 @@
#ifndef __LOCAL_ST33ZP24_H__
#define __LOCAL_ST33ZP24_H__
+#define TPM_ST33_I2C "st33zp24-i2c"
+#define TPM_ST33_SPI "st33zp24-spi"
+
#define TPM_WRITE_DIRECTION 0x80
#define ST33ZP24_BUFSIZE 2048
@@ -17,7 +20,7 @@ struct st33zp24_dev {
int locality;
int irq;
u32 intrs;
- int io_lpcpd;
+ struct gpio_desc *io_lpcpd;
wait_queue_head_t read_queue;
};
@@ -33,6 +36,6 @@ int st33zp24_pm_resume(struct device *dev);
#endif
int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
- struct device *dev, int irq, int io_lpcpd);
+ struct device *dev, int irq);
void st33zp24_remove(struct tpm_chip *chip);
#endif /* __LOCAL_ST33ZP24_H__ */
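[Editor's note] The st33zp24 rework above moves from legacy GPIO numbers to GPIO descriptors. The key pattern is devm_gpiod_get_optional(), which returns NULL when the line is simply absent and an ERR_PTR only on genuine errors, so the optional lpcpd pin no longer needs separate DT, ACPI and platform-data lookup paths. A small sketch under that assumption, with the demo_* name invented:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static struct gpio_desc *demo_get_lpcpd(struct device *dev)
    {
            struct gpio_desc *gpiod;

            /* NULL (not an error) when no "lpcpd" line is described. */
            gpiod = devm_gpiod_get_optional(dev, "lpcpd", GPIOD_OUT_HIGH);
            if (IS_ERR(gpiod)) {
                    dev_err(dev, "failed to request lpcpd gpio: %ld\n",
                            PTR_ERR(gpiod));
                    return gpiod;
            }

            if (gpiod)
                    gpiod_set_value_cansleep(gpiod, 1);
            return gpiod;
    }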
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 783d65fc71f0..741d8f3e8fb3 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -373,6 +373,11 @@ out:
}
EXPORT_SYMBOL_GPL(tpm_chip_alloc);
+static void tpm_put_device(void *dev)
+{
+ put_device(dev);
+}
+
/**
* tpmm_chip_alloc() - allocate a new struct tpm_chip instance
* @pdev: parent device to which the chip is associated
@@ -391,7 +396,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *pdev,
return chip;
rc = devm_add_action_or_reset(pdev,
- (void (*)(void *)) put_device,
+ tpm_put_device,
&chip->dev);
if (rc)
return ERR_PTR(rc);
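[Editor's note] The tpm-chip.c hunk above replaces a function-pointer cast of put_device() with a small typed wrapper: a devm action must have the exact void (*)(void *) signature, and calling a function through a cast to an incompatible prototype is undefined behaviour that indirect-call checking (e.g. kernel CFI) will flag. A sketch of the same pattern, with the demo_* names invented:

    #include <linux/device.h>

    /* Wrapper with the exact devm action signature, instead of a cast. */
    static void demo_put_device(void *dev)
    {
            put_device(dev);
    }

    static int demo_hold_ref(struct device *parent, struct device *child)
    {
            get_device(child);
            return devm_add_action_or_reset(parent, demo_put_device, child);
    }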
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index dc4c0a0a5129..30b4c288c1bb 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -155,7 +155,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
out:
if (!priv->response_length) {
*off = 0;
- del_singleshot_timer_sync(&priv->user_read_timer);
+ del_timer_sync(&priv->user_read_timer);
flush_work(&priv->timeout_work);
}
mutex_unlock(&priv->buffer_mutex);
@@ -262,7 +262,7 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
void tpm_common_release(struct file *file, struct file_priv *priv)
{
flush_work(&priv->async_work);
- del_singleshot_timer_sync(&priv->user_read_timer);
+ del_timer_sync(&priv->user_read_timer);
flush_work(&priv->timeout_work);
file->private_data = NULL;
priv->response_length = 0;
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 1621ce818705..d69905233aff 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -401,13 +401,14 @@ int tpm_pm_suspend(struct device *dev)
!pm_suspend_via_firmware())
goto suspended;
- if (!tpm_chip_start(chip)) {
+ rc = tpm_try_get_ops(chip);
+ if (!rc) {
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_shutdown(chip, TPM2_SU_STATE);
else
rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
- tpm_chip_stop(chip);
+ tpm_put_ops(chip);
}
suspended:
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 18606651d1aa..7e9da671a0e8 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -252,7 +252,7 @@ static int __crb_relinquish_locality(struct device *dev,
iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
TPM2_TIMEOUT_C)) {
- dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
+ dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
return -ETIME;
}
@@ -676,12 +676,16 @@ static int crb_acpi_add(struct acpi_device *device)
/* Should the FIFO driver handle this? */
sm = buf->start_method;
- if (sm == ACPI_TPM2_MEMORY_MAPPED)
- return -ENODEV;
+ if (sm == ACPI_TPM2_MEMORY_MAPPED) {
+ rc = -ENODEV;
+ goto out;
+ }
priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (!priv) {
+ rc = -ENOMEM;
+ goto out;
+ }
if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
@@ -689,7 +693,8 @@ static int crb_acpi_add(struct acpi_device *device)
FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
buf->header.length,
ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
priv->smc_func_id = crb_smc->smc_func_id;
@@ -700,27 +705,31 @@ static int crb_acpi_add(struct acpi_device *device)
rc = crb_map_io(device, priv, buf);
if (rc)
- return rc;
+ goto out;
chip = tpmm_chip_alloc(dev, &tpm_crb);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
+ if (IS_ERR(chip)) {
+ rc = PTR_ERR(chip);
+ goto out;
+ }
dev_set_drvdata(&chip->dev, priv);
chip->acpi_dev_handle = device->handle;
chip->flags = TPM_CHIP_FLAG_TPM2;
- return tpm_chip_register(chip);
+ rc = tpm_chip_register(chip);
+
+out:
+ acpi_put_table((struct acpi_table_header *)buf);
+ return rc;
}
-static int crb_acpi_remove(struct acpi_device *device)
+static void crb_acpi_remove(struct acpi_device *device)
{
struct device *dev = &device->dev;
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_chip_unregister(chip);
-
- return 0;
}
static const struct dev_pm_ops crb_pm = {
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 5c233423c56f..deff23bb54bf 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -397,7 +397,13 @@ static int __init ftpm_mod_init(void)
if (rc)
return rc;
- return driver_register(&ftpm_tee_driver.driver);
+ rc = driver_register(&ftpm_tee_driver.driver);
+ if (rc) {
+ platform_driver_unregister(&ftpm_tee_plat_driver);
+ return rc;
+ }
+
+ return 0;
}
static void __exit ftpm_mod_exit(void)
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index bcff6429e0b4..ed5dabd3c72d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -125,6 +125,7 @@ static int check_acpi_tpm2(struct device *dev)
const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev);
struct acpi_table_tpm2 *tbl;
acpi_status st;
+ int ret = 0;
if (!aid || aid->driver_data != DEVICE_IS_TPM2)
return 0;
@@ -132,8 +133,7 @@ static int check_acpi_tpm2(struct device *dev)
/* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2
* table is mandatory
*/
- st =
- acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
+ st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
return -EINVAL;
@@ -141,9 +141,10 @@ static int check_acpi_tpm2(struct device *dev)
/* The tpm2_crb driver handles this device */
if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
- return -ENODEV;
+ ret = -ENODEV;
- return 0;
+ acpi_put_table((struct acpi_table_header *)tbl);
+ return ret;
}
#else
static int check_acpi_tpm2(struct device *dev)
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 757623bacfd5..3f98e587b3e8 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -682,15 +682,19 @@ static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
- switch (priv->manufacturer_id) {
- case TPM_VID_WINBOND:
- return ((status == TPM_STS_VALID) ||
- (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
- case TPM_VID_STM:
- return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
- default:
- return (status == TPM_STS_COMMAND_READY);
+ if (!test_bit(TPM_TIS_DEFAULT_CANCELLATION, &priv->flags)) {
+ switch (priv->manufacturer_id) {
+ case TPM_VID_WINBOND:
+ return ((status == TPM_STS_VALID) ||
+ (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
+ case TPM_VID_STM:
+ return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
+ default:
+ break;
+ }
}
+
+ return status == TPM_STS_COMMAND_READY;
}
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 66a5a13cd1df..b68479e0de10 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -86,6 +86,7 @@ enum tis_defaults {
enum tpm_tis_flags {
TPM_TIS_ITPM_WORKAROUND = BIT(0),
TPM_TIS_INVALID_STATUS = BIT(1),
+ TPM_TIS_DEFAULT_CANCELLATION = BIT(2),
};
struct tpm_tis_data {
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
index 0692510dfcab..f3a7251c8e38 100644
--- a/drivers/char/tpm/tpm_tis_i2c.c
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -49,7 +49,7 @@
/* Masks with bits that must be read zero */
#define TPM_ACCESS_READ_ZERO 0x48
-#define TPM_INT_ENABLE_ZERO 0x7FFFFF6
+#define TPM_INT_ENABLE_ZERO 0x7FFFFF60
#define TPM_STS_READ_ZERO 0x23
#define TPM_INTF_CAPABILITY_ZERO 0x0FFFF000
#define TPM_I2C_INTERFACE_CAPABILITY_ZERO 0x80000000
@@ -329,6 +329,7 @@ static int tpm_tis_i2c_probe(struct i2c_client *dev,
if (!phy->io_buf)
return -ENOMEM;
+ set_bit(TPM_TIS_DEFAULT_CANCELLATION, &phy->priv.flags);
phy->i2c_client = dev;
/* must precede all communication with the tpm */