Char/Misc driver patches for 3.9-rc1

Here's the big set of char/misc driver patches for 3.9-rc1.
 
 Nothing major here, just lots of different driver updates (mei, hyperv, ipack,
 extcon, vmci, etc.).
 
 All of these have been in the linux-next tree for a while.
 
 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.19 (GNU/Linux)
 
 iEYEABECAAYFAlEmZJgACgkQMUfUDdst+ymhZgCgo2dn37r9uMCwgTSpxSq92Je5
 x8kAnRF1UnD6ZvySRIlLUBV5LW1YgFnK
 =i5HH
 -----END PGP SIGNATURE-----

Merge tag 'char-misc-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver patches from Greg Kroah-Hartman:
 "Here's the big char/misc driver patches for 3.9-rc1.

  Nothing major here, just lots of different driver updates (mei,
  hyperv, ipack, extcon, vmci, etc.).

  All of these have been in the linux-next tree for a while."

* tag 'char-misc-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (209 commits)
  w1: w1_therm: Add force-pullup option for "broken" sensors
  w1: ds2482: Added 1-Wire pull-up support to the driver
  vme: add missing put_device() after device_register() fails
  extcon: max8997: Use workqueue to check cable state after completing boot of platform
  extcon: max8997: Set default UART/USB path on probe
  extcon: max8997: Consolidate duplicate code for checking ADC/CHG cable type
  extcon: max8997: Set default of ADC debounce time during initialization
  extcon: max8997: Remove duplicate code related to set H/W line path
  extcon: max8997: Move defined constant to header file
  extcon: max77693: Make max77693_extcon_cable static
  extcon: max8997: Remove unreachable code
  extcon: max8997: Make max8997_extcon_cable static
  extcon: max77693: Remove unnecessary goto statement to improve readability
  extcon: max77693: Convert to devm_input_allocate_device()
  extcon: gpio: Rename filename of extcon-gpio.c according to kernel naming style
  CREDITS: update email and address of Harald Hoyer
  extcon: arizona: Use MICDET for final microphone identification
  extcon: arizona: Always take the first HPDET reading as the final one
  extcon: arizona: Clear _trig_sts bits after jack detection
  extcon: arizona: Don't HPDET magic when headphones are enabled
  ...
Linus Torvalds 2013-02-21 13:57:13 -08:00
commit 7ed214ac20
107 changed files with 20759 additions and 4125 deletions


@@ -1572,12 +1572,12 @@ S: Wantage, New Jersey 07461
S: USA
N: Harald Hoyer
E: harald.hoyer@parzelle.de
W: http://parzelle.de/
E: harald@redhat.com
W: http://www.harald-hoyer.de
D: ip_masq_quake
D: md boot support
S: Hohe Strasse 30
S: D-70176 Stuttgart
S: Am Strand 5
S: D-19063 Schwerin
S: Germany
N: Jan Hubicka


@@ -984,7 +984,7 @@ int main()
return errno;
}
configfd = open(&quot;/sys/class/uio/uio0/device/config&quot;, O_RDWR);
if (uiofd &lt; 0) {
if (configfd &lt; 0) {
perror(&quot;config open:&quot;);
return errno;
}


@@ -34,9 +34,16 @@ currently supported. The driver also doesn't support reduced
precision (which would also reduce the conversion time).
The module parameter strong_pullup can be set to 0 to disable the
strong pullup or 1 to enable. If enabled the 5V strong pullup will be
enabled when the conversion is taking place provided the master driver
must support the strong pullup (or it falls back to a pullup
strong pullup, 1 to enable autodetection or 2 to force strong pullup.
In case of autodetection, the driver will use the "READ POWER SUPPLY"
command to check if there are parasite powered devices on the bus.
If so, it will activate the master's strong pullup.
In case the detection of parasite devices using this command fails
(seems to be the case with some DS18S20) the strong pullup can
be force-enabled.
If the strong pullup is enabled, the master's strong pullup will be
driven when the conversion is taking place, provided the master driver
does support the strong pullup (or it falls back to a pullup
resistor). The DS18b20 temperature sensor specification lists a
maximum current draw of 1.5mA and that a 5k pullup resistor is not
sufficient. The strong pullup is designed to provide the additional
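
As an illustrative sketch of the tri-state parameter described above (the
variable name here is an assumption, not necessarily what w1_therm uses),
such a module parameter is typically declared as:

    static int w1_strong_pullup = 1; /* 0: disable, 1: autodetect, 2: force */
    module_param_named(strong_pullup, w1_strong_pullup, int, 0);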


@@ -5405,6 +5405,13 @@ S: Maintained
F: Documentation/scsi/NinjaSCSI.txt
F: drivers/scsi/nsp32*
NTB DRIVER
M: Jon Mason <jon.mason@intel.com>
S: Supported
F: drivers/ntb/
F: drivers/net/ntb_netdev.c
F: include/linux/ntb.h
NTFS FILESYSTEM
M: Anton Altaparmakov <anton@tuxera.com>
L: linux-ntfs-dev@lists.sourceforge.net


@@ -152,6 +152,8 @@ source "drivers/memory/Kconfig"
source "drivers/iio/Kconfig"
source "drivers/ntb/Kconfig"
source "drivers/vme/Kconfig"
source "drivers/pwm/Kconfig"


@@ -147,3 +147,4 @@ obj-$(CONFIG_MEMORY) += memory/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IPACK_BUS) += ipack/
obj-$(CONFIG_NTB) += ntb/


@@ -163,7 +163,7 @@ static int exynos_rng_runtime_resume(struct device *dev)
}
UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
static UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
exynos_rng_runtime_resume, NULL);
static struct platform_driver exynos_rng_driver = {


@@ -399,7 +399,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
{
unsigned long p = *ppos;
ssize_t low_count, read, sz;
char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
int err = 0;
read = 0;
@@ -527,7 +527,7 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
unsigned long p = *ppos;
ssize_t wrote = 0;
ssize_t virtr = 0;
char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
int err = 0;
if (p < (unsigned long) high_memory) {
@@ -595,7 +595,7 @@ static ssize_t write_port(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long i = *ppos;
const char __user * tmp = buf;
const char __user *tmp = buf;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
@@ -729,7 +729,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
static int open_port(struct inode * inode, struct file * filp)
static int open_port(struct inode *inode, struct file *filp)
{
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
@@ -898,7 +898,7 @@ static int __init chr_dev_init(void)
continue;
/*
* Create /dev/port?
* Create /dev/port?
*/
if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
continue;

File diff suppressed because it is too large.


@@ -29,7 +29,7 @@ config EXTCON_ADC_JACK
config EXTCON_MAX77693
tristate "MAX77693 EXTCON Support"
depends on MFD_MAX77693
depends on MFD_MAX77693 && INPUT
select IRQ_DOMAIN
select REGMAP_I2C
help
@@ -47,7 +47,7 @@ config EXTCON_MAX8997
config EXTCON_ARIZONA
tristate "Wolfson Arizona EXTCON support"
depends on MFD_ARIZONA && INPUT
depends on MFD_ARIZONA && INPUT && SND_SOC
help
Say Y here to enable support for external accessory detection
with Wolfson Arizona devices. These are audio CODECs with

File diff suppressed because it is too large.


@@ -29,7 +29,7 @@
#include <linux/workqueue.h>
#include <linux/gpio.h>
#include <linux/extcon.h>
#include <linux/extcon/extcon_gpio.h>
#include <linux/extcon/extcon-gpio.h>
struct gpio_extcon_data {
struct extcon_dev edev;

File diff suppressed because it is too large.


@@ -29,51 +29,14 @@
#include <linux/irqdomain.h>
#define DEV_NAME "max8997-muic"
#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
/* MAX8997-MUIC STATUS1 register */
#define STATUS1_ADC_SHIFT 0
#define STATUS1_ADCLOW_SHIFT 5
#define STATUS1_ADCERR_SHIFT 6
#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
/* MAX8997-MUIC STATUS2 register */
#define STATUS2_CHGTYP_SHIFT 0
#define STATUS2_CHGDETRUN_SHIFT 3
#define STATUS2_DCDTMR_SHIFT 4
#define STATUS2_DBCHG_SHIFT 5
#define STATUS2_VBVOLT_SHIFT 6
#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
#define STATUS2_DBCHG_MASK (0x1 << STATUS2_DBCHG_SHIFT)
#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
/* MAX8997-MUIC STATUS3 register */
#define STATUS3_OVP_SHIFT 2
#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
/* MAX8997-MUIC CONTROL1 register */
#define COMN1SW_SHIFT 0
#define COMP2SW_SHIFT 3
#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
#define SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
#define MAX8997_SW_USB ((1 << COMP2SW_SHIFT) | (1 << COMN1SW_SHIFT))
#define MAX8997_SW_AUDIO ((2 << COMP2SW_SHIFT) | (2 << COMN1SW_SHIFT))
#define MAX8997_SW_UART ((3 << COMP2SW_SHIFT) | (3 << COMN1SW_SHIFT))
#define MAX8997_SW_OPEN ((0 << COMP2SW_SHIFT) | (0 << COMN1SW_SHIFT))
#define MAX8997_ADC_GROUND 0x00
#define MAX8997_ADC_MHL 0x01
#define MAX8997_ADC_JIG_USB_1 0x18
#define MAX8997_ADC_JIG_USB_2 0x19
#define MAX8997_ADC_DESKDOCK 0x1a
#define MAX8997_ADC_JIG_UART 0x1c
#define MAX8997_ADC_CARDOCK 0x1d
#define MAX8997_ADC_OPEN 0x1f
enum max8997_muic_adc_debounce_time {
ADC_DEBOUNCE_TIME_0_5MS = 0, /* 0.5ms */
ADC_DEBOUNCE_TIME_10MS, /* 10ms */
ADC_DEBOUNCE_TIME_25MS, /* 25ms */
ADC_DEBOUNCE_TIME_38_62MS, /* 38.62ms */
};
struct max8997_muic_irq {
unsigned int irq;
@@ -82,61 +45,303 @@ struct max8997_muic_irq {
};
static struct max8997_muic_irq muic_irqs[] = {
{ MAX8997_MUICIRQ_ADCError, "muic-ADC_error" },
{ MAX8997_MUICIRQ_ADCLow, "muic-ADC_low" },
{ MAX8997_MUICIRQ_ADC, "muic-ADC" },
{ MAX8997_MUICIRQ_VBVolt, "muic-VB_voltage" },
{ MAX8997_MUICIRQ_DBChg, "muic-DB_charger" },
{ MAX8997_MUICIRQ_DCDTmr, "muic-DCD_timer" },
{ MAX8997_MUICIRQ_ChgDetRun, "muic-CDR_status" },
{ MAX8997_MUICIRQ_ChgTyp, "muic-charger_type" },
{ MAX8997_MUICIRQ_OVP, "muic-over_voltage" },
{ MAX8997_MUICIRQ_ADCError, "muic-ADCERROR" },
{ MAX8997_MUICIRQ_ADCLow, "muic-ADCLOW" },
{ MAX8997_MUICIRQ_ADC, "muic-ADC" },
{ MAX8997_MUICIRQ_VBVolt, "muic-VBVOLT" },
{ MAX8997_MUICIRQ_DBChg, "muic-DBCHG" },
{ MAX8997_MUICIRQ_DCDTmr, "muic-DCDTMR" },
{ MAX8997_MUICIRQ_ChgDetRun, "muic-CHGDETRUN" },
{ MAX8997_MUICIRQ_ChgTyp, "muic-CHGTYP" },
{ MAX8997_MUICIRQ_OVP, "muic-OVP" },
};
/* Define supported cable type */
enum max8997_muic_acc_type {
MAX8997_MUIC_ADC_GROUND = 0x0,
MAX8997_MUIC_ADC_MHL, /* MHL*/
MAX8997_MUIC_ADC_REMOTE_S1_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S2_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S3_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S4_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S5_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S6_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S7_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S8_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S9_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S10_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S11_BUTTON,
MAX8997_MUIC_ADC_REMOTE_S12_BUTTON,
MAX8997_MUIC_ADC_RESERVED_ACC_1,
MAX8997_MUIC_ADC_RESERVED_ACC_2,
MAX8997_MUIC_ADC_RESERVED_ACC_3,
MAX8997_MUIC_ADC_RESERVED_ACC_4,
MAX8997_MUIC_ADC_RESERVED_ACC_5,
MAX8997_MUIC_ADC_CEA936_AUDIO,
MAX8997_MUIC_ADC_PHONE_POWERED_DEV,
MAX8997_MUIC_ADC_TTY_CONVERTER,
MAX8997_MUIC_ADC_UART_CABLE,
MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG,
MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF, /* JIG-USB-OFF */
MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON, /* JIG-USB-ON */
MAX8997_MUIC_ADC_AV_CABLE_NOLOAD, /* DESKDOCK */
MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG,
MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF, /* JIG-UART */
MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON, /* CARDOCK */
MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE,
MAX8997_MUIC_ADC_OPEN, /* OPEN */
};
enum max8997_muic_cable_group {
MAX8997_CABLE_GROUP_ADC = 0,
MAX8997_CABLE_GROUP_ADC_GND,
MAX8997_CABLE_GROUP_CHG,
MAX8997_CABLE_GROUP_VBVOLT,
};
enum max8997_muic_usb_type {
MAX8997_USB_HOST,
MAX8997_USB_DEVICE,
};
enum max8997_muic_charger_type {
MAX8997_CHARGER_TYPE_NONE = 0,
MAX8997_CHARGER_TYPE_USB,
MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT,
MAX8997_CHARGER_TYPE_DEDICATED_CHG,
MAX8997_CHARGER_TYPE_500MA,
MAX8997_CHARGER_TYPE_1A,
MAX8997_CHARGER_TYPE_DEAD_BATTERY = 7,
};
struct max8997_muic_info {
struct device *dev;
struct i2c_client *muic;
struct max8997_muic_platform_data *muic_pdata;
struct extcon_dev *edev;
int prev_cable_type;
int prev_chg_type;
u8 status[2];
int irq;
struct work_struct irq_work;
enum max8997_muic_charger_type pre_charger_type;
int pre_adc;
struct mutex mutex;
struct extcon_dev *edev;
struct max8997_muic_platform_data *muic_pdata;
enum max8997_muic_charger_type pre_charger_type;
/*
* Use a delayed workqueue to detect the cable state and then
* notify the cable state to the notifiee/platform through a uevent.
* After the platform has finished booting, the extcon provider
* driver should notify the cable state to the upper layer.
*/
struct delayed_work wq_detcable;
/*
* Default USB/UART path, either UART/USB or AUX_UART/AUX_USB, i.e. the
* h/w path of COMP2/COMN1 on the CONTROL1 register.
*/
int path_usb;
int path_uart;
};
const char *max8997_extcon_cable[] = {
[0] = "USB",
[1] = "USB-Host",
[2] = "TA",
[3] = "Fast-charger",
[4] = "Slow-charger",
[5] = "Charge-downstream",
[6] = "MHL",
[7] = "Dock-desk",
[8] = "Dock-card",
[9] = "JIG",
enum {
EXTCON_CABLE_USB = 0,
EXTCON_CABLE_USB_HOST,
EXTCON_CABLE_TA,
EXTCON_CABLE_FAST_CHARGER,
EXTCON_CABLE_SLOW_CHARGER,
EXTCON_CABLE_CHARGE_DOWNSTREAM,
EXTCON_CABLE_MHL,
EXTCON_CABLE_DOCK_DESK,
EXTCON_CABLE_DOCK_CARD,
EXTCON_CABLE_JIG,
_EXTCON_CABLE_NUM,
};
static const char *max8997_extcon_cable[] = {
[EXTCON_CABLE_USB] = "USB",
[EXTCON_CABLE_USB_HOST] = "USB-Host",
[EXTCON_CABLE_TA] = "TA",
[EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
[EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
[EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
[EXTCON_CABLE_MHL] = "MHL",
[EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
[EXTCON_CABLE_DOCK_CARD] = "Dock-Card",
[EXTCON_CABLE_JIG] = "JIG",
NULL,
};
/*
* max8997_muic_set_debounce_time - Set the debounce time of ADC
* @info: the instance including private data of max8997 MUIC
* @time: the debounce time of ADC
*/
static int max8997_muic_set_debounce_time(struct max8997_muic_info *info,
enum max8997_muic_adc_debounce_time time)
{
int ret;
switch (time) {
case ADC_DEBOUNCE_TIME_0_5MS:
case ADC_DEBOUNCE_TIME_10MS:
case ADC_DEBOUNCE_TIME_25MS:
case ADC_DEBOUNCE_TIME_38_62MS:
ret = max8997_update_reg(info->muic,
MAX8997_MUIC_REG_CONTROL3,
time << CONTROL3_ADCDBSET_SHIFT,
CONTROL3_ADCDBSET_MASK);
if (ret) {
dev_err(info->dev, "failed to set ADC debounce time\n");
return -EAGAIN;
}
break;
default:
dev_err(info->dev, "invalid ADC debounce time\n");
return -EINVAL;
}
return 0;
};
/*
* max8997_muic_set_path - Set hardware line according to attached cable
* @info: the instance including private data of max8997 MUIC
* @value: the path according to attached cable
* @attached: the state of cable (true:attached, false:detached)
*
* The max8997 MUIC device shares one external H/W line among a variety of
* cables, so this function sets the internal path of the H/W line according
* to the type of attached cable.
*/
static int max8997_muic_set_path(struct max8997_muic_info *info,
u8 val, bool attached)
{
int ret = 0;
u8 ctrl1, ctrl2 = 0;
if (attached)
ctrl1 = val;
else
ctrl1 = CONTROL1_SW_OPEN;
ret = max8997_update_reg(info->muic,
MAX8997_MUIC_REG_CONTROL1, ctrl1, COMP_SW_MASK);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
return -EAGAIN;
}
if (attached)
ctrl2 |= CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
else
ctrl2 |= CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
ret = max8997_update_reg(info->muic,
MAX8997_MUIC_REG_CONTROL2, ctrl2,
CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
return -EAGAIN;
}
dev_info(info->dev,
"CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
ctrl1, ctrl2, attached ? "attached" : "detached");
return 0;
}
/*
* max8997_muic_get_cable_type - Return cable type and check cable state
* @info: the instance including private data of max8997 MUIC
* @group: the cable group of the attached cable
* @attached: pointer through which the cable state is returned
*
* This function checks whether the cable is attached or detached,
* and then determines the precise cable type according to the cable group:
* - MAX8997_CABLE_GROUP_ADC
* - MAX8997_CABLE_GROUP_CHG
*/
static int max8997_muic_get_cable_type(struct max8997_muic_info *info,
enum max8997_muic_cable_group group, bool *attached)
{
int cable_type = 0;
int adc;
int chg_type;
switch (group) {
case MAX8997_CABLE_GROUP_ADC:
/*
* Read ADC value to check cable type and decide cable state
* according to cable type
*/
adc = info->status[0] & STATUS1_ADC_MASK;
adc >>= STATUS1_ADC_SHIFT;
/*
* Check current cable state/cable type and store cable type
* (info->prev_cable_type) for handling cable when cable is
* detached.
*/
if (adc == MAX8997_MUIC_ADC_OPEN) {
*attached = false;
cable_type = info->prev_cable_type;
info->prev_cable_type = MAX8997_MUIC_ADC_OPEN;
} else {
*attached = true;
cable_type = info->prev_cable_type = adc;
}
break;
case MAX8997_CABLE_GROUP_CHG:
/*
* Read charger type to check cable type and decide cable state
* according to type of charger cable.
*/
chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
chg_type >>= STATUS2_CHGTYP_SHIFT;
if (chg_type == MAX8997_CHARGER_TYPE_NONE) {
*attached = false;
cable_type = info->prev_chg_type;
info->prev_chg_type = MAX8997_CHARGER_TYPE_NONE;
} else {
*attached = true;
/*
* Check current cable state/cable type and store cable
* type(info->prev_chg_type) for handling cable when
* charger cable is detached.
*/
cable_type = info->prev_chg_type = chg_type;
}
break;
default:
dev_err(info->dev, "Unknown cable group (%d)\n", group);
cable_type = -EINVAL;
break;
}
return cable_type;
}
static int max8997_muic_handle_usb(struct max8997_muic_info *info,
enum max8997_muic_usb_type usb_type, bool attached)
{
int ret = 0;
if (usb_type == MAX8997_USB_HOST) {
/* switch to USB */
ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
attached ? MAX8997_SW_USB : MAX8997_SW_OPEN,
SW_MASK);
if (ret) {
ret = max8997_muic_set_path(info, info->path_usb, attached);
if (ret < 0) {
dev_err(info->dev, "failed to update muic register\n");
goto out;
return ret;
}
}
@@ -148,41 +353,39 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
extcon_set_cable_state(info->edev, "USB", attached);
break;
default:
ret = -EINVAL;
break;
dev_err(info->dev, "failed to detect %s usb cable\n",
attached ? "attached" : "detached");
return -EINVAL;
}
out:
return ret;
return 0;
}
static int max8997_muic_handle_dock(struct max8997_muic_info *info,
int adc, bool attached)
int cable_type, bool attached)
{
int ret = 0;
/* switch to AUDIO */
ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
attached ? MAX8997_SW_AUDIO : MAX8997_SW_OPEN,
SW_MASK);
ret = max8997_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
if (ret) {
dev_err(info->dev, "failed to update muic register\n");
goto out;
return ret;
}
switch (adc) {
case MAX8997_ADC_DESKDOCK:
switch (cable_type) {
case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
extcon_set_cable_state(info->edev, "Dock-desk", attached);
break;
case MAX8997_ADC_CARDOCK:
case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
extcon_set_cable_state(info->edev, "Dock-card", attached);
break;
default:
ret = -EINVAL;
break;
dev_err(info->dev, "failed to detect %s dock device\n",
attached ? "attached" : "detached");
return -EINVAL;
}
out:
return ret;
return 0;
}
static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
@@ -191,199 +394,188 @@ static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
int ret = 0;
/* switch to UART */
ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
attached ? MAX8997_SW_UART : MAX8997_SW_OPEN,
SW_MASK);
ret = max8997_muic_set_path(info, info->path_uart, attached);
if (ret) {
dev_err(info->dev, "failed to update muic register\n");
goto out;
return -EINVAL;
}
extcon_set_cable_state(info->edev, "JIG", attached);
out:
return ret;
return 0;
}
static int max8997_muic_handle_adc_detach(struct max8997_muic_info *info)
static int max8997_muic_adc_handler(struct max8997_muic_info *info)
{
int cable_type;
bool attached;
int ret = 0;
switch (info->pre_adc) {
case MAX8997_ADC_GROUND:
ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, false);
/* Check cable state which is either detached or attached */
cable_type = max8997_muic_get_cable_type(info,
MAX8997_CABLE_GROUP_ADC, &attached);
switch (cable_type) {
case MAX8997_MUIC_ADC_GROUND:
ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, attached);
if (ret < 0)
return ret;
break;
case MAX8997_ADC_MHL:
extcon_set_cable_state(info->edev, "MHL", false);
case MAX8997_MUIC_ADC_MHL:
extcon_set_cable_state(info->edev, "MHL", attached);
break;
case MAX8997_ADC_JIG_USB_1:
case MAX8997_ADC_JIG_USB_2:
ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, false);
case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON:
ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, attached);
if (ret < 0)
return ret;
break;
case MAX8997_ADC_DESKDOCK:
case MAX8997_ADC_CARDOCK:
ret = max8997_muic_handle_dock(info, info->pre_adc, false);
case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
ret = max8997_muic_handle_dock(info, cable_type, attached);
if (ret < 0)
return ret;
break;
case MAX8997_ADC_JIG_UART:
ret = max8997_muic_handle_jig_uart(info, false);
break;
default:
break;
}
return ret;
}
static int max8997_muic_handle_adc(struct max8997_muic_info *info, int adc)
{
int ret = 0;
switch (adc) {
case MAX8997_ADC_GROUND:
ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, true);
break;
case MAX8997_ADC_MHL:
extcon_set_cable_state(info->edev, "MHL", true);
break;
case MAX8997_ADC_JIG_USB_1:
case MAX8997_ADC_JIG_USB_2:
ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, true);
break;
case MAX8997_ADC_DESKDOCK:
case MAX8997_ADC_CARDOCK:
ret = max8997_muic_handle_dock(info, adc, true);
break;
case MAX8997_ADC_JIG_UART:
ret = max8997_muic_handle_jig_uart(info, true);
break;
case MAX8997_ADC_OPEN:
ret = max8997_muic_handle_adc_detach(info);
break;
default:
ret = -EINVAL;
goto out;
}
info->pre_adc = adc;
out:
return ret;
}
static int max8997_muic_handle_charger_type_detach(
struct max8997_muic_info *info)
{
switch (info->pre_charger_type) {
case MAX8997_CHARGER_TYPE_USB:
extcon_set_cable_state(info->edev, "USB", false);
break;
case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
extcon_set_cable_state(info->edev, "Charge-downstream", false);
break;
case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
extcon_set_cable_state(info->edev, "TA", false);
break;
case MAX8997_CHARGER_TYPE_500MA:
extcon_set_cable_state(info->edev, "Slow-charger", false);
break;
case MAX8997_CHARGER_TYPE_1A:
extcon_set_cable_state(info->edev, "Fast-charger", false);
case MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF:
ret = max8997_muic_handle_jig_uart(info, attached);
break;
case MAX8997_MUIC_ADC_REMOTE_S1_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S2_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S3_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S4_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S5_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S6_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S7_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S8_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S9_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S10_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S11_BUTTON:
case MAX8997_MUIC_ADC_REMOTE_S12_BUTTON:
case MAX8997_MUIC_ADC_RESERVED_ACC_1:
case MAX8997_MUIC_ADC_RESERVED_ACC_2:
case MAX8997_MUIC_ADC_RESERVED_ACC_3:
case MAX8997_MUIC_ADC_RESERVED_ACC_4:
case MAX8997_MUIC_ADC_RESERVED_ACC_5:
case MAX8997_MUIC_ADC_CEA936_AUDIO:
case MAX8997_MUIC_ADC_PHONE_POWERED_DEV:
case MAX8997_MUIC_ADC_TTY_CONVERTER:
case MAX8997_MUIC_ADC_UART_CABLE:
case MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG:
case MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG:
case MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE:
/*
* These cables aren't used in the general case. If one of them
* needs to be detected, implement the proper operation here for
* when the cable is attached/detached.
*/
dev_info(info->dev,
"cable is %s but it isn't used (type:0x%x)\n",
attached ? "attached" : "detached", cable_type);
return -EAGAIN;
default:
dev_err(info->dev,
"failed to detect %s unknown cable (type:0x%x)\n",
attached ? "attached" : "detached", cable_type);
return -EINVAL;
break;
}
return 0;
}
static int max8997_muic_handle_charger_type(struct max8997_muic_info *info,
enum max8997_muic_charger_type charger_type)
static int max8997_muic_chg_handler(struct max8997_muic_info *info)
{
u8 adc;
int ret;
int chg_type;
bool attached;
int adc;
ret = max8997_read_reg(info->muic, MAX8997_MUIC_REG_STATUS1, &adc);
if (ret) {
dev_err(info->dev, "failed to read muic register\n");
goto out;
}
chg_type = max8997_muic_get_cable_type(info,
MAX8997_CABLE_GROUP_CHG, &attached);
switch (charger_type) {
switch (chg_type) {
case MAX8997_CHARGER_TYPE_NONE:
ret = max8997_muic_handle_charger_type_detach(info);
break;
case MAX8997_CHARGER_TYPE_USB:
if ((adc & STATUS1_ADC_MASK) == MAX8997_ADC_OPEN) {
adc = info->status[0] & STATUS1_ADC_MASK;
adc >>= STATUS1_ADC_SHIFT;
if ((adc & STATUS1_ADC_MASK) == MAX8997_MUIC_ADC_OPEN) {
max8997_muic_handle_usb(info,
MAX8997_USB_DEVICE, true);
MAX8997_USB_DEVICE, attached);
}
break;
case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
extcon_set_cable_state(info->edev, "Charge-downstream", true);
extcon_set_cable_state(info->edev, "Charge-downstream", attached);
break;
case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
extcon_set_cable_state(info->edev, "TA", true);
extcon_set_cable_state(info->edev, "TA", attached);
break;
case MAX8997_CHARGER_TYPE_500MA:
extcon_set_cable_state(info->edev, "Slow-charger", true);
extcon_set_cable_state(info->edev, "Slow-charger", attached);
break;
case MAX8997_CHARGER_TYPE_1A:
extcon_set_cable_state(info->edev, "Fast-charger", true);
extcon_set_cable_state(info->edev, "Fast-charger", attached);
break;
default:
ret = -EINVAL;
goto out;
dev_err(info->dev,
"failed to detect %s unknown chg cable (type:0x%x)\n",
attached ? "attached" : "detached", chg_type);
return -EINVAL;
}
info->pre_charger_type = charger_type;
out:
return ret;
return 0;
}
static void max8997_muic_irq_work(struct work_struct *work)
{
struct max8997_muic_info *info = container_of(work,
struct max8997_muic_info, irq_work);
u8 status[2];
u8 adc, chg_type;
int irq_type = 0;
int i, ret;
if (!info->edev)
return;
mutex_lock(&info->mutex);
for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
if (info->irq == muic_irqs[i].virq)
irq_type = muic_irqs[i].irq;
ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
2, status);
2, info->status);
if (ret) {
dev_err(info->dev, "failed to read muic register\n");
mutex_unlock(&info->mutex);
return;
}
dev_dbg(info->dev, "%s: STATUS1:0x%x, 2:0x%x\n", __func__,
status[0], status[1]);
for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
if (info->irq == muic_irqs[i].virq)
irq_type = muic_irqs[i].irq;
switch (irq_type) {
case MAX8997_MUICIRQ_ADCError:
case MAX8997_MUICIRQ_ADCLow:
case MAX8997_MUICIRQ_ADC:
adc = status[0] & STATUS1_ADC_MASK;
adc >>= STATUS1_ADC_SHIFT;
max8997_muic_handle_adc(info, adc);
/* Handle all of cable except for charger cable */
ret = max8997_muic_adc_handler(info);
break;
case MAX8997_MUICIRQ_VBVolt:
case MAX8997_MUICIRQ_DBChg:
case MAX8997_MUICIRQ_DCDTmr:
case MAX8997_MUICIRQ_ChgDetRun:
case MAX8997_MUICIRQ_ChgTyp:
chg_type = status[1] & STATUS2_CHGTYP_MASK;
chg_type >>= STATUS2_CHGTYP_SHIFT;
max8997_muic_handle_charger_type(info, chg_type);
/* Handle charger cable */
ret = max8997_muic_chg_handler(info);
break;
case MAX8997_MUICIRQ_OVP:
break;
default:
dev_info(info->dev, "misc interrupt: irq %d occurred\n",
irq_type);
break;
mutex_unlock(&info->mutex);
return;
}
if (ret < 0)
dev_err(info->dev, "failed to handle MUIC interrupt\n");
mutex_unlock(&info->mutex);
return;
@@ -401,29 +593,60 @@ static irqreturn_t max8997_muic_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
static void max8997_muic_detect_dev(struct max8997_muic_info *info)
static int max8997_muic_detect_dev(struct max8997_muic_info *info)
{
int ret;
u8 status[2], adc, chg_type;
int ret = 0;
int adc;
int chg_type;
bool attached;
ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
2, status);
mutex_lock(&info->mutex);
/* Read STATUSx register to detect accessory */
ret = max8997_bulk_read(info->muic,
MAX8997_MUIC_REG_STATUS1, 2, info->status);
if (ret) {
dev_err(info->dev, "failed to read muic register\n");
return;
dev_err(info->dev, "failed to read MUIC register\n");
mutex_unlock(&info->mutex);
return -EINVAL;
}
dev_info(info->dev, "STATUS1:0x%x, STATUS2:0x%x\n",
status[0], status[1]);
adc = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_ADC,
&attached);
if (attached && adc != MAX8997_MUIC_ADC_OPEN) {
ret = max8997_muic_adc_handler(info);
if (ret < 0) {
dev_err(info->dev, "Cannot detect ADC cable\n");
mutex_unlock(&info->mutex);
return ret;
}
}
adc = status[0] & STATUS1_ADC_MASK;
adc >>= STATUS1_ADC_SHIFT;
chg_type = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_CHG,
&attached);
if (attached && chg_type != MAX8997_CHARGER_TYPE_NONE) {
ret = max8997_muic_chg_handler(info);
if (ret < 0) {
dev_err(info->dev, "Cannot detect charger cable\n");
mutex_unlock(&info->mutex);
return ret;
}
}
chg_type = status[1] & STATUS2_CHGTYP_MASK;
chg_type >>= STATUS2_CHGTYP_SHIFT;
mutex_unlock(&info->mutex);
max8997_muic_handle_adc(info, adc);
max8997_muic_handle_charger_type(info, chg_type);
return 0;
}
static void max8997_muic_detect_cable_wq(struct work_struct *work)
{
struct max8997_muic_info *info = container_of(to_delayed_work(work),
struct max8997_muic_info, wq_detcable);
int ret;
ret = max8997_muic_detect_dev(info);
if (ret < 0)
pr_err("failed to detect cable type\n");
}
static int max8997_muic_probe(struct platform_device *pdev)
@@ -431,6 +654,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent);
struct max8997_platform_data *pdata = dev_get_platdata(max8997->dev);
struct max8997_muic_info *info;
int delay_jiffies;
int ret, i;
info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
@@ -459,8 +683,10 @@ static int max8997_muic_probe(struct platform_device *pdev)
}
muic_irq->virq = virq;
ret = request_threaded_irq(virq, NULL, max8997_muic_irq_handler,
0, muic_irq->name, info);
ret = request_threaded_irq(virq, NULL,
max8997_muic_irq_handler,
IRQF_NO_SUSPEND,
muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"failed: irq request (IRQ: %d,"
@@ -496,10 +722,42 @@ }
}
}
/* Initial device detection */
max8997_muic_detect_dev(info);
/*
* Default USB/UART path, either UART/USB or AUX_UART/AUX_USB, i.e. the
* h/w path of COMP2/COMN1 on the CONTROL1 register.
*/
if (pdata->muic_pdata->path_uart)
info->path_uart = pdata->muic_pdata->path_uart;
else
info->path_uart = CONTROL1_SW_UART;
return ret;
if (pdata->muic_pdata->path_usb)
info->path_usb = pdata->muic_pdata->path_usb;
else
info->path_usb = CONTROL1_SW_USB;
/* Set initial path for UART */
max8997_muic_set_path(info, info->path_uart, true);
/* Set ADC debounce time */
max8997_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
/*
* Detect the accessory after the platform has completed initialization.
*
* - Use a delayed workqueue to detect the cable state and then
* notify the cable state to the notifiee/platform through a uevent.
* After the platform has finished booting, the extcon provider
* driver should notify the cable state to the upper layer.
*/
INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
if (pdata->muic_pdata->detcable_delay_ms)
delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms);
else
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
schedule_delayed_work(&info->wq_detcable, delay_jiffies);
return 0;
err_irq:
while (--i >= 0)


@@ -568,8 +568,7 @@ static int mousevsc_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Mouse guid */
{ VMBUS_DEVICE(0x9E, 0xB6, 0xA8, 0xCF, 0x4A, 0x5B, 0xc0, 0x4c,
0xB9, 0x8B, 0x8B, 0xA1, 0xA1, 0xF3, 0xF9, 0x5A) },
{ HV_MOUSE_GUID, },
{ },
};


@@ -55,7 +55,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
[channel->monitor_grp].pending);
} else {
vmbus_set_event(channel->offermsg.child_relid);
vmbus_set_event(channel);
}
}
@@ -181,7 +181,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
PAGE_SHIFT;
open_msg->server_contextarea_gpadlhandle = 0;
open_msg->target_vp = newchannel->target_vp;
if (userdatalen > MAX_USER_DEFINED_BYTES) {
err = -EINVAL;
@@ -564,6 +564,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
int ret;
bool signal = false;
/* Setup the descriptor */
@@ -580,9 +581,9 @@
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -606,6 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
bool signal = false;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
@@ -641,9 +643,9 @@
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -665,6 +667,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
bool signal = false;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -703,9 +706,9 @@
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -732,6 +735,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
u32 packetlen;
u32 userlen;
int ret;
bool signal = false;
*buffer_actual_len = 0;
*requestid = 0;
@@ -758,8 +762,10 @@
/* Copy over the packet to the user buffer */
ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
(desc.offset8 << 3));
(desc.offset8 << 3), &signal);
if (signal)
vmbus_setevent(channel);
return 0;
}
@@ -774,8 +780,8 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
{
struct vmpacket_descriptor desc;
u32 packetlen;
u32 userlen;
int ret;
bool signal = false;
*buffer_actual_len = 0;
*requestid = 0;
@@ -788,7 +794,6 @@
packetlen = desc.len8 << 3;
userlen = packetlen - (desc.offset8 << 3);
*buffer_actual_len = packetlen;
@@ -802,7 +807,11 @@
*requestid = desc.trans_id;
/* Copy over the entire packet to the user buffer */
ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0);
ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
&signal);
if (signal)
vmbus_setevent(channel);
return 0;
}


@@ -257,6 +257,70 @@ static void vmbus_process_offer(struct work_struct *work)
}
}
enum {
IDE = 0,
SCSI,
NIC,
MAX_PERF_CHN,
};
/*
* This is an array of device_ids (device types) that are performance critical.
* We attempt to distribute the interrupt load for these devices across
* all available CPUs.
*/
static const struct hv_vmbus_device_id hp_devs[] = {
/* IDE */
{ HV_IDE_GUID, },
/* Storage - SCSI */
{ HV_SCSI_GUID, },
/* Network */
{ HV_NIC_GUID, },
};
/*
* We use this state to statically distribute the channel interrupt load.
*/
static u32 next_vp;
/*
* Starting with Win8, we can statically distribute the incoming
* channel interrupt load by binding a channel to VCPU. We
* implement here a simple round robin scheme for distributing
* the interrupt load.
* We will bind channels that are not performance critical to cpu 0 and
* performance critical channels (IDE, SCSI and Network) will be uniformly
* distributed across all available CPUs.
*/
static u32 get_vp_index(uuid_le *type_guid)
{
u32 cur_cpu;
int i;
bool perf_chn = false;
u32 max_cpus = num_online_cpus();
for (i = IDE; i < MAX_PERF_CHN; i++) {
if (!memcmp(type_guid->b, hp_devs[i].guid,
sizeof(uuid_le))) {
perf_chn = true;
break;
}
}
if ((vmbus_proto_version == VERSION_WS2008) ||
(vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
/*
* Prior to win8, all channel interrupts are
* delivered on cpu 0.
* Also if the channel is not a performance critical
* channel, bind it to cpu 0.
*/
return 0;
}
cur_cpu = (++next_vp % max_cpus);
return cur_cpu;
}
/*
* vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
*
@@ -275,6 +339,35 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
return;
}
/*
* By default we setup state to enable batched
* reading. A specific service can choose to
* disable this prior to opening the channel.
*/
newchannel->batched_reading = true;
/*
* Setup state for signalling the host.
*/
newchannel->sig_event = (struct hv_input_signal_event *)
(ALIGN((unsigned long)
&newchannel->sig_buf,
HV_HYPERCALL_PARAM_ALIGN));
newchannel->sig_event->connectionid.asu32 = 0;
newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
newchannel->sig_event->flag_number = 0;
newchannel->sig_event->rsvdz = 0;
if (vmbus_proto_version != VERSION_WS2008) {
newchannel->is_dedicated_interrupt =
(offer->is_dedicated_interrupt != 0);
newchannel->sig_event->connectionid.u.id =
offer->connection_id;
}
newchannel->target_vp = get_vp_index(&offer->offer.if_type);
memcpy(&newchannel->offermsg, offer,
sizeof(struct vmbus_channel_offer_channel));
newchannel->monitor_grp = (u8)offer->monitorid / 32;


@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/export.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"
@@ -39,16 +40,100 @@ struct vmbus_connection vmbus_connection = {
.next_gpadl_handle = ATOMIC_INIT(0xE1E10),
};
/*
* Negotiated protocol version with the host.
*/
__u32 vmbus_proto_version;
EXPORT_SYMBOL_GPL(vmbus_proto_version);
static __u32 vmbus_get_next_version(__u32 current_version)
{
switch (current_version) {
case (VERSION_WIN7):
return VERSION_WS2008;
case (VERSION_WIN8):
return VERSION_WIN7;
case (VERSION_WS2008):
default:
return VERSION_INVAL;
}
}
static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
__u32 version)
{
int ret = 0;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
int t;
init_completion(&msginfo->waitevent);
msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
msg->vmbus_version_requested = version;
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
msg->monitor_page2 = virt_to_phys(
(void *)((unsigned long)vmbus_connection.monitor_pages +
PAGE_SIZE));
/*
* Add to list before we send the request since we may
* receive the response before returning from this routine
*/
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_add_tail(&msginfo->msglistentry,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
ret = vmbus_post_msg(msg,
sizeof(struct vmbus_channel_initiate_contact));
if (ret != 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
flags);
return ret;
}
/* Wait for the connection response */
t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
if (t == 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
flags);
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
flags);
return -ETIMEDOUT;
}
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
/* Check if successful */
if (msginfo->response.version_response.version_supported) {
vmbus_connection.conn_state = CONNECTED;
} else {
return -ECONNREFUSED;
}
return ret;
}
/*
* vmbus_connect - Sends a connect request on the partition service connection
*/
int vmbus_connect(void)
{
int ret = 0;
int t;
struct vmbus_channel_msginfo *msginfo = NULL;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
__u32 version;
/* Initialize the vmbus connection */
vmbus_connection.conn_state = CONNECTING;
@@ -99,69 +184,38 @@ int vmbus_connect(void)
goto cleanup;
}
init_completion(&msginfo->waitevent);
msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
msg->vmbus_version_requested = VMBUS_REVISION_NUMBER;
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
msg->monitor_page2 = virt_to_phys(
(void *)((unsigned long)vmbus_connection.monitor_pages +
PAGE_SIZE));
/*
* Add to list before we send the request since we may
* receive the response before returning from this routine
* Negotiate a compatible VMBUS version number with the
* host. We start with the highest number we can support
* and work our way down until we negotiate a compatible
* version.
*/
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_add_tail(&msginfo->msglistentry,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
version = VERSION_CURRENT;
ret = vmbus_post_msg(msg,
sizeof(struct vmbus_channel_initiate_contact));
if (ret != 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
flags);
do {
ret = vmbus_negotiate_version(msginfo, version);
if (ret == 0)
break;
version = vmbus_get_next_version(version);
} while (version != VERSION_INVAL);
if (version == VERSION_INVAL)
goto cleanup;
}
/* Wait for the connection response */
t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
if (t == 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
flags);
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
flags);
ret = -ETIMEDOUT;
goto cleanup;
}
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
/* Check if successful */
if (msginfo->response.version_response.version_supported) {
vmbus_connection.conn_state = CONNECTED;
} else {
pr_err("Unable to connect, "
"Version %d not supported by Hyper-V\n",
VMBUS_REVISION_NUMBER);
ret = -ECONNREFUSED;
goto cleanup;
}
vmbus_proto_version = version;
pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n",
host_info_eax, host_info_ebx >> 16,
host_info_ebx & 0xFFFF, host_info_ecx,
host_info_edx >> 24, host_info_edx & 0xFFFFFF,
version >> 16, version & 0xFFFF);
kfree(msginfo);
return 0;
cleanup:
pr_err("Unable to connect to host\n");
vmbus_connection.conn_state = DISCONNECTED;
if (vmbus_connection.work_queue)
@@ -212,6 +266,9 @@ static void process_chn_event(u32 relid)
{
struct vmbus_channel *channel;
unsigned long flags;
void *arg;
bool read_state;
u32 bytes_to_read;
/*
* Find the channel based on this relid and invokes the
@@ -234,10 +291,29 @@ */
*/
spin_lock_irqsave(&channel->inbound_lock, flags);
if (channel->onchannel_callback != NULL)
channel->onchannel_callback(channel->channel_callback_context);
else
if (channel->onchannel_callback != NULL) {
arg = channel->channel_callback_context;
read_state = channel->batched_reading;
/*
* This callback reads the messages sent by the host.
* We can optimize host to guest signaling by ensuring:
* 1. While reading the channel, we disable interrupts from
* host.
* 2. Ensure that we process all posted messages from the host
* before returning from this callback.
* 3. Once we return, enable signaling from the host. Once this
* state is set we check to see if additional packets are
* available to read. In this case we repeat the process.
*/
do {
hv_begin_read(&channel->inbound);
channel->onchannel_callback(arg);
bytes_to_read = hv_end_read(&channel->inbound);
} while (read_state && (bytes_to_read != 0));
} else {
pr_err("no channel callback for relid - %u\n", relid);
}
spin_unlock_irqrestore(&channel->inbound_lock, flags);
}
@@ -248,10 +324,32 @@ static void process_chn_event(u32 relid)
void vmbus_on_event(unsigned long data)
{
u32 dword;
u32 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
u32 maxdword;
int bit;
u32 relid;
u32 *recv_int_page = vmbus_connection.recv_int_page;
u32 *recv_int_page = NULL;
void *page_addr;
int cpu = smp_processor_id();
union hv_synic_event_flags *event;
if ((vmbus_proto_version == VERSION_WS2008) ||
(vmbus_proto_version == VERSION_WIN7)) {
maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
recv_int_page = vmbus_connection.recv_int_page;
} else {
/*
* When the host is win8 and beyond, the event page
* can be directly checked to get the id of the channel
* that has the interrupt pending.
*/
maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
page_addr = hv_context.synic_event_page[cpu];
event = (union hv_synic_event_flags *)page_addr +
VMBUS_MESSAGE_SINT;
recv_int_page = event->flags32;
}
/* Check events */
if (!recv_int_page)
@@ -307,12 +405,16 @@ int vmbus_post_msg(void *buffer, size_t buflen)
/*
* vmbus_set_event - Send an event notification to the parent
*/
int vmbus_set_event(u32 child_relid)
int vmbus_set_event(struct vmbus_channel *channel)
{
/* Each u32 represents 32 channels */
sync_set_bit(child_relid & 31,
(unsigned long *)vmbus_connection.send_int_page +
(child_relid >> 5));
u32 child_relid = channel->offermsg.child_relid;
return hv_signal_event();
if (!channel->is_dedicated_interrupt) {
/* Each u32 represents 32 channels */
sync_set_bit(child_relid & 31,
(unsigned long *)vmbus_connection.send_int_page +
(child_relid >> 5));
}
return hv_signal_event(channel->sig_event);
}


@@ -27,6 +27,7 @@
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"
@@ -34,13 +35,16 @@
struct hv_context hv_context = {
.synic_initialized = false,
.hypercall_page = NULL,
.signal_event_param = NULL,
.signal_event_buffer = NULL,
};
/*
* query_hypervisor_info - Get version info of the windows hypervisor
*/
unsigned int host_info_eax;
unsigned int host_info_ebx;
unsigned int host_info_ecx;
unsigned int host_info_edx;
static int query_hypervisor_info(void)
{
unsigned int eax;
@@ -70,13 +74,10 @@ static int query_hypervisor_info(void)
edx = 0;
op = HVCPUID_VERSION;
cpuid(op, &eax, &ebx, &ecx, &edx);
pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
eax,
ebx >> 16,
ebx & 0xFFFF,
ecx,
edx >> 24,
edx & 0xFFFFFF);
host_info_eax = eax;
host_info_ebx = ebx;
host_info_ecx = ecx;
host_info_edx = edx;
}
return max_leaf;
}
@@ -137,6 +138,10 @@ int hv_init(void)
memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
memset(hv_context.synic_message_page, 0,
sizeof(void *) * NR_CPUS);
memset(hv_context.vp_index, 0,
sizeof(int) * NR_CPUS);
memset(hv_context.event_dpc, 0,
sizeof(void *) * NR_CPUS);
max_leaf = query_hypervisor_info();
@@ -168,24 +173,6 @@ int hv_init(void)
hv_context.hypercall_page = virtaddr;
/* Setup the global signal event param for the signal event hypercall */
hv_context.signal_event_buffer =
kmalloc(sizeof(struct hv_input_signal_event_buffer),
GFP_KERNEL);
if (!hv_context.signal_event_buffer)
goto cleanup;
hv_context.signal_event_param =
(struct hv_input_signal_event *)
(ALIGN((unsigned long)
hv_context.signal_event_buffer,
HV_HYPERCALL_PARAM_ALIGN));
hv_context.signal_event_param->connectionid.asu32 = 0;
hv_context.signal_event_param->connectionid.u.id =
VMBUS_EVENT_CONNECTION_ID;
hv_context.signal_event_param->flag_number = 0;
hv_context.signal_event_param->rsvdz = 0;
return 0;
cleanup:
@@ -213,10 +200,6 @@ void hv_cleanup(void)
/* Reset our OS id */
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
kfree(hv_context.signal_event_buffer);
hv_context.signal_event_buffer = NULL;
hv_context.signal_event_param = NULL;
if (hv_context.hypercall_page) {
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -273,13 +256,12 @@
*
* This involves a hypercall.
*/
u16 hv_signal_event(void)
u16 hv_signal_event(void *con_id)
{
u16 status;
status = do_hypercall(HVCALL_SIGNAL_EVENT,
hv_context.signal_event_param,
NULL) & 0xFFFF;
status = (do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL) & 0xFFFF);
return status;
}
@@ -297,6 +279,7 @@ void hv_synic_init(void *irqarg)
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
union hv_synic_scontrol sctrl;
u64 vp_index;
u32 irq_vector = *((u32 *)(irqarg));
int cpu = smp_processor_id();
@@ -307,6 +290,15 @@
/* Check the version */
rdmsrl(HV_X64_MSR_SVERSION, version);
hv_context.event_dpc[cpu] = (struct tasklet_struct *)
kmalloc(sizeof(struct tasklet_struct),
GFP_ATOMIC);
if (hv_context.event_dpc[cpu] == NULL) {
pr_err("Unable to allocate event dpc\n");
goto cleanup;
}
tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
hv_context.synic_message_page[cpu] =
(void *)get_zeroed_page(GFP_ATOMIC);
@@ -345,7 +337,7 @@
shared_sint.as_uint64 = 0;
shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
shared_sint.masked = false;
shared_sint.auto_eoi = false;
shared_sint.auto_eoi = true;
wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
@@ -356,6 +348,14 @@
wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
hv_context.synic_initialized = true;
/*
* Setup the mapping between Hyper-V's notion
* of cpuid and Linux' notion of cpuid.
* This array will be indexed using Linux cpuid.
*/
rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
hv_context.vp_index[cpu] = (u32)vp_index;
return;
cleanup:


@@ -29,7 +29,6 @@
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/mman.h>
#include <linux/percpu_counter.h>
#include <linux/hyperv.h>
@@ -415,10 +414,17 @@ struct dm_info_msg {
static bool hot_add;
static bool do_hot_add;
/*
* Delay reporting memory pressure by
* the specified number of seconds.
*/
static uint pressure_report_delay = 30;
module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
static int dm_ring_size = (5 * PAGE_SIZE);
@@ -517,6 +523,34 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
}
}
unsigned long compute_balloon_floor(void)
{
unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
/* Simple continuous piecewise linear function:
* max MiB -> min MiB gradient
* 0 0
* 16 16
* 32 24
* 128 72 (1/2)
* 512 168 (1/4)
* 2048 360 (1/8)
* 8192 552 (1/32)
* 32768 1320
* 131072 4392
*/
if (totalram_pages < MB2PAGES(128))
min_pages = MB2PAGES(8) + (totalram_pages >> 1);
else if (totalram_pages < MB2PAGES(512))
min_pages = MB2PAGES(40) + (totalram_pages >> 2);
else if (totalram_pages < MB2PAGES(2048))
min_pages = MB2PAGES(104) + (totalram_pages >> 3);
else
min_pages = MB2PAGES(296) + (totalram_pages >> 5);
#undef MB2PAGES
return min_pages;
}
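/*
 * Worked example of the table above (illustrative, editorial): with
 * 512 MiB of RAM, totalram_pages is below MB2PAGES(2048), so
 * min_pages = MB2PAGES(104) + (totalram_pages >> 3)
 *           = 104 MiB + 64 MiB = 168 MiB,
 * which matches the "512 -> 168" row.
 */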
/*
* Post our status as it relates memory pressure to the
* host. Host expects the guests to post this status
@@ -530,15 +564,30 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
static void post_status(struct hv_dynmem_device *dm)
{
struct dm_status status;
struct sysinfo val;
if (pressure_report_delay > 0) {
--pressure_report_delay;
return;
}
si_meminfo(&val);
memset(&status, 0, sizeof(struct dm_status));
status.hdr.type = DM_STATUS_REPORT;
status.hdr.size = sizeof(struct dm_status);
status.hdr.trans_id = atomic_inc_return(&trans_id);
status.num_committed = vm_memory_committed();
/*
* The host expects the guest to report free memory.
* Further, the host expects the pressure information to
* include the ballooned out pages.
* For a given amount of memory that we are managing, we
* need to compute a floor below which we should not balloon.
* Compute this and add it to the pressure report.
*/
status.num_avail = val.freeram;
status.num_committed = vm_memory_committed() +
dm->num_pages_ballooned +
compute_balloon_floor();
vmbus_sendpacket(dm->dev->channel, &status,
sizeof(struct dm_status),
@@ -547,8 +596,6 @@ static void post_status(struct hv_dynmem_device *dm)
}
static void free_balloon_pages(struct hv_dynmem_device *dm,
union dm_mem_page_range *range_array)
{
@@ -1013,9 +1060,7 @@ static int balloon_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Dynamic Memory Class ID */
/* 525074DC-8985-46e2-8057-A307DC18A502 */
{ VMBUS_DEVICE(0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46,
0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
},
{ HV_DM_GUID, },
{ },
};


@@ -49,6 +49,16 @@ static struct hv_util_service util_kvp = {
.util_deinit = hv_kvp_deinit,
};
static void perform_shutdown(struct work_struct *dummy)
{
orderly_poweroff(true);
}
/*
* Perform the shutdown operation in a thread context.
*/
static DECLARE_WORK(shutdown_work, perform_shutdown);
static void shutdown_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
@@ -106,7 +116,7 @@ static void shutdown_onchannelcallback(void *context)
}
if (execute_shutdown == true)
orderly_poweroff(true);
schedule_work(&shutdown_work);
}
/*
@@ -274,6 +284,16 @@ static int util_probe(struct hv_device *dev,
}
}
/*
* The set of services managed by the util driver is not performance
* critical and does not need batched reading. Furthermore, some services
* such as KVP can only handle one message from the host at a time.
* Turn off batched reading for all util drivers before we open the
* channel.
*/
set_channel_read_state(dev->channel, false);
ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
srv->util_cb, dev->channel);
if (ret)
@@ -304,21 +324,21 @@ static int util_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Shutdown guid */
{ VMBUS_DEVICE(0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49,
0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB)
.driver_data = (unsigned long)&util_shutdown },
{ HV_SHUTDOWN_GUID,
.driver_data = (unsigned long)&util_shutdown
},
/* Time synch guid */
{ VMBUS_DEVICE(0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
.driver_data = (unsigned long)&util_timesynch },
{ HV_TS_GUID,
.driver_data = (unsigned long)&util_timesynch
},
/* Heartbeat guid */
{ VMBUS_DEVICE(0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
.driver_data = (unsigned long)&util_heartbeat },
{ HV_HEART_BEAT_GUID,
.driver_data = (unsigned long)&util_heartbeat
},
/* KVP guid */
{ VMBUS_DEVICE(0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6)
.driver_data = (unsigned long)&util_kvp },
{ HV_KVP_GUID,
.driver_data = (unsigned long)&util_kvp
},
{ },
};


@@ -101,15 +101,6 @@ enum hv_message_type {
/* Define invalid partition identifier. */
#define HV_PARTITION_ID_INVALID ((u64)0x0)
/* Define connection identifier type. */
union hv_connection_id {
u32 asu32;
struct {
u32 id:24;
u32 reserved:8;
} u;
};
/* Define port identifier type. */
union hv_port_id {
u32 asu32;
@@ -338,13 +329,6 @@ struct hv_input_post_message {
u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};
/* Definition of the hv_signal_event hypercall input structure. */
struct hv_input_signal_event {
union hv_connection_id connectionid;
u16 flag_number;
u16 rsvdz;
};
/*
* Versioning definitions used for guests reporting themselves to the
* hypervisor, and vice versa.
@ -498,11 +482,6 @@ static const uuid_le VMBUS_SERVICE_ID = {
struct hv_input_signal_event_buffer {
u64 align8;
struct hv_input_signal_event event;
};
struct hv_context {
/* We only support running on top of Hyper-V
* So at this point this really can only contain the Hyper-V ID
@ -513,16 +492,24 @@ struct hv_context {
bool synic_initialized;
/*
* This is used as an input param to HvCallSignalEvent hypercall. The
* input param is immutable in our usage and must be dynamic mem (vs
* stack or global). */
struct hv_input_signal_event_buffer *signal_event_buffer;
/* 8-bytes aligned of the buffer above */
struct hv_input_signal_event *signal_event_param;
void *synic_message_page[NR_CPUS];
void *synic_event_page[NR_CPUS];
/*
* Hypervisor's notion of virtual processor ID is different from
* Linux's notion of CPU ID. This information can only be retrieved
* in the context of the calling CPU. Set up a map for easy access
* to this information:
*
* vp_index[a] is Hyper-V's processor ID corresponding to
* Linux cpuid 'a'.
*/
u32 vp_index[NR_CPUS];
/*
* Starting with win8, we can take channel interrupts on any CPU;
* we will manage the tasklet that handles events on a per CPU
* basis.
*/
struct tasklet_struct *event_dpc[NR_CPUS];
};
extern struct hv_context hv_context;
@ -538,12 +525,19 @@ extern int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
void *payload, size_t payload_size);
extern u16 hv_signal_event(void);
extern u16 hv_signal_event(void *con_id);
extern void hv_synic_init(void *irqarg);
extern void hv_synic_cleanup(void *arg);
/*
* Host version information.
*/
extern unsigned int host_info_eax;
extern unsigned int host_info_ebx;
extern unsigned int host_info_ecx;
extern unsigned int host_info_edx;
/* Interface */
@ -555,7 +549,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
struct scatterlist *sglist,
u32 sgcount);
u32 sgcount, bool *signal);
int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
u32 buflen);
@ -563,13 +557,16 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info,
void *buffer,
u32 buflen,
u32 offset);
u32 offset, bool *signal);
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *ring_info);
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
void hv_begin_read(struct hv_ring_buffer_info *rbi);
u32 hv_end_read(struct hv_ring_buffer_info *rbi);
/*
* Maximum channels is determined by the size of the interrupt page
* which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
@ -657,7 +654,7 @@ int vmbus_connect(void);
int vmbus_post_msg(void *buffer, size_t buflen);
int vmbus_set_event(u32 child_relid);
int vmbus_set_event(struct vmbus_channel *channel);
void vmbus_on_event(unsigned long data);

View File

@ -29,6 +29,105 @@
#include "hyperv_vmbus.h"
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
rbi->ring_buffer->interrupt_mask = 1;
smp_mb();
}
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
u32 read;
u32 write;
rbi->ring_buffer->interrupt_mask = 0;
smp_mb();
/*
* Now check to see if the ring buffer is still empty.
* If it is not, we raced and we need to process new
* incoming messages.
*/
hv_get_ringbuffer_availbytes(rbi, &read, &write);
return read;
}
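
A consumer is expected to bracket its drain loop with these two helpers. A minimal sketch of the intended pattern (hypothetical callback, packet processing elided):

	static void example_channel_callback(struct hv_ring_buffer_info *rbi)
	{
		hv_begin_read(rbi);	/* mask host signaling while we drain */
		for (;;) {
			/* ... read and process every available packet ... */
			if (hv_end_read(rbi) == 0)
				break;
			/* raced with new data arriving: mask and re-drain */
			hv_begin_read(rbi);
		}
	}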
/*
* When we write to the ring buffer, check if the host needs to
* be signaled. Here are the details of this protocol:
*
* 1. The host guarantees that while it is draining the
* ring buffer, it will set the interrupt_mask to
* indicate it does not need to be interrupted when
* new data is placed.
*
* 2. The host guarantees that it will completely drain
* the ring buffer before exiting the read loop. Further,
* once the ring buffer is empty, it will clear the
* interrupt_mask and re-check to see if new data has
* arrived.
*/
static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
if (rbi->ring_buffer->interrupt_mask)
return false;
/*
* This is the only case we need to signal when the
* ring transitions from being empty to non-empty.
*/
if (old_write == rbi->ring_buffer->read_index)
return true;
return false;
}
/*
* To optimize flow management on the send side, when the sender
* is blocked for lack of sufficient space in the ring buffer, the
* consumer of the ring buffer can potentially signal the producer.
* This is controlled by the following parameters:
*
* 1. pending_send_sz: the size in bytes that the producer is
*    trying to send.
* 2. The feature bit feat_pending_send_sz, set to indicate whether
*    the consumer of the ring will signal when the ring state
*    transitions from being full to a state where there is room
*    for the producer to send the pending packet.
*/
static bool hv_need_to_signal_on_read(u32 old_rd,
struct hv_ring_buffer_info *rbi)
{
u32 prev_write_sz;
u32 cur_write_sz;
u32 r_size;
u32 write_loc = rbi->ring_buffer->write_index;
u32 read_loc = rbi->ring_buffer->read_index;
u32 pending_sz = rbi->ring_buffer->pending_send_sz;
/*
* If the other end is not blocked on write, don't bother.
*/
if (pending_sz == 0)
return false;
r_size = rbi->ring_datasize;
cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
read_loc - write_loc;
prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
old_rd - write_loc;
if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
return true;
return false;
}
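
A worked example with hypothetical numbers: let r_size = 4096, write_loc = 1000, pending_send_sz = 2048, and old_rd = 3000. Before the read, prev_write_sz = old_rd - write_loc = 2000, less than the 2048 bytes the producer wants to send. If this read advances read_index to 3500, then cur_write_sz = 3500 - 1000 = 2500 >= 2048, so the ring has just crossed from "no room for the pending packet" to "room available", and the reader signals the producer exactly once.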
/*
* hv_get_next_write_location()
@ -239,19 +338,6 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
}
}
/*
*
* hv_get_ringbuffer_interrupt_mask()
*
* Get the interrupt mask for the specified ring buffer
*
*/
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
return rbi->ring_buffer->interrupt_mask;
}
/*
*
* hv_ringbuffer_init()
@ -298,7 +384,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
*
*/
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
struct scatterlist *sglist, u32 sgcount)
struct scatterlist *sglist, u32 sgcount, bool *signal)
{
int i = 0;
u32 bytes_avail_towrite;
@ -307,6 +393,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
struct scatterlist *sg;
u32 next_write_location;
u32 old_write;
u64 prev_indices = 0;
unsigned long flags;
@ -335,6 +422,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
/* Write to the ring buffer */
next_write_location = hv_get_next_write_location(outring_info);
old_write = next_write_location;
for_each_sg(sglist, sg, sgcount, i)
{
next_write_location = hv_copyto_ringbuffer(outring_info,
@ -351,14 +440,16 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
&prev_indices,
sizeof(u64));
/* Make sure we flush all writes before updating the writeIndex */
smp_wmb();
/* Issue a full memory barrier before updating the write index */
smp_mb();
/* Now, update the write location */
hv_set_next_write_location(outring_info, next_write_location);
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
*signal = hv_need_to_signal(old_write, outring_info);
return 0;
}
@ -414,13 +505,14 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
*
*/
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
u32 buflen, u32 offset)
u32 buflen, u32 offset, bool *signal)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
u32 next_read_location = 0;
u64 prev_indices = 0;
unsigned long flags;
u32 old_read;
if (buflen <= 0)
return -EINVAL;
@ -431,6 +523,8 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
&bytes_avail_toread,
&bytes_avail_towrite);
old_read = bytes_avail_toread;
/* Make sure there is something to read */
if (bytes_avail_toread < buflen) {
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@ -461,5 +555,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
*signal = hv_need_to_signal_on_read(old_read, inring_info);
return 0;
}

View File

@ -33,6 +33,7 @@
#include <acpi/acpi_bus.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include "hyperv_vmbus.h"
@ -41,7 +42,6 @@
static struct acpi_device *hv_acpi_dev;
static struct tasklet_struct msg_dpc;
static struct tasklet_struct event_dpc;
static struct completion probe_event;
static int irq;
@ -454,21 +454,40 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
union hv_synic_event_flags *event;
bool handled = false;
page_addr = hv_context.synic_event_page[cpu];
if (page_addr == NULL)
return IRQ_NONE;
event = (union hv_synic_event_flags *)page_addr +
VMBUS_MESSAGE_SINT;
/*
* Check for events before checking for messages. This is the order
* in which events and messages are checked in Windows guests on
* Hyper-V, and the Windows team suggested we do the same.
*/
page_addr = hv_context.synic_event_page[cpu];
event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
if ((vmbus_proto_version == VERSION_WS2008) ||
(vmbus_proto_version == VERSION_WIN7)) {
/* Since we are a child, we only need to check bit 0 */
if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
/* Since we are a child, we only need to check bit 0 */
if (sync_test_and_clear_bit(0,
(unsigned long *) &event->flags32[0])) {
handled = true;
}
} else {
/*
* Our host is win8 or above. The signaling mechanism
* has changed and we can directly look at the event page.
* If bit n is set then we have an interrupt on the channel
* whose id is n.
*/
handled = true;
tasklet_schedule(&event_dpc);
}
if (handled)
tasklet_schedule(hv_context.event_dpc[cpu]);
page_addr = hv_context.synic_message_page[cpu];
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
@ -484,6 +503,19 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
return IRQ_NONE;
}
/*
* vmbus interrupt flow handler:
* vmbus interrupts can concurrently occur on multiple CPUs and
* can be handled concurrently.
*/
static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
{
kstat_incr_irqs_this_cpu(irq, desc);
desc->action->handler(irq, desc->action->dev_id);
}
/*
* vmbus_bus_init -Main vmbus driver initialization routine.
*
@ -506,7 +538,6 @@ static int vmbus_bus_init(int irq)
}
tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
tasklet_init(&event_dpc, vmbus_on_event, 0);
ret = bus_register(&hv_bus);
if (ret)
@ -520,6 +551,13 @@ static int vmbus_bus_init(int irq)
goto err_unregister;
}
/*
* Vmbus interrupts can be handled concurrently on
* different CPUs. Establish an appropriate interrupt flow
* handler that can support this model.
*/
irq_set_handler(irq, vmbus_flow_handler);
vector = IRQ0_VECTOR + irq;
/*
@ -575,8 +613,6 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
ret = driver_register(&hv_driver->driver);
vmbus_request_offers();
return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);

View File

@ -20,7 +20,6 @@
#include <linux/serial.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/io.h>
#include <linux/ipack.h>
#include "ipoctal.h"
@ -38,21 +37,19 @@ struct ipoctal_channel {
spinlock_t lock;
unsigned int pointer_read;
unsigned int pointer_write;
atomic_t open;
struct tty_port tty_port;
union scc2698_channel __iomem *regs;
union scc2698_block __iomem *block_regs;
unsigned int board_id;
unsigned char *board_write;
u8 isr_rx_rdy_mask;
u8 isr_tx_rdy_mask;
unsigned int rx_enable;
};
struct ipoctal {
struct ipack_device *dev;
unsigned int board_id;
struct ipoctal_channel channel[NR_CHANNELS];
unsigned char write;
struct tty_driver *tty_drv;
u8 __iomem *mem8_space;
u8 __iomem *int_space;
@ -64,28 +61,23 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
channel = dev_get_drvdata(tty->dev);
/*
* Enable RX. TX will be enabled when
* there is something to send
*/
iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
channel->rx_enable = 1;
return 0;
}
static int ipoctal_open(struct tty_struct *tty, struct file *file)
{
int res;
struct ipoctal_channel *channel;
channel = dev_get_drvdata(tty->dev);
if (atomic_read(&channel->open))
return -EBUSY;
tty->driver_data = channel;
res = tty_port_open(&channel->tty_port, tty, file);
if (res)
return res;
atomic_inc(&channel->open);
return 0;
return tty_port_open(&channel->tty_port, tty, file);
}
static void ipoctal_reset_stats(struct ipoctal_stats *stats)
@ -111,9 +103,7 @@ static void ipoctal_close(struct tty_struct *tty, struct file *filp)
struct ipoctal_channel *channel = tty->driver_data;
tty_port_close(&channel->tty_port, tty, filp);
if (atomic_dec_and_test(&channel->open))
ipoctal_free_channel(channel);
ipoctal_free_channel(channel);
}
static int ipoctal_get_icount(struct tty_struct *tty,
@ -137,11 +127,12 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel, u8 sr)
{
struct tty_port *port = &channel->tty_port;
unsigned char value;
unsigned char flag = TTY_NORMAL;
unsigned char flag;
u8 isr;
do {
value = ioread8(&channel->regs->r.rhr);
flag = TTY_NORMAL;
/* Error: count statistics */
if (sr & SR_ERROR) {
iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@ -183,10 +174,8 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
unsigned char value;
unsigned int *pointer_write = &channel->pointer_write;
if (channel->nb_bytes <= 0) {
channel->nb_bytes = 0;
if (channel->nb_bytes == 0)
return;
}
value = channel->tty_port.xmit_buf[*pointer_write];
iowrite8(value, &channel->regs->w.thr);
@ -194,39 +183,27 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
(*pointer_write)++;
*pointer_write = *pointer_write % PAGE_SIZE;
channel->nb_bytes--;
if ((channel->nb_bytes == 0) &&
(waitqueue_active(&channel->queue))) {
if (channel->board_id != IPACK1_DEVICE_ID_SBS_OCTAL_485) {
*channel->board_write = 1;
wake_up_interruptible(&channel->queue);
}
}
}
static void ipoctal_irq_channel(struct ipoctal_channel *channel)
{
u8 isr, sr;
/* If there is no client, skip the check */
if (!atomic_read(&channel->open))
return;
spin_lock(&channel->lock);
/* The HW is organized in pairs of channels. See which register we need
* to read from */
isr = ioread8(&channel->block_regs->r.isr);
sr = ioread8(&channel->regs->r.sr);
/* In case of RS-485, change from TX to RX when finishing TX.
* Half-duplex. */
if ((channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) &&
(sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
if ((sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
*channel->board_write = 1;
wake_up_interruptible(&channel->queue);
/* In case of RS-485, change from TX to RX when finishing TX.
* Half-duplex. */
if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
channel->rx_enable = 1;
}
}
/* RX data */
@ -237,7 +214,7 @@ static void ipoctal_irq_channel(struct ipoctal_channel *channel)
if ((isr & channel->isr_tx_rdy_mask) && (sr & SR_TX_READY))
ipoctal_irq_tx(channel);
tty_flip_buffer_push(&channel->tty_port);
spin_unlock(&channel->lock);
}
static irqreturn_t ipoctal_irq_handler(void *arg)
@ -245,14 +222,14 @@ static irqreturn_t ipoctal_irq_handler(void *arg)
unsigned int i;
struct ipoctal *ipoctal = (struct ipoctal *) arg;
/* Check all channels */
for (i = 0; i < NR_CHANNELS; i++)
ipoctal_irq_channel(&ipoctal->channel[i]);
/* Clear the IPack device interrupt */
readw(ipoctal->int_space + ACK_INT_REQ0);
readw(ipoctal->int_space + ACK_INT_REQ1);
/* Check all channels */
for (i = 0; i < NR_CHANNELS; i++)
ipoctal_irq_channel(&ipoctal->channel[i]);
return IRQ_HANDLED;
}
@ -306,7 +283,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal->mem8_space =
devm_ioremap_nocache(&ipoctal->dev->dev,
region->start, 0x8000);
if (!addr) {
if (!ipoctal->mem8_space) {
dev_err(&ipoctal->dev->dev,
"Unable to map slot [%d:%d] MEM8 space!\n",
bus_nr, slot);
@ -319,7 +296,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
struct ipoctal_channel *channel = &ipoctal->channel[i];
channel->regs = chan_regs + i;
channel->block_regs = block_regs + (i >> 1);
channel->board_write = &ipoctal->write;
channel->board_id = ipoctal->board_id;
if (i & 1) {
channel->isr_tx_rdy_mask = ISR_TxRDY_B;
@ -330,6 +306,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
}
iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
channel->rx_enable = 0;
iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
iowrite8(MR1_CHRL_8_BITS | MR1_ERROR_CHAR | MR1_RxINT_RxRDY,
@ -402,8 +379,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal_reset_stats(&channel->stats);
channel->nb_bytes = 0;
init_waitqueue_head(&channel->queue);
spin_lock_init(&channel->lock);
channel->pointer_read = 0;
channel->pointer_write = 0;
@ -414,12 +389,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
continue;
}
dev_set_drvdata(tty_dev, channel);
/*
* Enable again the RX. TX will be enabled when
* there is something to send
*/
iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
}
return 0;
@ -459,6 +428,7 @@ static int ipoctal_write_tty(struct tty_struct *tty,
/* As the IP-OCTAL 485 only supports half duplex, do it manually */
if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
iowrite8(CR_DISABLE_RX, &channel->regs->w.cr);
channel->rx_enable = 0;
iowrite8(CR_CMD_ASSERT_RTSN, &channel->regs->w.cr);
}
@ -467,10 +437,6 @@ static int ipoctal_write_tty(struct tty_struct *tty,
* operations
*/
iowrite8(CR_ENABLE_TX, &channel->regs->w.cr);
wait_event_interruptible(channel->queue, *channel->board_write);
iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
*channel->board_write = 0;
return char_copied;
}
@ -622,8 +588,9 @@ static void ipoctal_set_termios(struct tty_struct *tty,
iowrite8(mr2, &channel->regs->w.mr);
iowrite8(csr, &channel->regs->w.csr);
/* Enable again the RX */
iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
/* Re-enable RX if it was enabled before */
if (channel->rx_enable)
iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
}
static void ipoctal_hangup(struct tty_struct *tty)
@ -643,6 +610,7 @@ static void ipoctal_hangup(struct tty_struct *tty)
tty_port_hangup(&channel->tty_port);
iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
channel->rx_enable = 0;
iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@ -652,6 +620,22 @@ static void ipoctal_hangup(struct tty_struct *tty)
wake_up_interruptible(&channel->tty_port.open_wait);
}
static void ipoctal_shutdown(struct tty_struct *tty)
{
struct ipoctal_channel *channel = tty->driver_data;
if (channel == NULL)
return;
iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
channel->rx_enable = 0;
iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_MR, &channel->regs->w.cr);
clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags);
}
static const struct tty_operations ipoctal_fops = {
.ioctl = NULL,
.open = ipoctal_open,
@ -662,6 +646,7 @@ static const struct tty_operations ipoctal_fops = {
.chars_in_buffer = ipoctal_chars_in_buffer,
.get_icount = ipoctal_get_icount,
.hangup = ipoctal_hangup,
.shutdown = ipoctal_shutdown,
};
static int ipoctal_probe(struct ipack_device *dev)

View File

@ -84,6 +84,12 @@ int wm5102_patch(struct arizona *arizona)
}
static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = {
[ARIZONA_IRQ_MICD_CLAMP_FALL] = {
.mask = ARIZONA_MICD_CLAMP_FALL_EINT1
},
[ARIZONA_IRQ_MICD_CLAMP_RISE] = {
.mask = ARIZONA_MICD_CLAMP_RISE_EINT1
},
[ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
[ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
[ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
@ -313,6 +319,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
{ 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
{ 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
{ 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
{ 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
@ -1107,6 +1114,8 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ACCESSORY_DETECT_MODE_1:
case ARIZONA_HEADPHONE_DETECT_1:
case ARIZONA_HEADPHONE_DETECT_2:
case ARIZONA_HP_DACVAL:
case ARIZONA_MICD_CLAMP_CONTROL:
case ARIZONA_MIC_DETECT_1:
case ARIZONA_MIC_DETECT_2:
case ARIZONA_MIC_DETECT_3:
@ -1876,6 +1885,7 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
case ARIZONA_HEADPHONE_DETECT_2:
case ARIZONA_HP_DACVAL:
case ARIZONA_MIC_DETECT_3:
return true;
default:

View File

@ -499,6 +499,17 @@ config USB_SWITCH_FSA9480
stereo and mono audio, video, microphone and UART data to use
a common connector port.
config LATTICE_ECP3_CONFIG
tristate "Lattice ECP3 FPGA bitstream configuration via SPI"
depends on SPI && SYSFS
select FW_LOADER
default n
help
This option enables support for bitstream configuration (programming
or loading) of the Lattice ECP3 FPGA family via SPI.
If unsure, say N.
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@ -507,4 +518,5 @@ source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
endmenu

View File

@ -49,3 +49,6 @@ obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o

View File

@ -1,6 +1,6 @@
config CB710_CORE
tristate "ENE CB710/720 Flash memory card reader support"
depends on PCI
depends on PCI && GENERIC_HARDIRQS
help
This option enables support for PCI ENE CB710/720 Flash memory card
reader found in some laptops (i.e. some versions of the HP Compaq nx9500).

View File

@ -0,0 +1,243 @@
/*
* Copyright (C) 2012 Stefan Roese <sr@denx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#define FIRMWARE_NAME "lattice-ecp3.bit"
/*
* The JTAG IDs of the supported FPGAs. The ID is 32 bits wide,
* bit-reversed as noted in the manual.
*/
#define ID_ECP3_17 0xc2088080
#define ID_ECP3_35 0xc2048080
/* FPGA commands */
#define FPGA_CMD_READ_ID 0x07 /* plus 24 bits */
#define FPGA_CMD_READ_STATUS 0x09 /* plus 24 bits */
#define FPGA_CMD_CLEAR 0x70
#define FPGA_CMD_REFRESH 0x71
#define FPGA_CMD_WRITE_EN 0x4a /* plus 2 bits */
#define FPGA_CMD_WRITE_DIS 0x4f /* plus 8 bits */
#define FPGA_CMD_WRITE_INC 0x41 /* plus 0 bits */
/*
* The status register is 32-bit reversed; DONE is bit 17 per TN1222.pdf
* (LatticeECP3 Slave SPI Port User's Guide)
*/
#define FPGA_STATUS_DONE 0x00004000
#define FPGA_STATUS_CLEARED 0x00010000
#define FPGA_CLEAR_TIMEOUT 5000 /* max. 5000ms for FPGA clear */
#define FPGA_CLEAR_MSLEEP 10
#define FPGA_CLEAR_LOOP_COUNT (FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP)
struct fpga_data {
struct completion fw_loaded;
};
struct ecp3_dev {
u32 jedec_id;
char *name;
};
static const struct ecp3_dev ecp3_dev[] = {
{
.jedec_id = ID_ECP3_17,
.name = "Lattice ECP3-17",
},
{
.jedec_id = ID_ECP3_35,
.name = "Lattice ECP3-35",
},
};
static void firmware_load(const struct firmware *fw, void *context)
{
struct spi_device *spi = (struct spi_device *)context;
struct fpga_data *data = dev_get_drvdata(&spi->dev);
u8 *buffer;
int ret;
u8 txbuf[8];
u8 rxbuf[8];
int rx_len = 8;
int i;
u32 jedec_id;
u32 status;
if (fw->size == 0) {
dev_err(&spi->dev, "Error: Firmware size is 0!\n");
return;
}
/* Fill dummy data (24 stuffing bits for commands) */
txbuf[1] = 0x00;
txbuf[2] = 0x00;
txbuf[3] = 0x00;
/* Trying to speak with the FPGA via SPI... */
txbuf[0] = FPGA_CMD_READ_ID;
ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]);
jedec_id = *(u32 *)&rxbuf[4];
for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) {
if (jedec_id == ecp3_dev[i].jedec_id)
break;
}
if (i == ARRAY_SIZE(ecp3_dev)) {
dev_err(&spi->dev,
"Error: No supported FPGA detected (JEDEC_ID=%08x)!\n",
jedec_id);
return;
}
dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
txbuf[0] = FPGA_CMD_READ_STATUS;
ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
buffer = kzalloc(fw->size + 8, GFP_KERNEL);
if (!buffer) {
dev_err(&spi->dev, "Error: Can't allocate memory!\n");
return;
}
/*
* Insert WRITE_INC command into stream (one SPI frame)
*/
buffer[0] = FPGA_CMD_WRITE_INC;
buffer[1] = 0xff;
buffer[2] = 0xff;
buffer[3] = 0xff;
memcpy(buffer + 4, fw->data, fw->size);
txbuf[0] = FPGA_CMD_REFRESH;
ret = spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_WRITE_EN;
ret = spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_CLEAR;
ret = spi_write(spi, txbuf, 4);
/*
* Wait for FPGA memory to become cleared
*/
for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
txbuf[0] = FPGA_CMD_READ_STATUS;
ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = *(u32 *)&rxbuf[4];
if (status == FPGA_STATUS_CLEARED)
break;
msleep(FPGA_CLEAR_MSLEEP);
}
if (i == FPGA_CLEAR_LOOP_COUNT) {
dev_err(&spi->dev,
"Error: Timeout waiting for FPGA to clear (status=%08x)!\n",
status);
kfree(buffer);
return;
}
dev_info(&spi->dev, "Configuring the FPGA...\n");
ret = spi_write(spi, buffer, fw->size + 8);
txbuf[0] = FPGA_CMD_WRITE_DIS;
ret = spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_READ_STATUS;
ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
status = *(u32 *)&rxbuf[4];
/* Check result */
if (status & FPGA_STATUS_DONE)
dev_info(&spi->dev, "FPGA succesfully configured!\n");
else
dev_info(&spi->dev, "FPGA not configured (DONE not set)\n");
/*
* Don't forget to release the firmware again
*/
release_firmware(fw);
kfree(buffer);
complete(&data->fw_loaded);
}
static int lattice_ecp3_probe(struct spi_device *spi)
{
struct fpga_data *data;
int err;
data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
if (!data) {
dev_err(&spi->dev, "Memory allocation for fpga_data failed\n");
return -ENOMEM;
}
spi_set_drvdata(spi, data);
init_completion(&data->fw_loaded);
err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
FIRMWARE_NAME, &spi->dev,
GFP_KERNEL, spi, firmware_load);
if (err) {
dev_err(&spi->dev, "Firmware loading failed with %d!\n", err);
return err;
}
dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n");
return 0;
}
static int lattice_ecp3_remove(struct spi_device *spi)
{
struct fpga_data *data = spi_get_drvdata(spi);
wait_for_completion(&data->fw_loaded);
return 0;
}
static const struct spi_device_id lattice_ecp3_id[] = {
{ "ecp3-17", 0 },
{ "ecp3-35", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, lattice_ecp3_id);
static struct spi_driver lattice_ecp3_driver = {
.driver = {
.name = "lattice-ecp3",
.owner = THIS_MODULE,
},
.probe = lattice_ecp3_probe,
.remove = lattice_ecp3_remove,
.id_table = lattice_ecp3_id,
};
module_spi_driver(lattice_ecp3_driver);
MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI");
MODULE_LICENSE("GPL");
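
For this driver to bind, the SPI core must know about a matching device. A hedged example of static board registration — bus number, chip select, and clock rate are placeholders standing in for a real board file — would be:

	static struct spi_board_info ecp3_board_info __initdata = {
		.modalias	= "ecp3-35",	/* matches lattice_ecp3_id */
		.max_speed_hz	= 10000000,
		.bus_num	= 0,
		.chip_select	= 1,
	};

	/* from the board's init code: */
	spi_register_board_info(&ecp3_board_info, 1);

The bitstream itself is fetched through the firmware loader, so it must be installed where the loader can find it, typically as /lib/firmware/lattice-ecp3.bit.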

View File

@ -1,11 +1,22 @@
config INTEL_MEI
tristate "Intel Management Engine Interface (Intel MEI)"
tristate "Intel Management Engine Interface"
depends on X86 && PCI && WATCHDOG_CORE
help
The Intel Management Engine (Intel ME) provides Manageability,
Security and Media services for systems containing Intel chipsets.
If selected, the /dev/mei misc device will be created.
For more information see
<http://software.intel.com/en-us/manageability/>
config INTEL_MEI_ME
bool "ME Enabled Intel Chipsets"
depends on INTEL_MEI
depends on X86 && PCI && WATCHDOG_CORE
default y
help
MEI support for ME Enabled Intel chipsets.
Supported Chipsets are:
7 Series Chipset Family
6 Series Chipset Family
@ -24,5 +35,3 @@ config INTEL_MEI
82Q33 Express
82X38/X48 Express
For more information see
<http://software.intel.com/en-us/manageability/>

View File

@ -4,9 +4,11 @@
#
obj-$(CONFIG_INTEL_MEI) += mei.o
mei-objs := init.o
mei-objs += hbm.o
mei-objs += interrupt.o
mei-objs += interface.o
mei-objs += iorw.o
mei-objs += client.o
mei-objs += main.o
mei-objs += amthif.o
mei-objs += wd.o
mei-$(CONFIG_INTEL_MEI_ME) += pci-me.o
mei-$(CONFIG_INTEL_MEI_ME) += hw-me.o

View File

@ -31,15 +31,16 @@
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hw.h"
#include <linux/mei.h>
#include "interface.h"
#include "hbm.h"
#include "hw-me.h"
#include "client.h"
const uuid_le mei_amthi_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, 0xac,
0xa8, 0x46, 0xe0, 0xff, 0x65,
0x81, 0x4c);
const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
0xac, 0xa8, 0x46, 0xe0,
0xff, 0x65, 0x81, 0x4c);
/**
* mei_amthif_reset_params - initializes mei device iamthif
@ -64,22 +65,24 @@ void mei_amthif_reset_params(struct mei_device *dev)
* @dev: the device structure
*
*/
void mei_amthif_host_init(struct mei_device *dev)
int mei_amthif_host_init(struct mei_device *dev)
{
int i;
struct mei_cl *cl = &dev->iamthif_cl;
unsigned char *msg_buf;
int ret, i;
mei_cl_init(&dev->iamthif_cl, dev);
dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
/* find ME amthi client */
i = mei_me_cl_link(dev, &dev->iamthif_cl,
&mei_amthi_guid, MEI_IAMTHIF_HOST_CLIENT_ID);
mei_cl_init(cl, dev);
i = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
if (i < 0) {
dev_info(&dev->pdev->dev, "failed to find iamthif client.\n");
return;
dev_info(&dev->pdev->dev, "amthif: failed to find the client\n");
return -ENOENT;
}
cl->me_client_id = dev->me_clients[i].client_id;
/* Assign iamthif_mtu to the value received from ME */
dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length;
@ -93,19 +96,29 @@ void mei_amthif_host_init(struct mei_device *dev)
msg_buf = kcalloc(dev->iamthif_mtu,
sizeof(unsigned char), GFP_KERNEL);
if (!msg_buf) {
dev_dbg(&dev->pdev->dev, "memory allocation for ME message buffer failed.\n");
return;
dev_err(&dev->pdev->dev, "amthif: memory allocation for ME message buffer failed.\n");
return -ENOMEM;
}
dev->iamthif_msg_buf = msg_buf;
if (mei_connect(dev, &dev->iamthif_cl)) {
dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n");
dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
dev->iamthif_cl.host_client_id = 0;
} else {
dev->iamthif_cl.timer_count = MEI_CONNECT_TIMEOUT;
ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
if (ret < 0) {
dev_err(&dev->pdev->dev, "amthif: failed link client\n");
return -ENOENT;
}
cl->state = MEI_FILE_CONNECTING;
if (mei_hbm_cl_connect_req(dev, cl)) {
dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n");
cl->state = MEI_FILE_DISCONNECTED;
cl->host_client_id = 0;
} else {
cl->timer_count = MEI_CONNECT_TIMEOUT;
}
return 0;
}
/**
@ -168,10 +181,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
if (i < 0) {
dev_dbg(&dev->pdev->dev, "amthi client not found.\n");
dev_dbg(&dev->pdev->dev, "amthif client not found.\n");
return -ENODEV;
}
dev_dbg(&dev->pdev->dev, "checking amthi data\n");
dev_dbg(&dev->pdev->dev, "checking amthif data\n");
cb = mei_amthif_find_read_list_entry(dev, file);
/* Check if we can block or not */
@ -179,7 +192,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
return -EAGAIN;
dev_dbg(&dev->pdev->dev, "waiting for amthi data\n");
dev_dbg(&dev->pdev->dev, "waiting for amthif data\n");
while (cb == NULL) {
/* unlock the Mutex */
mutex_unlock(&dev->device_lock);
@ -197,17 +210,17 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
}
dev_dbg(&dev->pdev->dev, "Got amthi data\n");
dev_dbg(&dev->pdev->dev, "Got amthif data\n");
dev->iamthif_timer = 0;
if (cb) {
timeout = cb->read_time +
mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
dev_dbg(&dev->pdev->dev, "amthi timeout = %lud\n",
dev_dbg(&dev->pdev->dev, "amthif timeout = %lud\n",
timeout);
if (time_after(jiffies, timeout)) {
dev_dbg(&dev->pdev->dev, "amthi Time out\n");
dev_dbg(&dev->pdev->dev, "amthif Time out\n");
/* the 15 sec timeout for the message has expired */
list_del(&cb->list);
rets = -ETIMEDOUT;
@ -227,9 +240,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
* remove message from deletion list
*/
dev_dbg(&dev->pdev->dev, "amthi cb->response_buffer size - %d\n",
dev_dbg(&dev->pdev->dev, "amthif cb->response_buffer size - %d\n",
cb->response_buffer.size);
dev_dbg(&dev->pdev->dev, "amthi cb->buf_idx - %lu\n", cb->buf_idx);
dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
/* length is being truncated to PAGE_SIZE, however,
* the buf_idx may point beyond */
@ -245,7 +258,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
}
}
free:
dev_dbg(&dev->pdev->dev, "free amthi cb memory.\n");
dev_dbg(&dev->pdev->dev, "free amthif cb memory.\n");
*offset = 0;
mei_io_cb_free(cb);
out:
@ -269,7 +282,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
if (!dev || !cb)
return -ENODEV;
dev_dbg(&dev->pdev->dev, "write data to amthi client.\n");
dev_dbg(&dev->pdev->dev, "write data to amthif client.\n");
dev->iamthif_state = MEI_IAMTHIF_WRITING;
dev->iamthif_current_cb = cb;
@ -280,15 +293,15 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
cb->request_buffer.size);
ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl);
ret = mei_cl_flow_ctrl_creds(&dev->iamthif_cl);
if (ret < 0)
return ret;
if (ret && dev->mei_host_buffer_is_empty) {
if (ret && dev->hbuf_is_ready) {
ret = 0;
dev->mei_host_buffer_is_empty = false;
if (cb->request_buffer.size > mei_hbuf_max_data(dev)) {
mei_hdr.length = mei_hbuf_max_data(dev);
dev->hbuf_is_ready = false;
if (cb->request_buffer.size > mei_hbuf_max_len(dev)) {
mei_hdr.length = mei_hbuf_max_len(dev);
mei_hdr.msg_complete = 0;
} else {
mei_hdr.length = cb->request_buffer.size;
@ -300,25 +313,24 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
mei_hdr.reserved = 0;
dev->iamthif_msg_buf_index += mei_hdr.length;
if (mei_write_message(dev, &mei_hdr,
(unsigned char *)(dev->iamthif_msg_buf),
mei_hdr.length))
(unsigned char *)dev->iamthif_msg_buf))
return -ENODEV;
if (mei_hdr.msg_complete) {
if (mei_flow_ctrl_reduce(dev, &dev->iamthif_cl))
if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl))
return -ENODEV;
dev->iamthif_flow_control_pending = true;
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
dev_dbg(&dev->pdev->dev, "add amthi cb to write waiting list\n");
dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n");
dev->iamthif_current_cb = cb;
dev->iamthif_file_object = cb->file_object;
list_add_tail(&cb->list, &dev->write_waiting_list.list);
} else {
dev_dbg(&dev->pdev->dev, "message does not complete, so add amthi cb to write list.\n");
dev_dbg(&dev->pdev->dev, "message does not complete, so add amthif cb to write list.\n");
list_add_tail(&cb->list, &dev->write_list.list);
}
} else {
if (!(dev->mei_host_buffer_is_empty))
if (!dev->hbuf_is_ready)
dev_dbg(&dev->pdev->dev, "host buffer is not empty");
dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n");
@ -383,7 +395,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
dev->iamthif_timer = 0;
dev->iamthif_file_object = NULL;
dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
dev_dbg(&dev->pdev->dev, "complete amthif cmd_list cb.\n");
list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) {
list_del(&pos->list);
@ -392,7 +404,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
status = mei_amthif_send_cmd(dev, pos);
if (status) {
dev_dbg(&dev->pdev->dev,
"amthi write failed status = %d\n",
"amthif write failed status = %d\n",
status);
return;
}
@ -412,7 +424,7 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
dev->iamthif_file_object == file) {
mask |= (POLLIN | POLLRDNORM);
dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
dev_dbg(&dev->pdev->dev, "run next amthif cb\n");
mei_amthif_run_next_cmd(dev);
}
return mask;
@ -434,54 +446,51 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
struct mei_msg_hdr *mei_hdr;
struct mei_msg_hdr mei_hdr;
struct mei_cl *cl = cb->cl;
size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
size_t msg_slots = mei_data2slots(len);
mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
mei_hdr->host_addr = cl->host_client_id;
mei_hdr->me_addr = cl->me_client_id;
mei_hdr->reserved = 0;
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
if (*slots >= msg_slots) {
mei_hdr->length = len;
mei_hdr->msg_complete = 1;
mei_hdr.length = len;
mei_hdr.msg_complete = 1;
/* Split the message only if we can write the whole host buffer */
} else if (*slots == dev->hbuf_depth) {
msg_slots = *slots;
len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
mei_hdr->length = len;
mei_hdr->msg_complete = 0;
mei_hdr.length = len;
mei_hdr.msg_complete = 0;
} else {
/* wait for next time the host buffer is empty */
return 0;
}
dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
mei_hdr->length, mei_hdr->msg_complete);
dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
*slots -= msg_slots;
if (mei_write_message(dev, mei_hdr,
dev->iamthif_msg_buf + dev->iamthif_msg_buf_index,
mei_hdr->length)) {
if (mei_write_message(dev, &mei_hdr,
dev->iamthif_msg_buf + dev->iamthif_msg_buf_index)) {
dev->iamthif_state = MEI_IAMTHIF_IDLE;
cl->status = -ENODEV;
list_del(&cb->list);
return -ENODEV;
}
if (mei_flow_ctrl_reduce(dev, cl))
if (mei_cl_flow_ctrl_reduce(cl))
return -ENODEV;
dev->iamthif_msg_buf_index += mei_hdr->length;
dev->iamthif_msg_buf_index += mei_hdr.length;
cl->status = 0;
if (mei_hdr->msg_complete) {
if (mei_hdr.msg_complete) {
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
dev->iamthif_flow_control_pending = true;
/* save iamthif cb sent to amthi client */
/* save iamthif cb sent to amthif client */
cb->buf_idx = dev->iamthif_msg_buf_index;
dev->iamthif_current_cb = cb;
@ -494,11 +503,11 @@ int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
/**
* mei_amthif_irq_read_message - read routine after ISR to
* handle the read amthi message
* handle the read amthif message
*
* @complete_list: An instance of our list structure
* @dev: the device structure
* @mei_hdr: header of amthi message
* @mei_hdr: header of amthif message
*
* returns 0 on success, <0 on failure.
*/
@ -522,10 +531,10 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
return 0;
dev_dbg(&dev->pdev->dev,
"amthi_message_buffer_index =%d\n",
"amthif_message_buffer_index =%d\n",
mei_hdr->length);
dev_dbg(&dev->pdev->dev, "completed amthi read.\n ");
dev_dbg(&dev->pdev->dev, "completed amthif read.\n ");
if (!dev->iamthif_current_cb)
return -ENODEV;
@ -540,8 +549,8 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
cb->read_time = jiffies;
if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) {
/* found the iamthif cb */
dev_dbg(&dev->pdev->dev, "complete the amthi read cb.\n ");
dev_dbg(&dev->pdev->dev, "add the amthi read cb to complete.\n ");
dev_dbg(&dev->pdev->dev, "complete the amthif read cb.\n ");
dev_dbg(&dev->pdev->dev, "add the amthif read cb to complete.\n ");
list_add_tail(&cb->list, &complete_list->list);
}
return 0;
@ -563,7 +572,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
return -EMSGSIZE;
}
*slots -= mei_data2slots(sizeof(struct hbm_flow_control));
if (mei_send_flow_control(dev, &dev->iamthif_cl)) {
if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) {
dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
return -EIO;
}
@ -574,7 +583,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
dev->iamthif_msg_buf_index = 0;
dev->iamthif_msg_buf_size = 0;
dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
return 0;
}
@ -593,7 +602,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
dev->iamthif_msg_buf,
dev->iamthif_msg_buf_index);
list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
dev_dbg(&dev->pdev->dev, "amthi read completed\n");
dev_dbg(&dev->pdev->dev, "amthif read completed\n");
dev->iamthif_timer = jiffies;
dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
dev->iamthif_timer);
@ -601,7 +610,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
mei_amthif_run_next_cmd(dev);
}
dev_dbg(&dev->pdev->dev, "completing amthi call back.\n");
dev_dbg(&dev->pdev->dev, "completing amthif call back.\n");
wake_up_interruptible(&dev->iamthif_cl.wait);
}
@ -635,7 +644,8 @@ static bool mei_clear_list(struct mei_device *dev,
if (dev->iamthif_current_cb == cb_pos) {
dev->iamthif_current_cb = NULL;
/* send flow control to iamthif client */
mei_send_flow_control(dev, &dev->iamthif_cl);
mei_hbm_cl_flow_control_req(dev,
&dev->iamthif_cl);
}
/* free all allocated buffers */
mei_io_cb_free(cb_pos);
@ -706,11 +716,11 @@ int mei_amthif_release(struct mei_device *dev, struct file *file)
if (dev->iamthif_file_object == file &&
dev->iamthif_state != MEI_IAMTHIF_IDLE) {
dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
dev_dbg(&dev->pdev->dev, "amthif canceled iamthif state %d\n",
dev->iamthif_state);
dev->iamthif_canceled = true;
if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
dev_dbg(&dev->pdev->dev, "run next amthif iamthif cb\n");
mei_amthif_run_next_cmd(dev);
}
}

729
drivers/misc/mei/client.c Normal file
View File

@ -0,0 +1,729 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
/**
* mei_me_cl_by_uuid - locate index of me client
*
* @dev: mei device
* @uuid: me client uuid
*
* returns me client index or -ENOENT if not found
*/
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
{
int i, res = -ENOENT;
for (i = 0; i < dev->me_clients_num; ++i)
if (uuid_le_cmp(*uuid,
dev->me_clients[i].props.protocol_name) == 0) {
res = i;
break;
}
return res;
}
/**
* mei_me_cl_by_id - return index to me_clients for client_id
*
* @dev: the device structure
* @client_id: me client id
*
* Locking: called under "dev->device_lock" lock
*
* returns index on success, -ENOENT on failure.
*/
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
int i;
for (i = 0; i < dev->me_clients_num; i++)
if (dev->me_clients[i].client_id == client_id)
break;
if (i == dev->me_clients_num)
return -ENOENT;
/* only now is it safe to dereference me_clients[i] */
if (WARN_ON(dev->me_clients[i].client_id != client_id))
return -ENOENT;
return i;
}
/**
* mei_io_list_flush - removes list entry belonging to cl.
*
* @list: An instance of our list structure
* @cl: host client
*/
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
struct mei_cl_cb *cb;
struct mei_cl_cb *next;
list_for_each_entry_safe(cb, next, &list->list, list) {
if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
list_del(&cb->list);
}
}
/**
* mei_io_cb_free - free mei_cb_private related memory
*
* @cb: mei callback struct
*/
void mei_io_cb_free(struct mei_cl_cb *cb)
{
if (cb == NULL)
return;
kfree(cb->request_buffer.data);
kfree(cb->response_buffer.data);
kfree(cb);
}
/**
* mei_io_cb_init - allocate and initialize io callback
*
* @cl - mei client
* @file: pointer to file structure
*
* returns mei_cl_cb pointer or NULL;
*/
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
struct mei_cl_cb *cb;
cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
if (!cb)
return NULL;
mei_io_list_init(cb);
cb->file_object = fp;
cb->cl = cl;
cb->buf_idx = 0;
return cb;
}
/**
* mei_io_cb_alloc_req_buf - allocate request buffer
*
* @cb - io callback structure
* @length: size of the buffer
*
* returns 0 on success
* -EINVAL if cb is NULL
* -ENOMEM if allocation failed
*/
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
if (!cb)
return -EINVAL;
if (length == 0)
return 0;
cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
if (!cb->request_buffer.data)
return -ENOMEM;
cb->request_buffer.size = length;
return 0;
}
/**
* mei_io_cb_alloc_resp_buf - allocate response buffer
*
* @cb - io callback structure
* @length: size of the buffer
*
* returns 0 on success
* -EINVAL if cb is NULL
* -ENOMEM if allocation failed
*/
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
if (!cb)
return -EINVAL;
if (length == 0)
return 0;
cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
if (!cb->response_buffer.data)
return -ENOMEM;
cb->response_buffer.size = length;
return 0;
}
/**
* mei_cl_flush_queues - flushes queue lists belonging to cl.
*
* @cl: host client
*/
int mei_cl_flush_queues(struct mei_cl *cl)
{
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
mei_io_list_flush(&cl->dev->read_list, cl);
mei_io_list_flush(&cl->dev->write_list, cl);
mei_io_list_flush(&cl->dev->write_waiting_list, cl);
mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
return 0;
}
/**
* mei_cl_init - initializes cl.
*
* @cl: host client to be initialized
* @dev: mei device
*/
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
memset(cl, 0, sizeof(struct mei_cl));
init_waitqueue_head(&cl->wait);
init_waitqueue_head(&cl->rx_wait);
init_waitqueue_head(&cl->tx_wait);
INIT_LIST_HEAD(&cl->link);
cl->reading_state = MEI_IDLE;
cl->writing_state = MEI_IDLE;
cl->dev = dev;
}
/**
* mei_cl_allocate - allocates cl structure and sets it up.
*
* @dev: mei device
* returns the allocated client structure or NULL on failure
*/
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
struct mei_cl *cl;
cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
if (!cl)
return NULL;
mei_cl_init(cl, dev);
return cl;
}
/**
* mei_cl_find_read_cb - find this cl's callback in the read list
*
* @cl: host client
* returns cb on success, NULL on error
*/
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb = NULL;
struct mei_cl_cb *next = NULL;
list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
if (mei_cl_cmp_id(cl, cb->cl))
return cb;
return NULL;
}
/** mei_cl_link - allocate host id in the host map
*
* @cl - host client
* @id - fixed host id or -1 for generating one
* returns 0 on success
*	-EINVAL on incorrect values
*	-ENOENT if client not found
*/
int mei_cl_link(struct mei_cl *cl, int id)
{
struct mei_device *dev;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev = cl->dev;
/* If id is not assigned, get one */
if (id == MEI_HOST_CLIENT_ID_ANY)
id = find_first_zero_bit(dev->host_clients_map,
MEI_CLIENTS_MAX);
if (id >= MEI_CLIENTS_MAX) {
dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ;
return -ENOENT;
}
dev->open_handle_count++;
cl->host_client_id = id;
list_add_tail(&cl->link, &dev->file_list);
set_bit(id, dev->host_clients_map);
cl->state = MEI_FILE_INITIALIZING;
dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id);
return 0;
}
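
Callers are expected to pair allocation with linking. A sketch of the open path these helpers imply — the error codes here are illustrative, not necessarily what the char-device open handler uses:

	struct mei_cl *cl;

	cl = mei_cl_allocate(dev);
	if (!cl)
		return -ENOMEM;

	/* MEI_HOST_CLIENT_ID_ANY lets the host map pick the first free id */
	if (mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY) < 0) {
		kfree(cl);
		return -EMFILE;
	}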
/**
* mei_cl_unlink - remove host client from the list
*
* @cl: host client
*/
int mei_cl_unlink(struct mei_cl *cl)
{
struct mei_device *dev;
struct mei_cl *pos, *next;
/* don't shout on error exit path */
if (!cl)
return 0;
/* wd and amthif might not be initialized */
if (!cl->dev)
return 0;
dev = cl->dev;
list_for_each_entry_safe(pos, next, &dev->file_list, link) {
if (cl->host_client_id == pos->host_client_id) {
dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
pos->host_client_id, pos->me_client_id);
list_del_init(&pos->link);
break;
}
}
return 0;
}
void mei_host_client_init(struct work_struct *work)
{
struct mei_device *dev = container_of(work,
struct mei_device, init_work);
struct mei_client_properties *client_props;
int i;
mutex_lock(&dev->device_lock);
bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
dev->open_handle_count = 0;
/*
* Reserving the first three client IDs
* 0: Reserved for MEI Bus Message communications
* 1: Reserved for Watchdog
* 2: Reserved for AMTHI
*/
bitmap_set(dev->host_clients_map, 0, 3);
for (i = 0; i < dev->me_clients_num; i++) {
client_props = &dev->me_clients[i].props;
if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
mei_amthif_host_init(dev);
else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
mei_wd_host_init(dev);
}
dev->dev_state = MEI_DEV_ENABLED;
mutex_unlock(&dev->device_lock);
}
/**
* mei_cl_disconnect - disconnect host client from the ME client
*
* @cl: host client
*
* Locking: called under "dev->device_lock" lock
*
* returns 0 on success, <0 on failure.
*/
int mei_cl_disconnect(struct mei_cl *cl)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets, err;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (cl->state != MEI_FILE_DISCONNECTING)
return 0;
cb = mei_io_cb_init(cl, NULL);
if (!cb)
return -ENOMEM;
cb->fop_type = MEI_FOP_CLOSE;
if (dev->hbuf_is_ready) {
dev->hbuf_is_ready = false;
if (mei_hbm_cl_disconnect_req(dev, cl)) {
rets = -ENODEV;
dev_err(&dev->pdev->dev, "failed to disconnect.\n");
goto free;
}
mdelay(10); /* Wait for the hardware disconnection to become ready */
list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
} else {
dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
mutex_unlock(&dev->device_lock);
err = wait_event_timeout(dev->wait_recvd_msg,
MEI_FILE_DISCONNECTED == cl->state,
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
if (MEI_FILE_DISCONNECTED == cl->state) {
rets = 0;
dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
} else {
rets = -ENODEV;
if (MEI_FILE_DISCONNECTED != cl->state)
dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
if (err)
dev_dbg(&dev->pdev->dev,
"wait failed disconnect err=%08x\n",
err);
dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
}
mei_io_list_flush(&dev->ctrl_rd_list, cl);
mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
mei_io_cb_free(cb);
return rets;
}
/**
* mei_cl_is_other_connecting - checks if other
* client with the same me client id is connecting
*
* @cl: private data of the file object
*
* returns true if another client is connecting, false otherwise.
*/
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
struct mei_device *dev;
struct mei_cl *pos;
struct mei_cl *next;
if (WARN_ON(!cl || !cl->dev))
return false;
dev = cl->dev;
list_for_each_entry_safe(pos, next, &dev->file_list, link) {
if ((pos->state == MEI_FILE_CONNECTING) &&
(pos != cl) && cl->me_client_id == pos->me_client_id)
return true;
}
return false;
}
/**
* mei_cl_connect - connect the host client to the ME client
*
* @cl: host client
*
* Locking: called under "dev->device_lock" lock
*
* returns 0 on success, <0 on failure.
*/
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
cb = mei_io_cb_init(cl, file);
if (!cb) {
rets = -ENOMEM;
goto out;
}
cb->fop_type = MEI_FOP_IOCTL;
if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
dev->hbuf_is_ready = false;
if (mei_hbm_cl_connect_req(dev, cl)) {
rets = -ENODEV;
goto out;
}
cl->timer_count = MEI_CONNECT_TIMEOUT;
list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
} else {
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
mutex_unlock(&dev->device_lock);
rets = wait_event_timeout(dev->wait_recvd_msg,
(cl->state == MEI_FILE_CONNECTED ||
cl->state == MEI_FILE_DISCONNECTED),
timeout); /* timeout is already in jiffies */
mutex_lock(&dev->device_lock);
if (cl->state != MEI_FILE_CONNECTED) {
rets = -EFAULT;
mei_io_list_flush(&dev->ctrl_rd_list, cl);
mei_io_list_flush(&dev->ctrl_wr_list, cl);
goto out;
}
rets = cl->status;
out:
mei_io_cb_free(cb);
return rets;
}
/**
* mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
*
* @cl: host client
*
* returns 1 if mei_flow_ctrl_creds > 0, 0 otherwise.
* -ENOENT if mei_cl is not present
* -EINVAL if single_recv_buf == 0
*/
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
struct mei_device *dev;
int i;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev = cl->dev;
if (!dev->me_clients_num)
return 0;
if (cl->mei_flow_ctrl_creds > 0)
return 1;
for (i = 0; i < dev->me_clients_num; i++) {
struct mei_me_client *me_cl = &dev->me_clients[i];
if (me_cl->client_id == cl->me_client_id) {
if (me_cl->mei_flow_ctrl_creds) {
if (WARN_ON(me_cl->props.single_recv_buf == 0))
return -EINVAL;
return 1;
} else {
return 0;
}
}
}
return -ENOENT;
}
/**
* mei_cl_flow_ctrl_reduce - reduces flow_control.
*
* @cl: host client
* @returns
* 0 on success
* -ENOENT when me client is not found
* -EINVAL when ctrl credits are <= 0
*/
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
struct mei_device *dev;
int i;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev = cl->dev;
if (!dev->me_clients_num)
return -ENOENT;
for (i = 0; i < dev->me_clients_num; i++) {
struct mei_me_client *me_cl = &dev->me_clients[i];
if (me_cl->client_id == cl->me_client_id) {
if (me_cl->props.single_recv_buf != 0) {
if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
return -EINVAL;
dev->me_clients[i].mei_flow_ctrl_creds--;
} else {
if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
return -EINVAL;
cl->mei_flow_ctrl_creds--;
}
return 0;
}
}
return -ENOENT;
}
/**
* mei_cl_read_start - the start read client message function.
*
* @cl: host client
*
* returns 0 on success, <0 on failure.
*/
int mei_cl_read_start(struct mei_cl *cl)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
int i;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (cl->state != MEI_FILE_CONNECTED)
return -ENODEV;
if (dev->dev_state != MEI_DEV_ENABLED)
return -ENODEV;
if (cl->read_cb) {
dev_dbg(&dev->pdev->dev, "read is pending.\n");
return -EBUSY;
}
i = mei_me_cl_by_id(dev, cl->me_client_id);
if (i < 0) {
dev_err(&dev->pdev->dev, "no such me client %d\n",
cl->me_client_id);
return -ENODEV;
}
cb = mei_io_cb_init(cl, NULL);
if (!cb)
return -ENOMEM;
rets = mei_io_cb_alloc_resp_buf(cb,
dev->me_clients[i].props.max_msg_length);
if (rets)
goto err;
cb->fop_type = MEI_FOP_READ;
cl->read_cb = cb;
if (dev->hbuf_is_ready) {
dev->hbuf_is_ready = false;
if (mei_hbm_cl_flow_control_req(dev, cl)) {
rets = -ENODEV;
goto err;
}
list_add_tail(&cb->list, &dev->read_list.list);
} else {
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
return rets;
err:
mei_io_cb_free(cb);
return rets;
}
/**
* mei_cl_all_disconnect - disconnect forcefully all connected clients
*
* @dev - mei device
*/
void mei_cl_all_disconnect(struct mei_device *dev)
{
struct mei_cl *cl, *next;
list_for_each_entry_safe(cl, next, &dev->file_list, link) {
cl->state = MEI_FILE_DISCONNECTED;
cl->mei_flow_ctrl_creds = 0;
cl->read_cb = NULL;
cl->timer_count = 0;
}
}
/**
* mei_cl_all_read_wakeup - wake up all readings so they can be interrupted
*
* @dev - mei device
*/
void mei_cl_all_read_wakeup(struct mei_device *dev)
{
struct mei_cl *cl, *next;
list_for_each_entry_safe(cl, next, &dev->file_list, link) {
if (waitqueue_active(&cl->rx_wait)) {
dev_dbg(&dev->pdev->dev, "Waking up client!\n");
wake_up_interruptible(&cl->rx_wait);
}
}
}
/**
* mei_cl_all_write_clear - clear all pending writes
* @dev - mei device
*/
void mei_cl_all_write_clear(struct mei_device *dev)
{
struct mei_cl_cb *cb, *next;
list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
list_del(&cb->list);
mei_io_cb_free(cb);
}
}

102
drivers/misc/mei/client.h Normal file
View File

@ -0,0 +1,102 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#ifndef _MEI_CLIENT_H_
#define _MEI_CLIENT_H_
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/poll.h>
#include <linux/mei.h>
#include "mei_dev.h"
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
/*
* MEI IO Functions
*/
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
void mei_io_cb_free(struct mei_cl_cb *priv_cb);
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
/**
* mei_io_list_init - Sets up a queue list.
*
* @list: An instance cl callback structure
*/
static inline void mei_io_list_init(struct mei_cl_cb *list)
{
INIT_LIST_HEAD(&list->list);
}
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
/*
* MEI Host Client Functions
*/
struct mei_cl *mei_cl_allocate(struct mei_device *dev);
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
int mei_cl_link(struct mei_cl *cl, int id);
int mei_cl_unlink(struct mei_cl *cl);
int mei_cl_flush_queues(struct mei_cl *cl);
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
/**
* mei_cl_cmp_id - tells if two clients have the same id
*
* @cl1: host client 1
* @cl2: host client 2
*
* returns true if the ids are the same and neither client is NULL
*/
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
const struct mei_cl *cl2)
{
return cl1 && cl2 &&
(cl1->host_client_id == cl2->host_client_id) &&
(cl1->me_client_id == cl2->me_client_id);
}
int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
/*
* MEI input output function prototype
*/
bool mei_cl_is_other_connecting(struct mei_cl *cl);
int mei_cl_disconnect(struct mei_cl *cl);
int mei_cl_read_start(struct mei_cl *cl);
int mei_cl_connect(struct mei_cl *cl, struct file *file);
void mei_host_client_init(struct work_struct *work);
void mei_cl_all_disconnect(struct mei_device *dev);
void mei_cl_all_read_wakeup(struct mei_device *dev);
void mei_cl_all_write_clear(struct mei_device *dev);
#endif /* _MEI_CLIENT_H_ */
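The queue helpers declared above follow one pattern throughout the driver:
each struct mei_cl_cb doubles as a list node, callbacks are appended with
list_add_tail(), and mei_io_list_flush() unlinks every entry whose client
matches via mei_cl_cmp_id(). A minimal sketch of the teardown side, assuming
only the declarations above (example_drop_client is an illustrative name;
note the flush helper only unlinks entries, it does not free them):

#if 0
static void example_drop_client(struct mei_device *dev, struct mei_cl *cl)
{
	/* drop any control requests still queued for this client */
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
	mei_io_list_flush(&dev->ctrl_rd_list, cl);
}
#endif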

669
drivers/misc/mei/hbm.c Normal file
View File

@ -0,0 +1,669 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hbm.h"
#include "hw-me.h"
/**
* mei_hbm_me_cl_allocate - allocates storage for me clients
*
* @dev: the device structure
*
* returns none.
*/
static void mei_hbm_me_cl_allocate(struct mei_device *dev)
{
struct mei_me_client *clients;
int b;
/* count how many ME clients we have */
for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
dev->me_clients_num++;
if (dev->me_clients_num <= 0)
return;
kfree(dev->me_clients);
dev->me_clients = NULL;
dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
dev->me_clients_num * sizeof(struct mei_me_client));
/* allocate storage for ME clients representation */
clients = kcalloc(dev->me_clients_num,
sizeof(struct mei_me_client), GFP_KERNEL);
if (!clients) {
dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
dev->dev_state = MEI_DEV_RESETING;
mei_reset(dev, 1);
return;
}
dev->me_clients = clients;
return;
}
/**
* mei_hbm_cl_hdr - construct client hbm header
* @cl: - client
* @hbm_cmd: host bus message command
* @buf: buffer for cl header
* @len: buffer length
*/
static inline
void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
{
struct mei_hbm_cl_cmd *cmd = buf;
memset(cmd, 0, len);
cmd->hbm_cmd = hbm_cmd;
cmd->host_addr = cl->host_client_id;
cmd->me_addr = cl->me_client_id;
}
/**
* mei_hbm_cl_addr_equal - tells if the client and the hbm command
* carry the same address
*
* @cl: host client
* @buf: hbm command buffer
*
* returns true if the addresses are the same
*/
static inline
bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
{
struct mei_hbm_cl_cmd *cmd = buf;
return cl->host_client_id == cmd->host_addr &&
cl->me_client_id == cmd->me_addr;
}
/**
* is_treat_specially_client - checks whether the connect response
* addresses the given client and, if so, updates the client state
*
* @cl: private data of the file object
* @rs: connect response bus message
*
* returns true if the response addresses this client
*/
static bool is_treat_specially_client(struct mei_cl *cl,
struct hbm_client_connect_response *rs)
{
if (mei_hbm_cl_addr_equal(cl, rs)) {
if (!rs->status) {
cl->state = MEI_FILE_CONNECTED;
cl->status = 0;
} else {
cl->state = MEI_FILE_DISCONNECTED;
cl->status = -ENODEV;
}
cl->timer_count = 0;
return true;
}
return false;
}
/**
* mei_hbm_start_req - sends start request message.
*
* @dev: the device structure
*/
void mei_hbm_start_req(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_host_version_request *start_req;
const size_t len = sizeof(struct hbm_host_version_request);
mei_hbm_hdr(mei_hdr, len);
/* host start message */
start_req = (struct hbm_host_version_request *)dev->wr_msg.data;
memset(start_req, 0, len);
start_req->hbm_cmd = HOST_START_REQ_CMD;
start_req->host_version.major_version = HBM_MAJOR_VERSION;
start_req->host_version.minor_version = HBM_MINOR_VERSION;
dev->recvd_msg = false;
if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
dev->dev_state = MEI_DEV_RESETING;
mei_reset(dev, 1);
}
dev->init_clients_state = MEI_START_MESSAGE;
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
return ;
}
/**
* mei_hbm_enum_clients_req - sends enumeration client request message.
*
* @dev: the device structure
*
* returns none.
*/
static void mei_hbm_enum_clients_req(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_host_enum_request *enum_req;
const size_t len = sizeof(struct hbm_host_enum_request);
/* enumerate clients */
mei_hbm_hdr(mei_hdr, len);
enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
memset(enum_req, 0, len);
enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
dev->dev_state = MEI_DEV_RESETING;
dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
mei_reset(dev, 1);
}
dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
return;
}
/**
* mei_hbm_prop_req - requests properties of the next client
*
* @dev: the device structure
*
* returns 0 on success, -EIO on write failure
*/
static int mei_hbm_prop_req(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_props_request *prop_req;
const size_t len = sizeof(struct hbm_props_request);
unsigned long next_client_index;
u8 client_num;
client_num = dev->me_client_presentation_num;
next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
dev->me_client_index);
/* We got all client properties */
if (next_client_index == MEI_CLIENTS_MAX) {
schedule_work(&dev->init_work);
return 0;
}
dev->me_clients[client_num].client_id = next_client_index;
dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
mei_hbm_hdr(mei_hdr, len);
prop_req = (struct hbm_props_request *)dev->wr_msg.data;
memset(prop_req, 0, sizeof(struct hbm_props_request));
prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
prop_req->address = next_client_index;
if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
dev->dev_state = MEI_DEV_RESETING;
dev_err(&dev->pdev->dev, "Properties request command failed\n");
mei_reset(dev, 1);
return -EIO;
}
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
dev->me_client_index = next_client_index;
return 0;
}
/**
* mei_hbm_stop_req_prepare - prepare stop request message
*
* @dev - mei device
* @mei_hdr - mei message header
* @data - hbm message body buffer
*/
static void mei_hbm_stop_req_prepare(struct mei_device *dev,
struct mei_msg_hdr *mei_hdr, unsigned char *data)
{
struct hbm_host_stop_request *req =
(struct hbm_host_stop_request *)data;
const size_t len = sizeof(struct hbm_host_stop_request);
mei_hbm_hdr(mei_hdr, len);
memset(req, 0, len);
req->hbm_cmd = HOST_STOP_REQ_CMD;
req->reason = DRIVER_STOP_REQUEST;
}
/**
* mei_hbm_cl_flow_control_req - sends flow control request.
*
* @dev: the device structure
* @cl: client info
*
* This function returns -EIO on write failure
*/
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
const size_t len = sizeof(struct hbm_flow_control);
mei_hbm_hdr(mei_hdr, len);
mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len);
dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
cl->host_client_id, cl->me_client_id);
return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
}
/**
* mei_hbm_add_single_flow_creds - adds single buffer credentials.
*
* @dev: the device structure
* @flow: flow control message
*/
static void mei_hbm_add_single_flow_creds(struct mei_device *dev,
struct hbm_flow_control *flow)
{
struct mei_me_client *client;
int i;
for (i = 0; i < dev->me_clients_num; i++) {
client = &dev->me_clients[i];
if (client && flow->me_addr == client->client_id) {
if (client->props.single_recv_buf) {
client->mei_flow_ctrl_creds++;
dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
flow->me_addr);
dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
client->mei_flow_ctrl_creds);
} else {
BUG(); /* error in flow control */
}
}
}
}
/**
* mei_hbm_cl_flow_control_res - flow control response from me
*
* @dev: the device structure
* @flow_control: flow control response bus message
*/
static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
struct hbm_flow_control *flow_control)
{
struct mei_cl *cl = NULL;
struct mei_cl *next = NULL;
if (!flow_control->host_addr) {
/* single receive buffer */
mei_hbm_add_single_flow_creds(dev, flow_control);
return;
}
/* normal connection */
list_for_each_entry_safe(cl, next, &dev->file_list, link) {
if (mei_hbm_cl_addr_equal(cl, flow_control)) {
cl->mei_flow_ctrl_creds++;
dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
flow_control->host_addr, flow_control->me_addr);
dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
cl->mei_flow_ctrl_creds);
break;
}
}
}
/**
* mei_hbm_cl_disconnect_req - sends disconnect message to fw.
*
* @dev: the device structure
* @cl: a client to disconnect from
*
* This function returns -EIO on write failure
*/
int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
const size_t len = sizeof(struct hbm_client_connect_request);
mei_hbm_hdr(mei_hdr, len);
mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, dev->wr_msg.data, len);
return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
}
/**
* mei_hbm_cl_disconnect_res - disconnect response from ME
*
* @dev: the device structure
* @rs: disconnect response bus message
*/
static void mei_hbm_cl_disconnect_res(struct mei_device *dev,
struct hbm_client_connect_response *rs)
{
struct mei_cl *cl;
struct mei_cl_cb *pos = NULL, *next = NULL;
dev_dbg(&dev->pdev->dev,
"disconnect_response:\n"
"ME Client = %d\n"
"Host Client = %d\n"
"Status = %d\n",
rs->me_addr,
rs->host_addr,
rs->status);
list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
cl = pos->cl;
if (!cl) {
list_del(&pos->list);
return;
}
dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
if (mei_hbm_cl_addr_equal(cl, rs)) {
list_del(&pos->list);
if (!rs->status)
cl->state = MEI_FILE_DISCONNECTED;
cl->status = 0;
cl->timer_count = 0;
break;
}
}
}
/**
* mei_hbm_cl_connect_req - send connection request to specific me client
*
* @dev: the device structure
* @cl: a client to connect to
*
* returns -EIO on write failure
*/
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
const size_t len = sizeof(struct hbm_client_connect_request);
mei_hbm_hdr(mei_hdr, len);
mei_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, dev->wr_msg.data, len);
return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
}
/**
* mei_hbm_cl_connect_res - connect response from the ME
*
* @dev: the device structure
* @rs: connect response bus message
*/
static void mei_hbm_cl_connect_res(struct mei_device *dev,
struct hbm_client_connect_response *rs)
{
struct mei_cl *cl;
struct mei_cl_cb *pos = NULL, *next = NULL;
dev_dbg(&dev->pdev->dev,
"connect_response:\n"
"ME Client = %d\n"
"Host Client = %d\n"
"Status = %d\n",
rs->me_addr,
rs->host_addr,
rs->status);
/* if WD or iamthif client treat specially */
if (is_treat_specially_client(&dev->wd_cl, rs)) {
dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
mei_watchdog_register(dev);
return;
}
if (is_treat_specially_client(&dev->iamthif_cl, rs)) {
dev->iamthif_state = MEI_IAMTHIF_IDLE;
return;
}
list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
cl = pos->cl;
if (!cl) {
list_del(&pos->list);
return;
}
if (pos->fop_type == MEI_FOP_IOCTL) {
if (is_treat_specially_client(cl, rs)) {
list_del(&pos->list);
cl->status = 0;
cl->timer_count = 0;
break;
}
}
}
}
/**
* mei_hbm_fw_disconnect_req - disconnect request initiated by the ME firmware;
* the host sends a disconnect response
*
* @dev: the device structure.
* @disconnect_req: disconnect request bus message from the me
*/
static void mei_hbm_fw_disconnect_req(struct mei_device *dev,
struct hbm_client_connect_request *disconnect_req)
{
struct mei_cl *cl, *next;
const size_t len = sizeof(struct hbm_client_connect_response);
list_for_each_entry_safe(cl, next, &dev->file_list, link) {
if (mei_hbm_cl_addr_equal(cl, disconnect_req)) {
dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
disconnect_req->host_addr,
disconnect_req->me_addr);
cl->state = MEI_FILE_DISCONNECTED;
cl->timer_count = 0;
if (cl == &dev->wd_cl)
dev->wd_pending = false;
else if (cl == &dev->iamthif_cl)
dev->iamthif_timer = 0;
/* prepare disconnect response */
mei_hbm_hdr(&dev->wr_ext_msg.hdr, len);
mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD,
dev->wr_ext_msg.data, len);
break;
}
}
}
/**
* mei_hbm_dispatch - bottom half read routine after ISR to
* handle the read bus message command processing
*
* @dev: the device structure
* @hdr: header of bus message
*/
void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
struct mei_bus_message *mei_msg;
struct mei_me_client *me_client;
struct hbm_host_version_response *version_res;
struct hbm_client_connect_response *connect_res;
struct hbm_client_connect_response *disconnect_res;
struct hbm_client_connect_request *disconnect_req;
struct hbm_flow_control *flow_control;
struct hbm_props_response *props_res;
struct hbm_host_enum_response *enum_res;
/* read the message to our buffer */
BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf));
mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
switch (mei_msg->hbm_cmd) {
case HOST_START_RES_CMD:
version_res = (struct hbm_host_version_response *)mei_msg;
if (!version_res->host_version_supported) {
dev->version = version_res->me_max_version;
dev_dbg(&dev->pdev->dev, "version mismatch.\n");
mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr,
dev->wr_msg.data);
mei_write_message(dev, &dev->wr_msg.hdr,
dev->wr_msg.data);
return;
}
dev->version.major_version = HBM_MAJOR_VERSION;
dev->version.minor_version = HBM_MINOR_VERSION;
if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
dev->init_clients_state == MEI_START_MESSAGE) {
dev->init_clients_timer = 0;
mei_hbm_enum_clients_req(dev);
} else {
dev->recvd_msg = false;
dev_dbg(&dev->pdev->dev, "reset due to received hbm: host start\n");
mei_reset(dev, 1);
return;
}
dev->recvd_msg = true;
dev_dbg(&dev->pdev->dev, "host start response message received.\n");
break;
case CLIENT_CONNECT_RES_CMD:
connect_res = (struct hbm_client_connect_response *) mei_msg;
mei_hbm_cl_connect_res(dev, connect_res);
dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
case CLIENT_DISCONNECT_RES_CMD:
disconnect_res = (struct hbm_client_connect_response *) mei_msg;
mei_hbm_cl_disconnect_res(dev, disconnect_res);
dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
case MEI_FLOW_CONTROL_CMD:
flow_control = (struct hbm_flow_control *) mei_msg;
mei_hbm_cl_flow_control_res(dev, flow_control);
dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
break;
case HOST_CLIENT_PROPERTIES_RES_CMD:
props_res = (struct hbm_props_response *)mei_msg;
me_client = &dev->me_clients[dev->me_client_presentation_num];
if (props_res->status || !dev->me_clients) {
dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
mei_reset(dev, 1);
return;
}
if (me_client->client_id != props_res->address) {
dev_err(&dev->pdev->dev,
"Host client properties reply mismatch\n");
mei_reset(dev, 1);
return;
}
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
dev_err(&dev->pdev->dev,
"Unexpected client properties reply\n");
mei_reset(dev, 1);
return;
}
me_client->props = props_res->client_properties;
dev->me_client_index++;
dev->me_client_presentation_num++;
/* request property for the next client */
mei_hbm_prop_req(dev);
break;
case HOST_ENUM_RES_CMD:
enum_res = (struct hbm_host_enum_response *) mei_msg;
memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
dev->init_clients_timer = 0;
dev->me_client_presentation_num = 0;
dev->me_client_index = 0;
mei_hbm_me_cl_allocate(dev);
dev->init_clients_state =
MEI_CLIENT_PROPERTIES_MESSAGE;
/* first property request */
mei_hbm_prop_req(dev);
} else {
dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
mei_reset(dev, 1);
return;
}
break;
case HOST_STOP_RES_CMD:
dev->dev_state = MEI_DEV_DISABLED;
dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
mei_reset(dev, 1);
break;
case CLIENT_DISCONNECT_REQ_CMD:
/* search for client */
disconnect_req = (struct hbm_client_connect_request *)mei_msg;
mei_hbm_fw_disconnect_req(dev, disconnect_req);
break;
case ME_STOP_REQ_CMD:
mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr,
dev->wr_ext_msg.data);
break;
default:
BUG();
break;
}
}
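Taken together, the request senders and the dispatch switch above implement
a three-phase bring-up handshake with the firmware. A sketch of the expected
message flow (host on the left, ME firmware on the right):

  mei_hbm_start_req()         -->  HOST_START_REQ_CMD
                              <--  HOST_START_RES_CMD   (version check)
  mei_hbm_enum_clients_req()  -->  HOST_ENUM_REQ_CMD
                              <--  HOST_ENUM_RES_CMD    (valid address bitmap)
  mei_hbm_prop_req()          -->  HOST_CLIENT_PROPERTIES_REQ_CMD
                              <--  HOST_CLIENT_PROPERTIES_RES_CMD

One properties round trip runs per bit set in me_clients_map; once the bitmap
is exhausted, schedule_work(&dev->init_work) completes client initialization.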

39
drivers/misc/mei/hbm.h Normal file
View File

@ -0,0 +1,39 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#ifndef _MEI_HBM_H_
#define _MEI_HBM_H_
void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);
static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
{
hdr->host_addr = 0;
hdr->me_addr = 0;
hdr->length = length;
hdr->msg_complete = 1;
hdr->reserved = 0;
}
void mei_hbm_start_req(struct mei_device *dev);
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
#endif /* _MEI_HBM_H_ */
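For bus-level messages (as opposed to per-client commands), mei_hbm_hdr()
above fills a fixed header addressed to client 0 on both sides. A minimal
sketch with illustrative values:

#if 0
struct mei_msg_hdr hdr;

mei_hbm_hdr(&hdr, sizeof(struct hbm_host_enum_request));
/* hdr.host_addr == 0 and hdr.me_addr == 0: addressed to the bus itself;
 * hdr.length is the payload size in bytes and hdr.msg_complete == 1 */
#endif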

167
drivers/misc/mei/hw-me-regs.h Normal file
View File

@ -0,0 +1,167 @@
/******************************************************************************
* Intel Management Engine Interface (Intel MEI) Linux driver
* Intel MEI Interface Header
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Corporation.
* linux-mei@linux.intel.com
* http://www.intel.com
*
* BSD LICENSE
*
* Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef _MEI_HW_MEI_REGS_H_
#define _MEI_HW_MEI_REGS_H_
/*
* MEI device IDs
*/
#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */
#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */
#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */
#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */
#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */
#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */
#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */
#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */
#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */
#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */
#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */
#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */
#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */
#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */
#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */
#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */
#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */
#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */
#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */
#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */
#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */
#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */
#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */
#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */
#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */
#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */
#define MEI_DEV_ID_CPT_1 0x1C3A /* Cougar Point */
#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */
#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */
#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
/*
* MEI HW Section
*/
/* MEI registers */
/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
#define H_CB_WW 0
/* H_CSR - Host Control Status register */
#define H_CSR 4
/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
#define ME_CB_RW 8
/* ME_CSR_HA - ME Control Status Host Access register (read only) */
#define ME_CSR_HA 0xC
/* register bits of H_CSR (Host Control Status register) */
/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
#define H_CBD 0xFF000000
/* Host Circular Buffer Write Pointer */
#define H_CBWP 0x00FF0000
/* Host Circular Buffer Read Pointer */
#define H_CBRP 0x0000FF00
/* Host Reset */
#define H_RST 0x00000010
/* Host Ready */
#define H_RDY 0x00000008
/* Host Interrupt Generate */
#define H_IG 0x00000004
/* Host Interrupt Status */
#define H_IS 0x00000002
/* Host Interrupt Enable */
#define H_IE 0x00000001
/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
access to ME_CBD */
#define ME_CBD_HRA 0xFF000000
/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
#define ME_CBWP_HRA 0x00FF0000
/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
#define ME_CBRP_HRA 0x0000FF00
/* ME Reset HRA - host read only access to ME_RST */
#define ME_RST_HRA 0x00000010
/* ME Ready HRA - host read only access to ME_RDY */
#define ME_RDY_HRA 0x00000008
/* ME Interrupt Generate HRA - host read only access to ME_IG */
#define ME_IG_HRA 0x00000004
/* ME Interrupt Status HRA - host read only access to ME_IS */
#define ME_IS_HRA 0x00000002
/* ME Interrupt Enable HRA - host read only access to ME_IE */
#define ME_IE_HRA 0x00000001
#endif /* _MEI_HW_MEI_REGS_H_ */
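The H_CSR masks above pack the host circular buffer bookkeeping into a single
32-bit register. A sketch of how the fields come apart, mirroring the shifts
used in hw-me.c below (the register value is illustrative only):

#if 0
u32 hcsr = 0x02010008;
unsigned char depth     = (hcsr & H_CBD)  >> 24;  /* buffer depth: 2 slots */
unsigned char write_ptr = (hcsr & H_CBWP) >> 16;  /* write pointer: 1 */
unsigned char read_ptr  = (hcsr & H_CBRP) >> 8;   /* read pointer: 0 */
bool host_ready = (hcsr & H_RDY) == H_RDY;        /* true here */
#endif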

576
drivers/misc/mei/hw-me.c Normal file
View File

@ -0,0 +1,576 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include "mei_dev.h"
#include "hw-me.h"
#include "hbm.h"
/**
* mei_reg_read - Reads 32bit data from the mei device
*
* @hw: the me hardware structure
* @offset: offset from which to read the data
*
* returns register value (u32)
*/
static inline u32 mei_reg_read(const struct mei_me_hw *hw,
unsigned long offset)
{
return ioread32(hw->mem_addr + offset);
}
/**
* mei_reg_write - Writes 32bit data to the mei device
*
* @hw: the me hardware structure
* @offset: offset to which to write the data
* @value: register value to write (u32)
*/
static inline void mei_reg_write(const struct mei_me_hw *hw,
unsigned long offset, u32 value)
{
iowrite32(value, hw->mem_addr + offset);
}
/**
* mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
* read window register
*
* @dev: the device structure
*
* returns ME_CB_RW register value (u32)
*/
static u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
return mei_reg_read(to_me_hw(dev), ME_CB_RW);
}
/**
* mei_mecsr_read - Reads 32bit data from the ME CSR
*
* @hw: the me hardware structure
*
* returns ME_CSR_HA register value (u32)
*/
static inline u32 mei_mecsr_read(const struct mei_me_hw *hw)
{
return mei_reg_read(hw, ME_CSR_HA);
}
/**
* mei_hcsr_read - Reads 32bit data from the host CSR
*
* @hw: the me hardware structure
*
* returns H_CSR register value (u32)
*/
static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
{
return mei_reg_read(hw, H_CSR);
}
/**
* mei_hcsr_set - writes H_CSR register to the mei device,
* and ignores the H_IS bit for it is write-one-to-zero.
*
* @hw: the me hardware structure
* @hcsr: new H_CSR register value
*/
static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
{
hcsr &= ~H_IS;
mei_reg_write(hw, H_CSR, hcsr);
}
/**
* mei_me_hw_config - configure hw dependent settings
*
* @dev: mei device
*/
static void mei_me_hw_config(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(to_me_hw(dev));
/* Doesn't change in runtime */
dev->hbuf_depth = (hcsr & H_CBD) >> 24;
}
/**
* mei_me_intr_clear - clear and stop interrupts
*
* @dev: the device structure
*/
static void mei_me_intr_clear(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr = mei_hcsr_read(hw);
if ((hcsr & H_IS) == H_IS)
mei_reg_write(hw, H_CSR, hcsr);
}
/**
* mei_me_intr_enable - enables mei device interrupts
*
* @dev: the device structure
*/
static void mei_me_intr_enable(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr = mei_hcsr_read(hw);
hcsr |= H_IE;
mei_hcsr_set(hw, hcsr);
}
/**
* mei_me_intr_disable - disables mei device interrupts
*
* @dev: the device structure
*/
static void mei_me_intr_disable(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr = mei_hcsr_read(hw);
hcsr &= ~H_IE;
mei_hcsr_set(hw, hcsr);
}
/**
* mei_me_hw_reset - resets fw via mei csr register.
*
* @dev: the device structure
* @intr_enable: if interrupts should be enabled after reset.
*/
static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr = mei_hcsr_read(hw);
dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr);
hcsr |= (H_RST | H_IG);
if (intr_enable)
hcsr |= H_IE;
else
hcsr &= ~H_IE;
mei_hcsr_set(hw, hcsr);
hcsr = mei_hcsr_read(hw) | H_IG;
hcsr &= ~H_RST;
mei_hcsr_set(hw, hcsr);
hcsr = mei_hcsr_read(hw);
dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr);
}
/**
* mei_me_host_set_ready - enable device
*
* @dev - mei device
*/
static void mei_me_host_set_ready(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
hw->host_hw_state |= H_IE | H_IG | H_RDY;
mei_hcsr_set(hw, hw->host_hw_state);
}
/**
* mei_me_host_is_ready - check whether the host has turned ready
*
* @dev - mei device
* returns bool
*/
static bool mei_me_host_is_ready(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
hw->host_hw_state = mei_hcsr_read(hw);
return (hw->host_hw_state & H_RDY) == H_RDY;
}
/**
* mei_me_hw_is_ready - check whether the me(hw) has turned ready
*
* @dev - mei device
* returns bool
*/
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
hw->me_hw_state = mei_mecsr_read(hw);
return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
}
/**
* mei_hbuf_filled_slots - gets number of device filled buffer slots
*
* @dev: the device structure
*
* returns number of filled slots
*/
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
char read_ptr, write_ptr;
hw->host_hw_state = mei_hcsr_read(hw);
read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);
return (unsigned char) (write_ptr - read_ptr);
}
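/*
 * Worked example for the pointer arithmetic above: the read and write
 * pointers are free-running 8-bit counters, so the subtraction is
 * effectively modulo 256 and wraparound needs no special casing.
 * E.g. raw write pointer 0x02 and raw read pointer 0xFE give
 * (unsigned char)(0x02 - 0xFE) == 4 filled slots.
 */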
/**
* mei_me_hbuf_is_empty - checks if host buffer is empty.
*
* @dev: the device structure
*
* returns true if empty, false - otherwise.
*/
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
return mei_hbuf_filled_slots(dev) == 0;
}
/**
* mei_me_hbuf_empty_slots - counts write empty slots.
*
* @dev: the device structure
*
* returns -EOVERFLOW if overflow, otherwise empty slots count
*/
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
unsigned char filled_slots, empty_slots;
filled_slots = mei_hbuf_filled_slots(dev);
empty_slots = dev->hbuf_depth - filled_slots;
/* check for overflow */
if (filled_slots > dev->hbuf_depth)
return -EOVERFLOW;
return empty_slots;
}
static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
{
return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
}
/**
* mei_me_write_message - writes a message to mei device.
*
* @dev: the device structure
* @header: mei HECI header of message
* @buf: message payload will be written
*
* This function returns -EIO if write has failed
*/
static int mei_me_write_message(struct mei_device *dev,
struct mei_msg_hdr *header,
unsigned char *buf)
{
struct mei_me_hw *hw = to_me_hw(dev);
unsigned long rem, dw_cnt;
unsigned long length = header->length;
u32 *reg_buf = (u32 *)buf;
u32 hcsr;
int i;
int empty_slots;
dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
empty_slots = mei_hbuf_empty_slots(dev);
dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
dw_cnt = mei_data2slots(length);
if (empty_slots < 0 || dw_cnt > empty_slots)
return -EIO;
mei_reg_write(hw, H_CB_WW, *((u32 *) header));
for (i = 0; i < length / 4; i++)
mei_reg_write(hw, H_CB_WW, reg_buf[i]);
rem = length & 0x3;
if (rem > 0) {
u32 reg = 0;
memcpy(&reg, &buf[length - rem], rem);
mei_reg_write(hw, H_CB_WW, reg);
}
hcsr = mei_hcsr_read(hw) | H_IG;
mei_hcsr_set(hw, hcsr);
if (!mei_me_hw_is_ready(dev))
return -EIO;
return 0;
}
/**
* mei_me_count_full_read_slots - counts read full slots.
*
* @dev: the device structure
*
* returns -EOVERFLOW if overflow, otherwise filled slots count
*/
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
char read_ptr, write_ptr;
unsigned char buffer_depth, filled_slots;
hw->me_hw_state = mei_mecsr_read(hw);
buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
filled_slots = (unsigned char) (write_ptr - read_ptr);
/* check for overflow */
if (filled_slots > buffer_depth)
return -EOVERFLOW;
dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
return (int)filled_slots;
}
/**
* mei_me_read_slots - reads a message from mei device.
*
* @dev: the device structure
* @buffer: message buffer will be written
* @buffer_length: message size will be read
*/
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
unsigned long buffer_length)
{
struct mei_me_hw *hw = to_me_hw(dev);
u32 *reg_buf = (u32 *)buffer;
u32 hcsr;
for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
*reg_buf++ = mei_me_mecbrw_read(dev);
if (buffer_length > 0) {
u32 reg = mei_me_mecbrw_read(dev);
memcpy(reg_buf, &reg, buffer_length);
}
hcsr = mei_hcsr_read(hw) | H_IG;
mei_hcsr_set(hw, hcsr);
return 0;
}
/**
* mei_me_irq_quick_handler - The ISR of the MEI device
*
* @irq: The irq number
* @dev_id: pointer to the device structure
*
* returns irqreturn_t
*/
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_me_hw *hw = to_me_hw(dev);
u32 csr_reg = mei_hcsr_read(hw);
if ((csr_reg & H_IS) != H_IS)
return IRQ_NONE;
/* clear H_IS bit in H_CSR */
mei_reg_write(hw, H_CSR, csr_reg);
return IRQ_WAKE_THREAD;
}
/**
* mei_me_irq_thread_handler - function called after ISR to handle the interrupt
* processing.
*
* @irq: The irq number
* @dev_id: pointer to the device structure
*
* returns irqreturn_t
*
*/
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_cl_cb complete_list;
struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
struct mei_cl *cl;
s32 slots;
int rets;
bool bus_message_received;
dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
/* initialize our complete list */
mutex_lock(&dev->device_lock);
mei_io_list_init(&complete_list);
/* Ack the interrupt here
* In case of MSI we don't go through the quick handler */
if (pci_dev_msi_enabled(dev->pdev))
mei_clear_interrupts(dev);
/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) &&
dev->dev_state != MEI_DEV_RESETING &&
dev->dev_state != MEI_DEV_INITIALIZING) {
dev_dbg(&dev->pdev->dev, "FW not ready.\n");
mei_reset(dev, 1);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
if (mei_hw_is_ready(dev)) {
dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
mei_host_set_ready(dev);
dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
/* link is established, start sending messages */
dev->dev_state = MEI_DEV_INIT_CLIENTS;
mei_hbm_start_req(dev);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
} else {
dev_dbg(&dev->pdev->dev, "FW not ready.\n");
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
}
/* check slots available for reading */
slots = mei_count_full_read_slots(dev);
while (slots > 0) {
/* we have urgent data to send so break the read */
if (dev->wr_ext_msg.hdr.length)
break;
dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n");
rets = mei_irq_read_handler(dev, &complete_list, &slots);
if (rets)
goto end;
}
rets = mei_irq_write_handler(dev, &complete_list);
end:
dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
bus_message_received = false;
if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
bus_message_received = true;
}
mutex_unlock(&dev->device_lock);
if (bus_message_received) {
dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
wake_up_interruptible(&dev->wait_recvd_msg);
bus_message_received = false;
}
if (list_empty(&complete_list.list))
return IRQ_HANDLED;
list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
cl = cb_pos->cl;
list_del(&cb_pos->list);
if (cl) {
if (cl != &dev->iamthif_cl) {
dev_dbg(&dev->pdev->dev, "completing call back.\n");
mei_irq_complete_handler(cl, cb_pos);
cb_pos = NULL;
} else if (cl == &dev->iamthif_cl) {
mei_amthif_complete(dev, cb_pos);
}
}
}
return IRQ_HANDLED;
}
static const struct mei_hw_ops mei_me_hw_ops = {
.host_set_ready = mei_me_host_set_ready,
.host_is_ready = mei_me_host_is_ready,
.hw_is_ready = mei_me_hw_is_ready,
.hw_reset = mei_me_hw_reset,
.hw_config = mei_me_hw_config,
.intr_clear = mei_me_intr_clear,
.intr_enable = mei_me_intr_enable,
.intr_disable = mei_me_intr_disable,
.hbuf_free_slots = mei_me_hbuf_empty_slots,
.hbuf_is_ready = mei_me_hbuf_is_empty,
.hbuf_max_len = mei_me_hbuf_max_len,
.write = mei_me_write_message,
.rdbuf_full_slots = mei_me_count_full_read_slots,
.read_hdr = mei_me_mecbrw_read,
.read = mei_me_read_slots
};
/**
* mei_me_dev_init - allocates and initializes the mei device structure
*
* @pdev: The pci device structure
*
* returns The mei_device pointer on success, NULL on failure.
*/
struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
{
struct mei_device *dev;
dev = kzalloc(sizeof(struct mei_device) +
sizeof(struct mei_me_hw), GFP_KERNEL);
if (!dev)
return NULL;
mei_device_init(dev);
INIT_LIST_HEAD(&dev->wd_cl.link);
INIT_LIST_HEAD(&dev->iamthif_cl.link);
mei_io_list_init(&dev->amthif_cmd_list);
mei_io_list_init(&dev->amthif_rd_complete_list);
INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
INIT_WORK(&dev->init_work, mei_host_client_init);
dev->ops = &mei_me_hw_ops;
dev->pdev = pdev;
return dev;
}
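The mei_me_hw_ops table above is what decouples the generic MEI core from
the PCI/ME transport: core code such as mei_hw_init() calls through the ops
pointer rather than into hw-me.c directly. A hedged sketch of what the
core-side wrappers presumably look like (wrapper bodies are assumptions
based on the ops names, not copied from this patch):

#if 0
static inline void mei_hw_config(struct mei_device *dev)
{
	dev->ops->hw_config(dev);
}

static inline bool mei_hbuf_is_ready(struct mei_device *dev)
{
	return dev->ops->hbuf_is_ready(dev);
}
#endif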

48
drivers/misc/mei/hw-me.h Normal file
View File

@ -0,0 +1,48 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#ifndef _MEI_INTERFACE_H_
#define _MEI_INTERFACE_H_
#include <linux/mei.h>
#include "mei_dev.h"
#include "client.h"
struct mei_me_hw {
void __iomem *mem_addr;
/*
* hw states of host and fw(ME)
*/
u32 host_hw_state;
u32 me_hw_state;
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
struct mei_device *mei_me_dev_init(struct pci_dev *pdev);
/* get slots (dwords) from a message length + header (bytes) */
static inline unsigned char mei_data2slots(size_t length)
{
return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
}
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
#endif /* _MEI_INTERFACE_H_ */
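A worked example for mei_data2slots() above, assuming the 4-byte
struct mei_msg_hdr implied by the division: a 10-byte payload plus the
4-byte header is 14 bytes, and DIV_ROUND_UP(14, 4) yields 4 slots.

#if 0
unsigned char slots = mei_data2slots(10);	/* slots == 4 */
#endif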

View File

@ -31,109 +31,6 @@
#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
/*
* Internal Clients Number
*/
#define MEI_WD_HOST_CLIENT_ID 1
#define MEI_IAMTHIF_HOST_CLIENT_ID 2
/*
* MEI device IDs
*/
#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */
#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */
#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */
#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */
#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */
#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */
#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */
#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */
#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */
#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */
#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */
#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */
#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */
#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */
#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */
#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */
#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */
#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */
#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */
#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */
#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */
#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */
#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */
#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */
#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */
#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */
#define MEI_DEV_ID_CPT_1 0x1C3A /* Couger Point */
#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */
#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */
#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
/*
* MEI HW Section
*/
/* MEI registers */
/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
#define H_CB_WW 0
/* H_CSR - Host Control Status register */
#define H_CSR 4
/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
#define ME_CB_RW 8
/* ME_CSR_HA - ME Control Status Host Access register (read only) */
#define ME_CSR_HA 0xC
/* register bits of H_CSR (Host Control Status register) */
/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
#define H_CBD 0xFF000000
/* Host Circular Buffer Write Pointer */
#define H_CBWP 0x00FF0000
/* Host Circular Buffer Read Pointer */
#define H_CBRP 0x0000FF00
/* Host Reset */
#define H_RST 0x00000010
/* Host Ready */
#define H_RDY 0x00000008
/* Host Interrupt Generate */
#define H_IG 0x00000004
/* Host Interrupt Status */
#define H_IS 0x00000002
/* Host Interrupt Enable */
#define H_IE 0x00000001
/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
access to ME_CBD */
#define ME_CBD_HRA 0xFF000000
/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
#define ME_CBWP_HRA 0x00FF0000
/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
#define ME_CBRP_HRA 0x0000FF00
/* ME Reset HRA - host read only access to ME_RST */
#define ME_RST_HRA 0x00000010
/* ME Ready HRA - host read only access to ME_RDY */
#define ME_RDY_HRA 0x00000008
/* ME Interrupt Generate HRA - host read only access to ME_IG */
#define ME_IG_HRA 0x00000004
/* ME Interrupt Status HRA - host read only access to ME_IS */
#define ME_IS_HRA 0x00000002
/* ME Interrupt Enable HRA - host read only access to ME_IE */
#define ME_IE_HRA 0x00000001
/*
* MEI Version
@ -224,6 +121,22 @@ struct mei_bus_message {
u8 data[0];
} __packed;
/**
* struct mei_hbm_cl_cmd - client specific host bus command
* CONNECT, DISCONNECT, and FLOW CONTROL
*
* @hbm_cmd - bus message command header
* @me_addr - address of the client in ME
* @host_addr - address of the client in the driver
* @data
*/
struct mei_hbm_cl_cmd {
u8 hbm_cmd;
u8 me_addr;
u8 host_addr;
u8 data;
};
struct hbm_version {
u8 minor_version;
u8 major_version;
@ -333,11 +246,5 @@ struct hbm_flow_control {
u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
} __packed;
struct mei_me_client {
struct mei_client_properties props;
u8 client_id;
u8 mei_flow_ctrl_creds;
} __packed;
#endif

View File

@ -19,11 +19,11 @@
#include <linux/wait.h>
#include <linux/delay.h>
#include "mei_dev.h"
#include "hw.h"
#include "interface.h"
#include <linux/mei.h>
#include "mei_dev.h"
#include "client.h"
const char *mei_dev_state_str(int state)
{
#define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state
@ -42,84 +42,20 @@ const char *mei_dev_state_str(int state)
#undef MEI_DEV_STATE
}
/**
* mei_io_list_flush - removes list entry belonging to cl.
*
* @list: An instance of our list structure
* @cl: private data of the file object
*/
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
void mei_device_init(struct mei_device *dev)
{
struct mei_cl_cb *pos;
struct mei_cl_cb *next;
list_for_each_entry_safe(pos, next, &list->list, list) {
if (pos->cl) {
if (mei_cl_cmp_id(cl, pos->cl))
list_del(&pos->list);
}
}
}
/**
* mei_cl_flush_queues - flushes queue lists belonging to cl.
*
* @dev: the device structure
* @cl: private data of the file object
*/
int mei_cl_flush_queues(struct mei_cl *cl)
{
if (!cl || !cl->dev)
return -EINVAL;
dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
mei_io_list_flush(&cl->dev->read_list, cl);
mei_io_list_flush(&cl->dev->write_list, cl);
mei_io_list_flush(&cl->dev->write_waiting_list, cl);
mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
return 0;
}
/**
* init_mei_device - allocates and initializes the mei device structure
*
* @pdev: The pci device structure
*
* returns The mei_device pointer on success, NULL on failure.
*/
struct mei_device *mei_device_init(struct pci_dev *pdev)
{
struct mei_device *dev;
dev = kzalloc(sizeof(struct mei_device), GFP_KERNEL);
if (!dev)
return NULL;
/* setup our list array */
INIT_LIST_HEAD(&dev->file_list);
INIT_LIST_HEAD(&dev->wd_cl.link);
INIT_LIST_HEAD(&dev->iamthif_cl.link);
mutex_init(&dev->device_lock);
init_waitqueue_head(&dev->wait_recvd_msg);
init_waitqueue_head(&dev->wait_stop_wd);
dev->dev_state = MEI_DEV_INITIALIZING;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
mei_io_list_init(&dev->read_list);
mei_io_list_init(&dev->write_list);
mei_io_list_init(&dev->write_waiting_list);
mei_io_list_init(&dev->ctrl_wr_list);
mei_io_list_init(&dev->ctrl_rd_list);
mei_io_list_init(&dev->amthif_cmd_list);
mei_io_list_init(&dev->amthif_rd_complete_list);
dev->pdev = pdev;
return dev;
}
/**
@ -131,101 +67,64 @@ struct mei_device *mei_device_init(struct pci_dev *pdev)
*/
int mei_hw_init(struct mei_device *dev)
{
int err = 0;
int ret;
int ret = 0;
mutex_lock(&dev->device_lock);
dev->host_hw_state = mei_hcsr_read(dev);
dev->me_hw_state = mei_mecsr_read(dev);
dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, mestate = 0x%08x.\n",
dev->host_hw_state, dev->me_hw_state);
/* acknowledge interrupt and stop interrupts */
if ((dev->host_hw_state & H_IS) == H_IS)
mei_reg_write(dev, H_CSR, dev->host_hw_state);
mei_clear_interrupts(dev);
/* Doesn't change in runtime */
dev->hbuf_depth = (dev->host_hw_state & H_CBD) >> 24;
mei_hw_config(dev);
dev->recvd_msg = false;
dev_dbg(&dev->pdev->dev, "resetting the mei device at startup.\n");
mei_reset(dev, 1);
dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
dev->host_hw_state, dev->me_hw_state);
/* wait for ME to turn on ME_RDY */
if (!dev->recvd_msg) {
mutex_unlock(&dev->device_lock);
err = wait_event_interruptible_timeout(dev->wait_recvd_msg,
ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
dev->recvd_msg,
mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
mutex_lock(&dev->device_lock);
}
if (err <= 0 && !dev->recvd_msg) {
if (ret <= 0 && !dev->recvd_msg) {
dev->dev_state = MEI_DEV_DISABLED;
dev_dbg(&dev->pdev->dev,
"wait_event_interruptible_timeout failed "
"on wait for ME to turn on ME_RDY.\n");
ret = -ENODEV;
goto out;
goto err;
}
if (!(((dev->host_hw_state & H_RDY) == H_RDY) &&
((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA))) {
dev->dev_state = MEI_DEV_DISABLED;
dev_dbg(&dev->pdev->dev,
"host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
dev->host_hw_state, dev->me_hw_state);
if (!(dev->host_hw_state & H_RDY))
dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");
if (!mei_host_is_ready(dev)) {
dev_err(&dev->pdev->dev, "host is not ready.\n");
goto err;
}
if (!(dev->me_hw_state & ME_RDY_HRA))
dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n");
dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
ret = -ENODEV;
goto out;
if (!mei_hw_is_ready(dev)) {
dev_err(&dev->pdev->dev, "ME is not ready.\n");
goto err;
}
if (dev->version.major_version != HBM_MAJOR_VERSION ||
dev->version.minor_version != HBM_MINOR_VERSION) {
dev_dbg(&dev->pdev->dev, "MEI start failed.\n");
ret = -ENODEV;
goto out;
goto err;
}
dev->recvd_msg = false;
dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
dev->host_hw_state, dev->me_hw_state);
dev_dbg(&dev->pdev->dev, "ME turn on ME_RDY and host turn on H_RDY.\n");
dev_dbg(&dev->pdev->dev, "link layer has been established.\n");
dev_dbg(&dev->pdev->dev, "MEI start success.\n");
ret = 0;
out:
mutex_unlock(&dev->device_lock);
return ret;
}
/**
* mei_hw_reset - resets fw via mei csr register.
*
* @dev: the device structure
* @interrupts_enabled: if interrupt should be enabled after reset.
*/
static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
{
dev->host_hw_state |= (H_RST | H_IG);
if (interrupts_enabled)
mei_enable_interrupts(dev);
else
mei_disable_interrupts(dev);
return 0;
err:
dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
dev->dev_state = MEI_DEV_DISABLED;
mutex_unlock(&dev->device_lock);
return -ENODEV;
}
/**
@ -236,56 +135,34 @@ static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
*/
void mei_reset(struct mei_device *dev, int interrupts_enabled)
{
struct mei_cl *cl_pos = NULL;
struct mei_cl *cl_next = NULL;
struct mei_cl_cb *cb_pos = NULL;
struct mei_cl_cb *cb_next = NULL;
bool unexpected;
if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
dev->need_reset = true;
if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET)
return;
}
unexpected = (dev->dev_state != MEI_DEV_INITIALIZING &&
dev->dev_state != MEI_DEV_DISABLED &&
dev->dev_state != MEI_DEV_POWER_DOWN &&
dev->dev_state != MEI_DEV_POWER_UP);
dev->host_hw_state = mei_hcsr_read(dev);
dev_dbg(&dev->pdev->dev, "before reset host_hw_state = 0x%08x.\n",
dev->host_hw_state);
mei_hw_reset(dev, interrupts_enabled);
dev->host_hw_state &= ~H_RST;
dev->host_hw_state |= H_IG;
mei_hcsr_set(dev);
dev_dbg(&dev->pdev->dev, "currently saved host_hw_state = 0x%08x.\n",
dev->host_hw_state);
dev->need_reset = false;
if (dev->dev_state != MEI_DEV_INITIALIZING) {
if (dev->dev_state != MEI_DEV_DISABLED &&
dev->dev_state != MEI_DEV_POWER_DOWN)
dev->dev_state = MEI_DEV_RESETING;
list_for_each_entry_safe(cl_pos,
cl_next, &dev->file_list, link) {
cl_pos->state = MEI_FILE_DISCONNECTED;
cl_pos->mei_flow_ctrl_creds = 0;
cl_pos->read_cb = NULL;
cl_pos->timer_count = 0;
}
mei_cl_all_disconnect(dev);
/* remove entry if already in list */
dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
mei_me_cl_unlink(dev, &dev->wd_cl);
mei_me_cl_unlink(dev, &dev->iamthif_cl);
mei_cl_unlink(&dev->wd_cl);
if (dev->open_handle_count > 0)
dev->open_handle_count--;
mei_cl_unlink(&dev->iamthif_cl);
if (dev->open_handle_count > 0)
dev->open_handle_count--;
mei_amthif_reset_params(dev);
memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
@ -295,392 +172,17 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
dev->rd_msg_hdr = 0;
dev->wd_pending = false;
/* update the state of the registers after reset */
dev->host_hw_state = mei_hcsr_read(dev);
dev->me_hw_state = mei_mecsr_read(dev);
dev_dbg(&dev->pdev->dev, "after reset host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
dev->host_hw_state, dev->me_hw_state);
if (unexpected)
dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
mei_dev_state_str(dev->dev_state));
/* Wake up all readings so they can be interrupted */
list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
if (waitqueue_active(&cl_pos->rx_wait)) {
dev_dbg(&dev->pdev->dev, "Waking up client!\n");
wake_up_interruptible(&cl_pos->rx_wait);
}
}
/* wake up all readings so they can be interrupted */
mei_cl_all_read_wakeup(dev);
/* remove all waiting requests */
list_for_each_entry_safe(cb_pos, cb_next, &dev->write_list.list, list) {
list_del(&cb_pos->list);
mei_io_cb_free(cb_pos);
}
mei_cl_all_write_clear(dev);
}
/**
* host_start_message - mei host sends start message.
*
* @dev: the device structure
*
* returns none.
*/
void mei_host_start_message(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr;
struct hbm_host_version_request *start_req;
const size_t len = sizeof(struct hbm_host_version_request);
mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
/* host start message */
start_req = (struct hbm_host_version_request *)&dev->wr_msg_buf[1];
memset(start_req, 0, len);
start_req->hbm_cmd = HOST_START_REQ_CMD;
start_req->host_version.major_version = HBM_MAJOR_VERSION;
start_req->host_version.minor_version = HBM_MINOR_VERSION;
dev->recvd_msg = false;
if (mei_write_message(dev, mei_hdr, (unsigned char *)start_req, len)) {
dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
dev->dev_state = MEI_DEV_RESETING;
mei_reset(dev, 1);
}
dev->init_clients_state = MEI_START_MESSAGE;
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
return ;
}
/**
* host_enum_clients_message - host sends enumeration client request message.
*
* @dev: the device structure
*
* returns none.
*/
void mei_host_enum_clients_message(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr;
struct hbm_host_enum_request *enum_req;
const size_t len = sizeof(struct hbm_host_enum_request);
/* enumerate clients */
mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
memset(enum_req, 0, sizeof(struct hbm_host_enum_request));
enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
if (mei_write_message(dev, mei_hdr, (unsigned char *)enum_req, len)) {
dev->dev_state = MEI_DEV_RESETING;
dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
mei_reset(dev, 1);
}
dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
return;
}
/**
* mei_allocate_me_clients_storage - allocates storage for ME clients
*
* @dev: the device structure
*
* returns none.
*/
void mei_allocate_me_clients_storage(struct mei_device *dev)
{
struct mei_me_client *clients;
int b;
/* count how many ME clients we have */
for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
dev->me_clients_num++;
if (dev->me_clients_num <= 0)
return ;
if (dev->me_clients != NULL) {
kfree(dev->me_clients);
dev->me_clients = NULL;
}
dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
dev->me_clients_num * sizeof(struct mei_me_client));
/* allocate storage for ME clients representation */
clients = kcalloc(dev->me_clients_num,
sizeof(struct mei_me_client), GFP_KERNEL);
if (!clients) {
dev_dbg(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
dev->dev_state = MEI_DEV_RESETING;
mei_reset(dev, 1);
return ;
}
dev->me_clients = clients;
return ;
}
void mei_host_client_init(struct work_struct *work)
{
struct mei_device *dev = container_of(work,
struct mei_device, init_work);
struct mei_client_properties *client_props;
int i;
mutex_lock(&dev->device_lock);
bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
dev->open_handle_count = 0;
/*
* Reserving the first three client IDs
* 0: Reserved for MEI Bus Message communications
* 1: Reserved for Watchdog
* 2: Reserved for AMTHI
*/
bitmap_set(dev->host_clients_map, 0, 3);
for (i = 0; i < dev->me_clients_num; i++) {
client_props = &dev->me_clients[i].props;
if (!uuid_le_cmp(client_props->protocol_name, mei_amthi_guid))
mei_amthif_host_init(dev);
else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
mei_wd_host_init(dev);
}
dev->dev_state = MEI_DEV_ENABLED;
mutex_unlock(&dev->device_lock);
}
int mei_host_client_enumerate(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr;
struct hbm_props_request *prop_req;
const size_t len = sizeof(struct hbm_props_request);
unsigned long next_client_index;
u8 client_num;
client_num = dev->me_client_presentation_num;
next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
dev->me_client_index);
/* We got all client properties */
if (next_client_index == MEI_CLIENTS_MAX) {
schedule_work(&dev->init_work);
return 0;
}
dev->me_clients[client_num].client_id = next_client_index;
dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
prop_req = (struct hbm_props_request *)&dev->wr_msg_buf[1];
memset(prop_req, 0, sizeof(struct hbm_props_request));
prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
prop_req->address = next_client_index;
if (mei_write_message(dev, mei_hdr, (unsigned char *) prop_req,
mei_hdr->length)) {
dev->dev_state = MEI_DEV_RESETING;
dev_err(&dev->pdev->dev, "Properties request command failed\n");
mei_reset(dev, 1);
return -EIO;
}
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
dev->me_client_index = next_client_index;
return 0;
}
/**
* mei_cl_init - initializes a client structure.
*
* @priv: client structure to be initialized
* @dev: the device structure
*/
void mei_cl_init(struct mei_cl *priv, struct mei_device *dev)
{
memset(priv, 0, sizeof(struct mei_cl));
init_waitqueue_head(&priv->wait);
init_waitqueue_head(&priv->rx_wait);
init_waitqueue_head(&priv->tx_wait);
INIT_LIST_HEAD(&priv->link);
priv->reading_state = MEI_IDLE;
priv->writing_state = MEI_IDLE;
priv->dev = dev;
}
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid)
{
int i, res = -ENOENT;
for (i = 0; i < dev->me_clients_num; ++i)
if (uuid_le_cmp(*cuuid,
dev->me_clients[i].props.protocol_name) == 0) {
res = i;
break;
}
return res;
}
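As a usage sketch (example_log_wd_props is hypothetical, not part of the patch): the returned index resolves directly to the cached client properties; mei_wd_guid is one of the UUIDs referenced above:

static void example_log_wd_props(struct mei_device *dev)
{
	int i = mei_me_cl_by_uuid(dev, &mei_wd_guid);

	if (i >= 0)
		dev_dbg(&dev->pdev->dev, "wd client max msg len = %d\n",
			dev->me_clients[i].props.max_msg_length);
}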
/**
* mei_me_cl_link - creates a link between the host and ME client and
* adds the client to the file list
*
* @dev: the device structure
* @cl: host client associated with an opened file descriptor
* @cuuid: uuid of the ME client
* @host_cl_id: id of the host client
*
* returns the ME client index on success,
* -EINVAL on incorrect values,
* -ENOENT if the client is not found
*/
int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
const uuid_le *cuuid, u8 host_cl_id)
{
int i;
if (!dev || !cl || !cuuid)
return -EINVAL;
/* check for valid client id */
i = mei_me_cl_by_uuid(dev, cuuid);
if (i >= 0) {
cl->me_client_id = dev->me_clients[i].client_id;
cl->state = MEI_FILE_CONNECTING;
cl->host_client_id = host_cl_id;
list_add_tail(&cl->link, &dev->file_list);
return (u8)i;
}
return -ENOENT;
}
/**
* mei_me_cl_unlink - removes a host client from the file list
*
* @dev: the device structure
* @cl: host client to be removed
*/
void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_cl *pos, *next;
list_for_each_entry_safe(pos, next, &dev->file_list, link) {
if (cl->host_client_id == pos->host_client_id) {
dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
pos->host_client_id, pos->me_client_id);
list_del_init(&pos->link);
break;
}
}
}
/**
* mei_cl_allocate - allocates a client structure and sets it up.
* @dev: the device structure
*
* returns the allocated client or NULL on failure
*/
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
struct mei_cl *cl;
cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
if (!cl)
return NULL;
mei_cl_init(cl, dev);
return cl;
}
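A hedged lifecycle sketch tying the two helpers together (example_new_client is hypothetical; assumes linux/slab.h for kfree): allocate, then link to an ME client, freeing on failure since mei_me_cl_link is the step that adds the client to dev->file_list:

static struct mei_cl *example_new_client(struct mei_device *dev,
					 const uuid_le *cuuid, u8 host_id)
{
	struct mei_cl *cl = mei_cl_allocate(dev);

	if (cl && mei_me_cl_link(dev, cl, cuuid, host_id) < 0) {
		kfree(cl);	/* not on any list yet, plain free is safe */
		cl = NULL;
	}
	return cl;
}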
/**
* mei_disconnect_host_client - sends disconnect message to fw from host client.
*
* @dev: the device structure
* @cl: private data of the file object
*
* Locking: called under "dev->device_lock" lock
*
* returns 0 on success, <0 on failure.
*/
int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_cl_cb *cb;
int rets, err;
if (!dev || !cl)
return -ENODEV;
if (cl->state != MEI_FILE_DISCONNECTING)
return 0;
cb = mei_io_cb_init(cl, NULL);
if (!cb)
return -ENOMEM;
cb->fop_type = MEI_FOP_CLOSE;
if (dev->mei_host_buffer_is_empty) {
dev->mei_host_buffer_is_empty = false;
if (mei_disconnect(dev, cl)) {
rets = -ENODEV;
dev_dbg(&dev->pdev->dev, "failed to call mei_disconnect.\n");
goto free;
}
mdelay(10); /* Wait for hardware disconnection ready */
list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
} else {
dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
mutex_unlock(&dev->device_lock);
err = wait_event_timeout(dev->wait_recvd_msg,
MEI_FILE_DISCONNECTED == cl->state,
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
if (MEI_FILE_DISCONNECTED == cl->state) {
rets = 0;
dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
} else {
rets = -ENODEV;
if (MEI_FILE_DISCONNECTED != cl->state)
dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
if (err)
dev_dbg(&dev->pdev->dev,
"wait failed disconnect err=%08x\n",
err);
dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
}
mei_io_list_flush(&dev->ctrl_rd_list, cl);
mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
mei_io_cb_free(cb);
return rets;
}

View File

@ -1,388 +0,0 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#include <linux/pci.h>
#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"
/**
* mei_hcsr_set - writes the H_CSR register to the mei device,
* ignoring the H_IS bit, as it is write-one-to-clear.
*
* @dev: the device structure
*/
void mei_hcsr_set(struct mei_device *dev)
{
if ((dev->host_hw_state & H_IS) == H_IS)
dev->host_hw_state &= ~H_IS;
mei_reg_write(dev, H_CSR, dev->host_hw_state);
dev->host_hw_state = mei_hcsr_read(dev);
}
/**
* mei_enable_interrupts - enables mei device interrupts
*
* @dev: the device structure
*/
void mei_enable_interrupts(struct mei_device *dev)
{
dev->host_hw_state |= H_IE;
mei_hcsr_set(dev);
}
/**
* mei_disable_interrupts - disables mei device interrupts
*
* @dev: the device structure
*/
void mei_disable_interrupts(struct mei_device *dev)
{
dev->host_hw_state &= ~H_IE;
mei_hcsr_set(dev);
}
/**
* mei_hbuf_filled_slots - gets number of device filled buffer slots
*
* @dev: the device structure
*
* returns number of filled slots
*/
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
char read_ptr, write_ptr;
dev->host_hw_state = mei_hcsr_read(dev);
read_ptr = (char) ((dev->host_hw_state & H_CBRP) >> 8);
write_ptr = (char) ((dev->host_hw_state & H_CBWP) >> 16);
return (unsigned char) (write_ptr - read_ptr);
}
/**
* mei_hbuf_is_empty - checks if host buffer is empty.
*
* @dev: the device structure
*
* returns true if empty, false - otherwise.
*/
bool mei_hbuf_is_empty(struct mei_device *dev)
{
return mei_hbuf_filled_slots(dev) == 0;
}
/**
* mei_hbuf_empty_slots - counts write empty slots.
*
* @dev: the device structure
*
* returns -EOVERFLOW on overflow, otherwise the empty slots count
*/
int mei_hbuf_empty_slots(struct mei_device *dev)
{
unsigned char filled_slots, empty_slots;
filled_slots = mei_hbuf_filled_slots(dev);
empty_slots = dev->hbuf_depth - filled_slots;
/* check for overflow */
if (filled_slots > dev->hbuf_depth)
return -EOVERFLOW;
return empty_slots;
}
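The pointer subtraction above relies on modular char arithmetic, so it stays correct when the hardware write pointer wraps past the read pointer. A standalone sketch of the same math, with a hypothetical 8-slot buffer:

#include <stdio.h>

int main(void)
{
	unsigned char hbuf_depth = 8;
	char read_ptr = (char)250, write_ptr = 2;	/* write side wrapped */
	unsigned char filled = (unsigned char)(write_ptr - read_ptr);

	/* 250 wraps to -6 as a signed char; 2 - (-6) = 8: buffer is full */
	printf("filled = %u, empty = %u\n", filled, hbuf_depth - filled);
	return 0;
}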
/**
* mei_write_message - writes a message to mei device.
*
* @dev: the device structure
* @header: header of the message
* @buf: message buffer to be written
* @length: length of the message in bytes
*
* This function returns -EIO if write has failed
*/
int mei_write_message(struct mei_device *dev, struct mei_msg_hdr *header,
unsigned char *buf, unsigned long length)
{
unsigned long rem, dw_cnt;
u32 *reg_buf = (u32 *)buf;
int i;
int empty_slots;
dev_dbg(&dev->pdev->dev,
"mei_write_message header=%08x.\n",
*((u32 *) header));
empty_slots = mei_hbuf_empty_slots(dev);
dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
dw_cnt = mei_data2slots(length);
if (empty_slots < 0 || dw_cnt > empty_slots)
return -EIO;
mei_reg_write(dev, H_CB_WW, *((u32 *) header));
for (i = 0; i < length / 4; i++)
mei_reg_write(dev, H_CB_WW, reg_buf[i]);
rem = length & 0x3;
if (rem > 0) {
u32 reg = 0;
memcpy(&reg, &buf[length - rem], rem);
mei_reg_write(dev, H_CB_WW, reg);
}
dev->host_hw_state = mei_hcsr_read(dev);
dev->host_hw_state |= H_IG;
mei_hcsr_set(dev);
dev->me_hw_state = mei_mecsr_read(dev);
if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
return -EIO;
return 0;
}
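A hedged send sketch (example_send is hypothetical, not part of the patch): a caller fills a mei_msg_hdr, checks that the payload fits the free slots, and only then writes; the length must also fit the header's length field, which mei_hbuf_max_data() bounds:

static int example_send(struct mei_device *dev, struct mei_cl *cl,
			unsigned char *payload, unsigned long len)
{
	struct mei_msg_hdr hdr = {
		.host_addr = cl->host_client_id,
		.me_addr = cl->me_client_id,
		.length = len,
		.msg_complete = 1,
		.reserved = 0,
	};

	if (mei_hbuf_empty_slots(dev) < mei_data2slots(len))
		return -EAGAIN;	/* retry once the host buffer drains */

	return mei_write_message(dev, &hdr, payload, len);
}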
/**
* mei_count_full_read_slots - counts read full slots.
*
* @dev: the device structure
*
* returns -EOVERFLOW on overflow, otherwise the filled slots count
*/
int mei_count_full_read_slots(struct mei_device *dev)
{
char read_ptr, write_ptr;
unsigned char buffer_depth, filled_slots;
dev->me_hw_state = mei_mecsr_read(dev);
buffer_depth = (unsigned char)((dev->me_hw_state & ME_CBD_HRA) >> 24);
read_ptr = (char) ((dev->me_hw_state & ME_CBRP_HRA) >> 8);
write_ptr = (char) ((dev->me_hw_state & ME_CBWP_HRA) >> 16);
filled_slots = (unsigned char) (write_ptr - read_ptr);
/* check for overflow */
if (filled_slots > buffer_depth)
return -EOVERFLOW;
dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
return (int)filled_slots;
}
/**
* mei_read_slots - reads a message from mei device.
*
* @dev: the device structure
* @buffer: buffer the message will be read into
* @buffer_length: message size to read in bytes
*/
void mei_read_slots(struct mei_device *dev, unsigned char *buffer,
unsigned long buffer_length)
{
u32 *reg_buf = (u32 *)buffer;
for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
*reg_buf++ = mei_mecbrw_read(dev);
if (buffer_length > 0) {
u32 reg = mei_mecbrw_read(dev);
memcpy(reg_buf, &reg, buffer_length);
}
dev->host_hw_state |= H_IG;
mei_hcsr_set(dev);
}
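The tail handling above copies only the valid remainder of the final 32-bit register read. A standalone illustration of that memcpy pattern for a 6-byte message (little-endian assumed):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	unsigned char msg[6];
	uint32_t regs[2] = { 0x44434241, 0x00004645 };	/* "ABCD", "EF" */

	memcpy(msg, &regs[0], sizeof(uint32_t));	/* full dword */
	memcpy(msg + 4, &regs[1], 6 - 4);		/* 2-byte tail only */
	printf("%.6s\n", msg);				/* prints ABCDEF */
	return 0;
}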
/**
* mei_flow_ctrl_creds - checks flow_control credentials.
*
* @dev: the device structure
* @cl: private data of the file object
*
* returns 1 if the client has flow control credits, 0 otherwise,
* -ENOENT if the ME client is not present,
* -EINVAL if single_recv_buf == 0
*/
int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl)
{
int i;
if (!dev->me_clients_num)
return 0;
if (cl->mei_flow_ctrl_creds > 0)
return 1;
for (i = 0; i < dev->me_clients_num; i++) {
struct mei_me_client *me_cl = &dev->me_clients[i];
if (me_cl->client_id == cl->me_client_id) {
if (me_cl->mei_flow_ctrl_creds) {
if (WARN_ON(me_cl->props.single_recv_buf == 0))
return -EINVAL;
return 1;
} else {
return 0;
}
}
}
return -ENOENT;
}
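As a hedged usage sketch (example_may_send is hypothetical): a write path treats a positive return as permission to send one message, and zero as "wait for a MEI_FLOW_CONTROL_CMD credit from the firmware":

static bool example_may_send(struct mei_device *dev, struct mei_cl *cl)
{
	int creds = mei_flow_ctrl_creds(dev, cl);

	if (creds < 0)		/* -ENOENT or -EINVAL: client state is broken */
		return false;
	return creds > 0;	/* 0 means no credit yet, queue the write */
}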
/**
* mei_flow_ctrl_reduce - reduces flow_control.
*
* @dev: the device structure
* @cl: private data of the file object
* returns 0 on success,
* -ENOENT when the ME client is not found,
* -EINVAL when flow control credits are <= 0
*/
int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
{
int i;
if (!dev->me_clients_num)
return -ENOENT;
for (i = 0; i < dev->me_clients_num; i++) {
struct mei_me_client *me_cl = &dev->me_clients[i];
if (me_cl->client_id == cl->me_client_id) {
if (me_cl->props.single_recv_buf != 0) {
if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
return -EINVAL;
dev->me_clients[i].mei_flow_ctrl_creds--;
} else {
if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
return -EINVAL;
cl->mei_flow_ctrl_creds--;
}
return 0;
}
}
return -ENOENT;
}
/**
* mei_send_flow_control - sends flow control to fw.
*
* @dev: the device structure
* @cl: private data of the file object
*
* This function returns -EIO on write failure
*/
int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr *mei_hdr;
struct hbm_flow_control *flow_ctrl;
const size_t len = sizeof(struct hbm_flow_control);
mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
flow_ctrl = (struct hbm_flow_control *)&dev->wr_msg_buf[1];
memset(flow_ctrl, 0, len);
flow_ctrl->hbm_cmd = MEI_FLOW_CONTROL_CMD;
flow_ctrl->host_addr = cl->host_client_id;
flow_ctrl->me_addr = cl->me_client_id;
/* FIXME: reserved !? */
memset(flow_ctrl->reserved, 0, sizeof(flow_ctrl->reserved));
dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
cl->host_client_id, cl->me_client_id);
return mei_write_message(dev, mei_hdr,
(unsigned char *) flow_ctrl, len);
}
/**
* mei_other_client_is_connecting - checks if another host client is
* connecting to the same ME client.
*
* @dev: the device structure
* @cl: private data of the file object
*
* returns 1 if another client is connecting, 0 otherwise.
*/
int mei_other_client_is_connecting(struct mei_device *dev,
struct mei_cl *cl)
{
struct mei_cl *cl_pos = NULL;
struct mei_cl *cl_next = NULL;
list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
if ((cl_pos->state == MEI_FILE_CONNECTING) &&
(cl_pos != cl) &&
cl->me_client_id == cl_pos->me_client_id)
return 1;
}
return 0;
}
/**
* mei_disconnect - sends disconnect message to fw.
*
* @dev: the device structure
* @cl: private data of the file object
*
* This function returns -EIO on write failure
*/
int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr *mei_hdr;
struct hbm_client_connect_request *req;
const size_t len = sizeof(struct hbm_client_connect_request);
mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
req = (struct hbm_client_connect_request *)&dev->wr_msg_buf[1];
memset(req, 0, len);
req->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
req->host_addr = cl->host_client_id;
req->me_addr = cl->me_client_id;
req->reserved = 0;
return mei_write_message(dev, mei_hdr, (unsigned char *)req, len);
}
/**
* mei_connect - sends connect message to fw.
*
* @dev: the device structure
* @cl: private data of the file object
*
* This function returns -EIO on write failure
*/
int mei_connect(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr *mei_hdr;
struct hbm_client_connect_request *req;
const size_t len = sizeof(struct hbm_client_connect_request);
mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
req = (struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
req->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
req->host_addr = cl->host_client_id;
req->me_addr = cl->me_client_id;
req->reserved = 0;
return mei_write_message(dev, mei_hdr, (unsigned char *) req, len);
}

View File

@ -1,81 +0,0 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#ifndef _MEI_INTERFACE_H_
#define _MEI_INTERFACE_H_
#include <linux/mei.h>
#include "mei_dev.h"
void mei_read_slots(struct mei_device *dev,
unsigned char *buffer,
unsigned long buffer_length);
int mei_write_message(struct mei_device *dev,
struct mei_msg_hdr *header,
unsigned char *write_buffer,
unsigned long write_length);
bool mei_hbuf_is_empty(struct mei_device *dev);
int mei_hbuf_empty_slots(struct mei_device *dev);
static inline size_t mei_hbuf_max_data(const struct mei_device *dev)
{
return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
}
/* get slots (dwords) from a message length + header (bytes) */
static inline unsigned char mei_data2slots(size_t length)
{
return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
}
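Worked example of the slot math: a 13-byte payload plus the 4-byte mei_msg_hdr needs DIV_ROUND_UP(4 + 13, 4) = 5 dword slots. The same computation in standalone C (DIV_ROUND_UP open-coded, since this sketch runs outside the kernel):

#include <stdio.h>
#include <stddef.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	size_t hdr_len = 4;	/* sizeof(struct mei_msg_hdr) */
	size_t payload = 13;

	printf("slots = %zu\n", DIV_ROUND_UP(hdr_len + payload, 4));
	return 0;	/* prints slots = 5 */
}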
int mei_count_full_read_slots(struct mei_device *dev);
int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl);
int mei_wd_send(struct mei_device *dev);
int mei_wd_stop(struct mei_device *dev);
int mei_wd_host_init(struct mei_device *dev);
/*
* mei_watchdog_register - registers the watchdog interface
* once a connection to the WD client is established
* @dev - mei device
*/
void mei_watchdog_register(struct mei_device *dev);
/*
* mei_watchdog_unregister - Unregistering watchdog interface
* @dev - mei device
*/
void mei_watchdog_unregister(struct mei_device *dev);
int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl);
int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl);
int mei_disconnect(struct mei_device *dev, struct mei_cl *cl);
int mei_other_client_is_connecting(struct mei_device *dev, struct mei_cl *cl);
int mei_connect(struct mei_device *dev, struct mei_cl *cl);
#endif /* _MEI_INTERFACE_H_ */

View File

@ -21,41 +21,21 @@
#include <linux/fs.h>
#include <linux/jiffies.h>
#include "mei_dev.h"
#include <linux/mei.h>
#include "hw.h"
#include "interface.h"
#include "mei_dev.h"
#include "hbm.h"
#include "hw-me.h"
#include "client.h"
/**
* mei_interrupt_quick_handler - The ISR of the MEI device
*
* @irq: The irq number
* @dev_id: pointer to the device structure
*
* returns irqreturn_t
*/
irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
u32 csr_reg = mei_hcsr_read(dev);
if ((csr_reg & H_IS) != H_IS)
return IRQ_NONE;
/* clear H_IS bit in H_CSR */
mei_reg_write(dev, H_CSR, csr_reg);
return IRQ_WAKE_THREAD;
}
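A hedged registration sketch (example_setup_irq is hypothetical; assumes linux/interrupt.h): the quick handler acks H_IS in hard-irq context, and returning IRQ_WAKE_THREAD defers the heavy lifting to the threaded handler shown at the end of this file:

static int example_setup_irq(struct mei_device *dev)
{
	return request_threaded_irq(dev->pdev->irq,
				    mei_interrupt_quick_handler,
				    mei_interrupt_thread_handler,
				    IRQF_SHARED, "mei", dev);
}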
/**
* _mei_cmpl - processes completed operation.
* mei_irq_complete_handler - processes completed operation.
*
* @cl: private data of the file object.
* @cb_pos: callback block.
*/
static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
{
if (cb_pos->fop_type == MEI_FOP_WRITE) {
mei_io_cb_free(cb_pos);
@ -150,8 +130,8 @@ quit:
dev_dbg(&dev->pdev->dev, "message read\n");
if (!buffer) {
mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n",
*(u32 *) dev->rd_msg_buf);
dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
MEI_HDR_PRM(mei_hdr));
}
return 0;
@ -179,7 +159,7 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
if (mei_disconnect(dev, cl)) {
if (mei_hbm_cl_disconnect_req(dev, cl)) {
cl->status = 0;
cb_pos->buf_idx = 0;
list_move_tail(&cb_pos->list, &cmpl_list->list);
@ -195,440 +175,6 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
return 0;
}
/**
* is_treat_specially_client - checks if the connect response matches
* this client and updates the client state accordingly.
*
* @cl: private data of the file object
* @rs: connect response bus message
*
*/
static bool is_treat_specially_client(struct mei_cl *cl,
struct hbm_client_connect_response *rs)
{
if (cl->host_client_id == rs->host_addr &&
cl->me_client_id == rs->me_addr) {
if (!rs->status) {
cl->state = MEI_FILE_CONNECTED;
cl->status = 0;
} else {
cl->state = MEI_FILE_DISCONNECTED;
cl->status = -ENODEV;
}
cl->timer_count = 0;
return true;
}
return false;
}
/**
* mei_client_connect_response - connects to response irq routine
*
* @dev: the device structure
* @rs: connect response bus message
*/
static void mei_client_connect_response(struct mei_device *dev,
struct hbm_client_connect_response *rs)
{
struct mei_cl *cl;
struct mei_cl_cb *pos = NULL, *next = NULL;
dev_dbg(&dev->pdev->dev,
"connect_response:\n"
"ME Client = %d\n"
"Host Client = %d\n"
"Status = %d\n",
rs->me_addr,
rs->host_addr,
rs->status);
/* if WD or iamthif client treat specially */
if (is_treat_specially_client(&(dev->wd_cl), rs)) {
dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
mei_watchdog_register(dev);
return;
}
if (is_treat_specially_client(&(dev->iamthif_cl), rs)) {
dev->iamthif_state = MEI_IAMTHIF_IDLE;
return;
}
list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
cl = pos->cl;
if (!cl) {
list_del(&pos->list);
return;
}
if (pos->fop_type == MEI_FOP_IOCTL) {
if (is_treat_specially_client(cl, rs)) {
list_del(&pos->list);
cl->status = 0;
cl->timer_count = 0;
break;
}
}
}
}
/**
* mei_client_disconnect_response - disconnects from response irq routine
*
* @dev: the device structure
* @rs: disconnect response bus message
*/
static void mei_client_disconnect_response(struct mei_device *dev,
struct hbm_client_connect_response *rs)
{
struct mei_cl *cl;
struct mei_cl_cb *pos = NULL, *next = NULL;
dev_dbg(&dev->pdev->dev,
"disconnect_response:\n"
"ME Client = %d\n"
"Host Client = %d\n"
"Status = %d\n",
rs->me_addr,
rs->host_addr,
rs->status);
list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
cl = pos->cl;
if (!cl) {
list_del(&pos->list);
return;
}
dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
if (cl->host_client_id == rs->host_addr &&
cl->me_client_id == rs->me_addr) {
list_del(&pos->list);
if (!rs->status)
cl->state = MEI_FILE_DISCONNECTED;
cl->status = 0;
cl->timer_count = 0;
break;
}
}
}
/**
* same_flow_addr - tells if a client and a flow control message
* have the same address.
*
* @cl: private data of the file object.
* @flow: flow control.
*
* returns nonzero if the addresses match, 0 otherwise.
*/
static int same_flow_addr(struct mei_cl *cl, struct hbm_flow_control *flow)
{
return (cl->host_client_id == flow->host_addr &&
cl->me_client_id == flow->me_addr);
}
/**
* add_single_flow_creds - adds single buffer credentials.
*
* @dev: the device structure.
* @flow: flow control.
*/
static void add_single_flow_creds(struct mei_device *dev,
struct hbm_flow_control *flow)
{
struct mei_me_client *client;
int i;
for (i = 0; i < dev->me_clients_num; i++) {
client = &dev->me_clients[i];
if (client && flow->me_addr == client->client_id) {
if (client->props.single_recv_buf) {
client->mei_flow_ctrl_creds++;
dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
flow->me_addr);
dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
client->mei_flow_ctrl_creds);
} else {
BUG(); /* error in flow control */
}
}
}
}
/**
* mei_client_flow_control_response - flow control response irq routine
*
* @dev: the device structure
* @flow_control: flow control response bus message
*/
static void mei_client_flow_control_response(struct mei_device *dev,
struct hbm_flow_control *flow_control)
{
struct mei_cl *cl_pos = NULL;
struct mei_cl *cl_next = NULL;
if (!flow_control->host_addr) {
/* single receive buffer */
add_single_flow_creds(dev, flow_control);
} else {
/* normal connection */
list_for_each_entry_safe(cl_pos, cl_next,
&dev->file_list, link) {
dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in file_list\n");
dev_dbg(&dev->pdev->dev, "cl of host client %d ME client %d.\n",
cl_pos->host_client_id,
cl_pos->me_client_id);
dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
flow_control->host_addr,
flow_control->me_addr);
if (same_flow_addr(cl_pos, flow_control)) {
dev_dbg(&dev->pdev->dev, "recv ctrl msg for host %d ME %d.\n",
flow_control->host_addr,
flow_control->me_addr);
cl_pos->mei_flow_ctrl_creds++;
dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
cl_pos->mei_flow_ctrl_creds);
break;
}
}
}
}
/**
* same_disconn_addr - tells if a client and a disconnect request
* have the same address
*
* @cl: private data of the file object.
* @req: disconnect request.
*
* returns nonzero if the addresses match, 0 otherwise.
*/
static int same_disconn_addr(struct mei_cl *cl,
struct hbm_client_connect_request *req)
{
return (cl->host_client_id == req->host_addr &&
cl->me_client_id == req->me_addr);
}
/**
* mei_client_disconnect_request - disconnects from request irq routine
*
* @dev: the device structure.
* @disconnect_req: disconnect request bus message.
*/
static void mei_client_disconnect_request(struct mei_device *dev,
struct hbm_client_connect_request *disconnect_req)
{
struct hbm_client_connect_response *disconnect_res;
struct mei_cl *pos, *next;
const size_t len = sizeof(struct hbm_client_connect_response);
list_for_each_entry_safe(pos, next, &dev->file_list, link) {
if (same_disconn_addr(pos, disconnect_req)) {
dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
disconnect_req->host_addr,
disconnect_req->me_addr);
pos->state = MEI_FILE_DISCONNECTED;
pos->timer_count = 0;
if (pos == &dev->wd_cl)
dev->wd_pending = false;
else if (pos == &dev->iamthif_cl)
dev->iamthif_timer = 0;
/* prepare disconnect response */
(void)mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
disconnect_res =
(struct hbm_client_connect_response *)
&dev->wr_ext_msg.data;
disconnect_res->hbm_cmd = CLIENT_DISCONNECT_RES_CMD;
disconnect_res->host_addr = pos->host_client_id;
disconnect_res->me_addr = pos->me_client_id;
disconnect_res->status = 0;
break;
}
}
}
/**
* mei_irq_thread_read_bus_message - bottom half read routine after ISR to
* handle the read bus message cmd processing.
*
* @dev: the device structure
* @mei_hdr: header of bus message
*/
static void mei_irq_thread_read_bus_message(struct mei_device *dev,
struct mei_msg_hdr *mei_hdr)
{
struct mei_bus_message *mei_msg;
struct mei_me_client *me_client;
struct hbm_host_version_response *version_res;
struct hbm_client_connect_response *connect_res;
struct hbm_client_connect_response *disconnect_res;
struct hbm_client_connect_request *disconnect_req;
struct hbm_flow_control *flow_control;
struct hbm_props_response *props_res;
struct hbm_host_enum_response *enum_res;
struct hbm_host_stop_request *stop_req;
/* read the message to our buffer */
BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
switch (mei_msg->hbm_cmd) {
case HOST_START_RES_CMD:
version_res = (struct hbm_host_version_response *) mei_msg;
if (version_res->host_version_supported) {
dev->version.major_version = HBM_MAJOR_VERSION;
dev->version.minor_version = HBM_MINOR_VERSION;
if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
dev->init_clients_state == MEI_START_MESSAGE) {
dev->init_clients_timer = 0;
mei_host_enum_clients_message(dev);
} else {
dev->recvd_msg = false;
dev_dbg(&dev->pdev->dev, "IMEI reset due to received host start response bus message.\n");
mei_reset(dev, 1);
return;
}
} else {
u32 *buf = dev->wr_msg_buf;
const size_t len = sizeof(struct hbm_host_stop_request);
dev->version = version_res->me_max_version;
/* send stop message */
mei_hdr = mei_hbm_hdr(&buf[0], len);
stop_req = (struct hbm_host_stop_request *)&buf[1];
memset(stop_req, 0, len);
stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
stop_req->reason = DRIVER_STOP_REQUEST;
mei_write_message(dev, mei_hdr,
(unsigned char *)stop_req, len);
dev_dbg(&dev->pdev->dev, "version mismatch.\n");
return;
}
dev->recvd_msg = true;
dev_dbg(&dev->pdev->dev, "host start response message received.\n");
break;
case CLIENT_CONNECT_RES_CMD:
connect_res = (struct hbm_client_connect_response *) mei_msg;
mei_client_connect_response(dev, connect_res);
dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
case CLIENT_DISCONNECT_RES_CMD:
disconnect_res = (struct hbm_client_connect_response *) mei_msg;
mei_client_disconnect_response(dev, disconnect_res);
dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
case MEI_FLOW_CONTROL_CMD:
flow_control = (struct hbm_flow_control *) mei_msg;
mei_client_flow_control_response(dev, flow_control);
dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
break;
case HOST_CLIENT_PROPERTIES_RES_CMD:
props_res = (struct hbm_props_response *)mei_msg;
me_client = &dev->me_clients[dev->me_client_presentation_num];
if (props_res->status || !dev->me_clients) {
dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
mei_reset(dev, 1);
return;
}
if (me_client->client_id != props_res->address) {
dev_err(&dev->pdev->dev,
"Host client properties reply mismatch\n");
mei_reset(dev, 1);
return;
}
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
dev_err(&dev->pdev->dev,
"Unexpected client properties reply\n");
mei_reset(dev, 1);
return;
}
me_client->props = props_res->client_properties;
dev->me_client_index++;
dev->me_client_presentation_num++;
mei_host_client_enumerate(dev);
break;
case HOST_ENUM_RES_CMD:
enum_res = (struct hbm_host_enum_response *) mei_msg;
memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
dev->init_clients_timer = 0;
dev->me_client_presentation_num = 0;
dev->me_client_index = 0;
mei_allocate_me_clients_storage(dev);
dev->init_clients_state =
MEI_CLIENT_PROPERTIES_MESSAGE;
mei_host_client_enumerate(dev);
} else {
dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
mei_reset(dev, 1);
return;
}
break;
case HOST_STOP_RES_CMD:
dev->dev_state = MEI_DEV_DISABLED;
dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
mei_reset(dev, 1);
break;
case CLIENT_DISCONNECT_REQ_CMD:
/* search for client */
disconnect_req = (struct hbm_client_connect_request *)mei_msg;
mei_client_disconnect_request(dev, disconnect_req);
break;
case ME_STOP_REQ_CMD:
{
/* prepare stop request: sent in next interrupt event */
const size_t len = sizeof(struct hbm_host_stop_request);
mei_hdr = mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
stop_req = (struct hbm_host_stop_request *)&dev->wr_ext_msg.data;
memset(stop_req, 0, len);
stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
stop_req->reason = DRIVER_STOP_REQUEST;
break;
}
default:
BUG();
break;
}
}
/**
* _mei_hb_read - processes read related operation.
@ -655,7 +201,7 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
*slots -= mei_data2slots(sizeof(struct hbm_flow_control));
if (mei_send_flow_control(dev, cl)) {
if (mei_hbm_cl_flow_control_req(dev, cl)) {
cl->status = -ENODEV;
cb_pos->buf_idx = 0;
list_move_tail(&cb_pos->list, &cmpl_list->list);
@ -691,8 +237,8 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
}
cl->state = MEI_FILE_CONNECTING;
*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
if (mei_connect(dev, cl)) {
*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
if (mei_hbm_cl_connect_req(dev, cl)) {
cl->status = -ENODEV;
cb_pos->buf_idx = 0;
list_del(&cb_pos->list);
@ -717,25 +263,24 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
struct mei_msg_hdr *mei_hdr;
struct mei_msg_hdr mei_hdr;
struct mei_cl *cl = cb->cl;
size_t len = cb->request_buffer.size - cb->buf_idx;
size_t msg_slots = mei_data2slots(len);
mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
mei_hdr->host_addr = cl->host_client_id;
mei_hdr->me_addr = cl->me_client_id;
mei_hdr->reserved = 0;
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
if (*slots >= msg_slots) {
mei_hdr->length = len;
mei_hdr->msg_complete = 1;
mei_hdr.length = len;
mei_hdr.msg_complete = 1;
/* Split the message only if we can write the whole host buffer */
} else if (*slots == dev->hbuf_depth) {
msg_slots = *slots;
len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
mei_hdr->length = len;
mei_hdr->msg_complete = 0;
mei_hdr.length = len;
mei_hdr.msg_complete = 0;
} else {
/* wait for next time the host buffer is empty */
return 0;
@ -743,23 +288,22 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
cb->request_buffer.size, cb->buf_idx);
dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
mei_hdr->length, mei_hdr->msg_complete);
dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
*slots -= msg_slots;
if (mei_write_message(dev, mei_hdr,
cb->request_buffer.data + cb->buf_idx, len)) {
if (mei_write_message(dev, &mei_hdr,
cb->request_buffer.data + cb->buf_idx)) {
cl->status = -ENODEV;
list_move_tail(&cb->list, &cmpl_list->list);
return -ENODEV;
}
if (mei_flow_ctrl_reduce(dev, cl))
if (mei_cl_flow_ctrl_reduce(cl))
return -ENODEV;
cl->status = 0;
cb->buf_idx += mei_hdr->length;
if (mei_hdr->msg_complete)
cb->buf_idx += mei_hdr.length;
if (mei_hdr.msg_complete)
list_move_tail(&cb->list, &dev->write_waiting_list.list);
return 0;
@ -769,15 +313,14 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
* mei_irq_thread_read_handler - bottom half read routine after ISR to
* handle the read processing.
*
* @cmpl_list: An instance of our list structure
* @dev: the device structure
* @cmpl_list: An instance of our list structure
* @slots: slots to read.
*
* returns 0 on success, <0 on failure.
*/
static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
struct mei_device *dev,
s32 *slots)
int mei_irq_read_handler(struct mei_device *dev,
struct mei_cl_cb *cmpl_list, s32 *slots)
{
struct mei_msg_hdr *mei_hdr;
struct mei_cl *cl_pos = NULL;
@ -785,13 +328,13 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
int ret = 0;
if (!dev->rd_msg_hdr) {
dev->rd_msg_hdr = mei_mecbrw_read(dev);
dev->rd_msg_hdr = mei_read_hdr(dev);
dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
(*slots)--;
dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
}
mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n", mei_hdr->length);
dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
if (mei_hdr->reserved || !dev->rd_msg_hdr) {
dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
@ -830,19 +373,18 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
/* decide where to read the message to */
if (!mei_hdr->host_addr) {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
mei_irq_thread_read_bus_message(dev, mei_hdr);
mei_hbm_dispatch(dev, mei_hdr);
dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
(MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
(dev->iamthif_state == MEI_IAMTHIF_READING)) {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n",
mei_hdr->length);
dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr);
if (ret)
goto end;
} else {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n");
ret = mei_irq_thread_read_client_message(cmpl_list,
@ -869,15 +411,15 @@ end:
/**
* mei_irq_thread_write_handler - bottom half write routine after
* ISR to handle the write processing.
* mei_irq_write_handler - dispatch write requests
* after irq received
*
* @dev: the device structure
* @cmpl_list: An instance of our list structure
*
* returns 0 on success, <0 on failure.
*/
static int mei_irq_thread_write_handler(struct mei_device *dev,
int mei_irq_write_handler(struct mei_device *dev,
struct mei_cl_cb *cmpl_list)
{
@ -887,7 +429,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
s32 slots;
int ret;
if (!mei_hbuf_is_empty(dev)) {
if (!mei_hbuf_is_ready(dev)) {
dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
return 0;
}
@ -930,16 +472,16 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
if (dev->wr_ext_msg.hdr.length) {
mei_write_message(dev, &dev->wr_ext_msg.hdr,
dev->wr_ext_msg.data, dev->wr_ext_msg.hdr.length);
dev->wr_ext_msg.data);
slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
dev->wr_ext_msg.hdr.length = 0;
}
if (dev->dev_state == MEI_DEV_ENABLED) {
if (dev->wd_pending &&
mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
if (mei_wd_send(dev))
dev_dbg(&dev->pdev->dev, "wd send failed.\n");
else if (mei_flow_ctrl_reduce(dev, &dev->wd_cl))
else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
return -ENODEV;
dev->wd_pending = false;
@ -978,7 +520,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
break;
case MEI_FOP_IOCTL:
/* connect message */
if (mei_other_client_is_connecting(dev, cl))
if (mei_cl_is_other_connecting(cl))
continue;
ret = _mei_irq_thread_ioctl(dev, &slots, pos,
cl, cmpl_list);
@ -998,7 +540,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
cl = pos->cl;
if (cl == NULL)
continue;
if (mei_flow_ctrl_creds(dev, cl) <= 0) {
if (mei_cl_flow_ctrl_creds(cl) <= 0) {
dev_dbg(&dev->pdev->dev,
"No flow control credentials for client %d, not sending.\n",
cl->host_client_id);
@ -1123,115 +665,3 @@ out:
mutex_unlock(&dev->device_lock);
}
/**
* mei_interrupt_thread_handler - function called after ISR to handle the interrupt
* processing.
*
* @irq: The irq number
* @dev_id: pointer to the device structure
*
* returns irqreturn_t
*
*/
irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_cl_cb complete_list;
struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
struct mei_cl *cl;
s32 slots;
int rets;
bool bus_message_received;
dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
/* initialize our complete list */
mutex_lock(&dev->device_lock);
mei_io_list_init(&complete_list);
dev->host_hw_state = mei_hcsr_read(dev);
/* Ack the interrupt here
* In case of MSI we don't go through the quick handler */
if (pci_dev_msi_enabled(dev->pdev))
mei_reg_write(dev, H_CSR, dev->host_hw_state);
dev->me_hw_state = mei_mecsr_read(dev);
/* check if ME wants a reset */
if ((dev->me_hw_state & ME_RDY_HRA) == 0 &&
dev->dev_state != MEI_DEV_RESETING &&
dev->dev_state != MEI_DEV_INITIALIZING) {
dev_dbg(&dev->pdev->dev, "FW not ready.\n");
mei_reset(dev, 1);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
/* check if we need to start the dev */
if ((dev->host_hw_state & H_RDY) == 0) {
if ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA) {
dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
dev->host_hw_state |= (H_IE | H_IG | H_RDY);
mei_hcsr_set(dev);
dev->dev_state = MEI_DEV_INIT_CLIENTS;
dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
/* link is established
* start sending messages.
*/
mei_host_start_message(dev);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
} else {
dev_dbg(&dev->pdev->dev, "FW not ready.\n");
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
}
/* check slots available for reading */
slots = mei_count_full_read_slots(dev);
while (slots > 0) {
/* we have urgent data to send so break the read */
if (dev->wr_ext_msg.hdr.length)
break;
dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_handler.\n");
rets = mei_irq_thread_read_handler(&complete_list, dev, &slots);
if (rets)
goto end;
}
rets = mei_irq_thread_write_handler(dev, &complete_list);
end:
dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
dev->host_hw_state = mei_hcsr_read(dev);
dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
bus_message_received = false;
if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
bus_message_received = true;
}
mutex_unlock(&dev->device_lock);
if (bus_message_received) {
dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
wake_up_interruptible(&dev->wait_recvd_msg);
bus_message_received = false;
}
if (list_empty(&complete_list.list))
return IRQ_HANDLED;
list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
cl = cb_pos->cl;
list_del(&cb_pos->list);
if (cl) {
if (cl != &dev->iamthif_cl) {
dev_dbg(&dev->pdev->dev, "completing call back.\n");
_mei_cmpl(cl, cb_pos);
cb_pos = NULL;
} else if (cl == &dev->iamthif_cl) {
mei_amthif_complete(dev, cb_pos);
}
}
}
return IRQ_HANDLED;
}

View File

@ -1,366 +0,0 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include "mei_dev.h"
#include "hw.h"
#include <linux/mei.h>
#include "interface.h"
/**
* mei_io_cb_free - free mei_cl_cb related memory
*
* @cb: mei callback struct
*/
void mei_io_cb_free(struct mei_cl_cb *cb)
{
if (cb == NULL)
return;
kfree(cb->request_buffer.data);
kfree(cb->response_buffer.data);
kfree(cb);
}
/**
* mei_io_cb_init - allocate and initialize io callback
*
* @cl: mei client
* @fp: pointer to file structure
*
* returns mei_cl_cb pointer or NULL;
*/
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
struct mei_cl_cb *cb;
cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
if (!cb)
return NULL;
mei_io_list_init(cb);
cb->file_object = fp;
cb->cl = cl;
cb->buf_idx = 0;
return cb;
}
/**
* mei_io_cb_alloc_req_buf - allocate request buffer
*
* @cb: io callback structure
* @length: size of the buffer
*
* returns 0 on success
* -EINVAL if cb is NULL
* -ENOMEM if allocation failed
*/
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
if (!cb)
return -EINVAL;
if (length == 0)
return 0;
cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
if (!cb->request_buffer.data)
return -ENOMEM;
cb->request_buffer.size = length;
return 0;
}
/**
* mei_io_cb_alloc_resp_buf - allocate response buffer
*
* @cb: io callback structure
* @length: size of the buffer
*
* returns 0 on success
* -EINVAL if cb is NULL
* -ENOMEM if allocation failed
*/
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
if (!cb)
return -EINVAL;
if (length == 0)
return 0;
cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
if (!cb->response_buffer.data)
return -ENOMEM;
cb->response_buffer.size = length;
return 0;
}
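A hedged lifecycle sketch combining the helpers above (example_prep_read_cb is hypothetical): allocate the callback, size its response buffer, and release everything through mei_io_cb_free on failure:

static struct mei_cl_cb *example_prep_read_cb(struct mei_cl *cl,
					      size_t max_len)
{
	struct mei_cl_cb *cb = mei_io_cb_init(cl, NULL);

	if (!cb)
		return NULL;
	if (mei_io_cb_alloc_resp_buf(cb, max_len)) {
		mei_io_cb_free(cb);	/* frees both buffers and the cb */
		return NULL;
	}
	cb->fop_type = MEI_FOP_READ;
	return cb;
}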
/**
* mei_me_cl_by_id - returns the index into me_clients for client_id
*
* @dev: the device structure
* @client_id: me client id
*
* Locking: called under "dev->device_lock" lock
*
* returns index on success, -ENOENT on failure.
*/
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
int i;
for (i = 0; i < dev->me_clients_num; i++)
if (dev->me_clients[i].client_id == client_id)
break;
/* check the bound first: the loop can run off the end of the array */
if (i == dev->me_clients_num)
return -ENOENT;
if (WARN_ON(dev->me_clients[i].client_id != client_id))
return -ENOENT;
return i;
}
/**
* mei_ioctl_connect_client - the connect to fw client IOCTL function
*
* @dev: the device structure
* @data: IOCTL connect data, input and output parameters
* @file: private data of the file object
*
* Locking: called under "dev->device_lock" lock
*
* returns 0 on success, <0 on failure.
*/
int mei_ioctl_connect_client(struct file *file,
struct mei_connect_client_data *data)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
struct mei_client *client;
struct mei_cl *cl;
long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
int i;
int err;
int rets;
cl = file->private_data;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
dev_dbg(&dev->pdev->dev, "mei_ioctl_connect_client() Entry\n");
/* buffered ioctl cb */
cb = mei_io_cb_init(cl, file);
if (!cb) {
rets = -ENOMEM;
goto end;
}
cb->fop_type = MEI_FOP_IOCTL;
if (dev->dev_state != MEI_DEV_ENABLED) {
rets = -ENODEV;
goto end;
}
if (cl->state != MEI_FILE_INITIALIZING &&
cl->state != MEI_FILE_DISCONNECTED) {
rets = -EBUSY;
goto end;
}
/* find ME client we're trying to connect to */
i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
cl->me_client_id = dev->me_clients[i].client_id;
cl->state = MEI_FILE_CONNECTING;
}
dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
cl->me_client_id);
dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
dev->me_clients[i].props.protocol_version);
dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
dev->me_clients[i].props.max_msg_length);
/* if we're connecting to amthi client then we will use the
* existing connection
*/
if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) {
dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
rets = -ENODEV;
goto end;
}
clear_bit(cl->host_client_id, dev->host_clients_map);
mei_me_cl_unlink(dev, cl);
kfree(cl);
cl = NULL;
file->private_data = &dev->iamthif_cl;
client = &data->out_client_properties;
client->max_msg_length =
dev->me_clients[i].props.max_msg_length;
client->protocol_version =
dev->me_clients[i].props.protocol_version;
rets = dev->iamthif_cl.status;
goto end;
}
if (cl->state != MEI_FILE_CONNECTING) {
rets = -ENODEV;
goto end;
}
/* prepare the output buffer */
client = &data->out_client_properties;
client->max_msg_length = dev->me_clients[i].props.max_msg_length;
client->protocol_version = dev->me_clients[i].props.protocol_version;
dev_dbg(&dev->pdev->dev, "Can connect?\n");
if (dev->mei_host_buffer_is_empty
&& !mei_other_client_is_connecting(dev, cl)) {
dev_dbg(&dev->pdev->dev, "Sending Connect Message\n");
dev->mei_host_buffer_is_empty = false;
if (mei_connect(dev, cl)) {
dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n");
rets = -ENODEV;
goto end;
} else {
dev_dbg(&dev->pdev->dev, "Sending connect message - succeeded\n");
cl->timer_count = MEI_CONNECT_TIMEOUT;
list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
}
} else {
dev_dbg(&dev->pdev->dev, "Queuing the connect request due to device busy\n");
dev_dbg(&dev->pdev->dev, "add connect cb to control write list.\n");
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
mutex_unlock(&dev->device_lock);
err = wait_event_timeout(dev->wait_recvd_msg,
(MEI_FILE_CONNECTED == cl->state ||
MEI_FILE_DISCONNECTED == cl->state), timeout);
mutex_lock(&dev->device_lock);
if (MEI_FILE_CONNECTED == cl->state) {
dev_dbg(&dev->pdev->dev, "successfully connected to FW client.\n");
rets = cl->status;
goto end;
} else {
dev_dbg(&dev->pdev->dev, "failed to connect to FW client.cl->state = %d.\n",
cl->state);
if (!err) {
dev_dbg(&dev->pdev->dev,
"wait_event_interruptible_timeout failed on client"
" connect message fw response message.\n");
}
rets = -EFAULT;
mei_io_list_flush(&dev->ctrl_rd_list, cl);
mei_io_list_flush(&dev->ctrl_wr_list, cl);
goto end;
}
rets = 0;
end:
dev_dbg(&dev->pdev->dev, "free connect cb memory.");
mei_io_cb_free(cb);
return rets;
}
/**
* mei_start_read - the start read client message function.
*
* @dev: the device structure
* @cl: private data of the file object
*
* returns 0 on success, <0 on failure.
*/
int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_cl_cb *cb;
int rets;
int i;
if (cl->state != MEI_FILE_CONNECTED)
return -ENODEV;
if (dev->dev_state != MEI_DEV_ENABLED)
return -ENODEV;
if (cl->read_pending || cl->read_cb) {
dev_dbg(&dev->pdev->dev, "read is pending.\n");
return -EBUSY;
}
i = mei_me_cl_by_id(dev, cl->me_client_id);
if (i < 0) {
dev_err(&dev->pdev->dev, "no such me client %d\n",
cl->me_client_id);
return -ENODEV;
}
cb = mei_io_cb_init(cl, NULL);
if (!cb)
return -ENOMEM;
rets = mei_io_cb_alloc_resp_buf(cb,
dev->me_clients[i].props.max_msg_length);
if (rets)
goto err;
cb->fop_type = MEI_FOP_READ;
cl->read_cb = cb;
if (dev->mei_host_buffer_is_empty) {
dev->mei_host_buffer_is_empty = false;
if (mei_send_flow_control(dev, cl)) {
rets = -ENODEV;
goto err;
}
list_add_tail(&cb->list, &dev->read_list.list);
} else {
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
return rets;
err:
mei_io_cb_free(cb);
return rets;
}

View File

@ -37,79 +37,11 @@
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"
/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;
/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
static DEFINE_MUTEX(mei_mutex);
/**
* find_read_list_entry - find read list entry
*
* @dev: device structure
* @cl: private data of the file object
*
* returns cb on success, NULL on error
*/
static struct mei_cl_cb *find_read_list_entry(
struct mei_device *dev,
struct mei_cl *cl)
{
struct mei_cl_cb *pos = NULL;
struct mei_cl_cb *next = NULL;
dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
if (mei_cl_cmp_id(cl, pos->cl))
return pos;
return NULL;
}
#include "mei_dev.h"
#include "hw-me.h"
#include "client.h"
/**
* mei_open - the open function
@ -121,16 +53,20 @@ static struct mei_cl_cb *find_read_list_entry(
*/
static int mei_open(struct inode *inode, struct file *file)
{
struct miscdevice *misc = file->private_data;
struct pci_dev *pdev;
struct mei_cl *cl;
struct mei_device *dev;
unsigned long cl_id;
int err;
err = -ENODEV;
if (!mei_pdev)
if (!misc->parent)
goto out;
dev = pci_get_drvdata(mei_pdev);
pdev = container_of(misc->parent, struct pci_dev, dev);
dev = pci_get_drvdata(pdev);
if (!dev)
goto out;
@ -153,24 +89,9 @@ static int mei_open(struct inode *inode, struct file *file)
goto out_unlock;
}
cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
if (cl_id >= MEI_CLIENTS_MAX) {
dev_err(&dev->pdev->dev, "client_id exceded %d",
MEI_CLIENTS_MAX) ;
err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
if (err)
goto out_unlock;
}
cl->host_client_id = cl_id;
dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);
dev->open_handle_count++;
list_add_tail(&cl->link, &dev->file_list);
set_bit(cl->host_client_id, dev->host_clients_map);
cl->state = MEI_FILE_INITIALIZING;
cl->sm_state = 0;
file->private_data = cl;
mutex_unlock(&dev->device_lock);
@ -216,7 +137,7 @@ static int mei_release(struct inode *inode, struct file *file)
"ME client = %d\n",
cl->host_client_id,
cl->me_client_id);
rets = mei_disconnect_host_client(dev, cl);
rets = mei_cl_disconnect(cl);
}
mei_cl_flush_queues(cl);
dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
@ -227,12 +148,13 @@ static int mei_release(struct inode *inode, struct file *file)
clear_bit(cl->host_client_id, dev->host_clients_map);
dev->open_handle_count--;
}
mei_me_cl_unlink(dev, cl);
mei_cl_unlink(cl);
/* free read cb */
cb = NULL;
if (cl->read_cb) {
cb = find_read_list_entry(dev, cl);
cb = mei_cl_find_read_cb(cl);
/* Remove entry from read list */
if (cb)
list_del(&cb->list);
@ -322,7 +244,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto out;
}
err = mei_start_read(dev, cl);
err = mei_cl_read_start(cl);
if (err && err != -EBUSY) {
dev_dbg(&dev->pdev->dev,
"mei start read failure with status = %d\n", err);
@ -393,14 +315,13 @@ copy_buffer:
goto out;
free:
cb_pos = find_read_list_entry(dev, cl);
cb_pos = mei_cl_find_read_cb(cl);
/* Remove entry from read list */
if (cb_pos)
list_del(&cb_pos->list);
mei_io_cb_free(cb);
cl->reading_state = MEI_IDLE;
cl->read_cb = NULL;
cl->read_pending = 0;
out:
dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
mutex_unlock(&dev->device_lock);
@ -475,16 +396,15 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
/* free entry used in read */
if (cl->reading_state == MEI_READ_COMPLETE) {
*offset = 0;
write_cb = find_read_list_entry(dev, cl);
write_cb = mei_cl_find_read_cb(cl);
if (write_cb) {
list_del(&write_cb->list);
mei_io_cb_free(write_cb);
write_cb = NULL;
cl->reading_state = MEI_IDLE;
cl->read_cb = NULL;
cl->read_pending = 0;
}
} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
} else if (cl->reading_state == MEI_IDLE)
*offset = 0;
@ -519,7 +439,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
if (rets) {
dev_err(&dev->pdev->dev,
"amthi write failed with status = %d\n", rets);
"amthif write failed with status = %d\n", rets);
goto err;
}
mutex_unlock(&dev->device_lock);
@ -530,20 +450,20 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
cl->host_client_id, cl->me_client_id);
rets = mei_flow_ctrl_creds(dev, cl);
rets = mei_cl_flow_ctrl_creds(cl);
if (rets < 0)
goto err;
if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
if (rets == 0 || !dev->hbuf_is_ready) {
write_cb->buf_idx = 0;
mei_hdr.msg_complete = 0;
cl->writing_state = MEI_WRITING;
goto out;
}
dev->mei_host_buffer_is_empty = false;
if (length > mei_hbuf_max_data(dev)) {
mei_hdr.length = mei_hbuf_max_data(dev);
dev->hbuf_is_ready = false;
if (length > mei_hbuf_max_len(dev)) {
mei_hdr.length = mei_hbuf_max_len(dev);
mei_hdr.msg_complete = 0;
} else {
mei_hdr.length = length;
@ -552,10 +472,10 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
*((u32 *) &mei_hdr));
if (mei_write_message(dev, &mei_hdr,
write_cb->request_buffer.data, mei_hdr.length)) {
dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
MEI_HDR_PRM(&mei_hdr));
if (mei_write_message(dev, &mei_hdr, write_cb->request_buffer.data)) {
rets = -ENODEV;
goto err;
}
@ -564,7 +484,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
out:
if (mei_hdr.msg_complete) {
if (mei_flow_ctrl_reduce(dev, cl)) {
if (mei_cl_flow_ctrl_reduce(cl)) {
rets = -ENODEV;
goto err;
}
@ -582,6 +502,103 @@ err:
return rets;
}
/**
* mei_ioctl_connect_client - the connect to fw client IOCTL function
*
* @dev: the device structure
* @data: IOCTL connect data, input and output parameters
* @file: private data of the file object
*
* Locking: called under "dev->device_lock" lock
*
* returns 0 on success, <0 on failure.
*/
static int mei_ioctl_connect_client(struct file *file,
struct mei_connect_client_data *data)
{
struct mei_device *dev;
struct mei_client *client;
struct mei_cl *cl;
int i;
int rets;
cl = file->private_data;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (dev->dev_state != MEI_DEV_ENABLED) {
rets = -ENODEV;
goto end;
}
if (cl->state != MEI_FILE_INITIALIZING &&
cl->state != MEI_FILE_DISCONNECTED) {
rets = -EBUSY;
goto end;
}
/* find ME client we're trying to connect to */
i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
cl->me_client_id = dev->me_clients[i].client_id;
cl->state = MEI_FILE_CONNECTING;
}
dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
cl->me_client_id);
dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
dev->me_clients[i].props.protocol_version);
dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
dev->me_clients[i].props.max_msg_length);
/* if we're connecting to amthif client then we will use the
* existing connection
*/
if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
rets = -ENODEV;
goto end;
}
clear_bit(cl->host_client_id, dev->host_clients_map);
mei_cl_unlink(cl);
kfree(cl);
cl = NULL;
file->private_data = &dev->iamthif_cl;
client = &data->out_client_properties;
client->max_msg_length =
dev->me_clients[i].props.max_msg_length;
client->protocol_version =
dev->me_clients[i].props.protocol_version;
rets = dev->iamthif_cl.status;
goto end;
}
if (cl->state != MEI_FILE_CONNECTING) {
rets = -ENODEV;
goto end;
}
/* prepare the output buffer */
client = &data->out_client_properties;
client->max_msg_length = dev->me_clients[i].props.max_msg_length;
client->protocol_version = dev->me_clients[i].props.protocol_version;
dev_dbg(&dev->pdev->dev, "Can connect?\n");
rets = mei_cl_connect(cl, file);
end:
dev_dbg(&dev->pdev->dev, "free connect cb memory.");
return rets;
}
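For orientation, the user-space side of this connect flow is a plain character-device sequence: open the node, issue IOCTL_MEI_CONNECT_CLIENT with the firmware client's UUID, then read()/write() messages. A hedged sketch follows (the UUID is a placeholder; a real caller uses the UUID its firmware client publishes, and the node is /dev/mei as of this series):

/* Hypothetical user-space sketch, not part of this patch set. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mei.h>

int main(void)
{
        struct mei_connect_client_data data;
        /* placeholder UUID; substitute the firmware client's UUID */
        const uuid_le fw_uuid = UUID_LE(0x12345678, 0x1234, 0x1234,
                        0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34);
        int fd = open("/dev/mei", O_RDWR);

        if (fd < 0)
                return 1;
        memset(&data, 0, sizeof(data));
        data.in_client_uuid = fw_uuid;
        if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) < 0) {
                close(fd);
                return 1;
        }
        printf("max msg len %u\n", data.out_client_properties.max_msg_length);
        close(fd);
        return 0;
}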
/**
* mei_ioctl - the IOCTL function
@ -630,6 +647,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
rets = -EFAULT;
goto out;
}
rets = mei_ioctl_connect_client(file, connect_data);
/* if all is ok, copying the data back to user. */
@ -726,7 +744,6 @@ static const struct file_operations mei_fops = {
.llseek = no_llseek
};
/*
* Misc Device Struct
*/
@ -736,300 +753,17 @@ static struct miscdevice mei_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
};
/**
* mei_quirk_probe - probe for devices that don't have a valid ME interface
* @pdev: PCI device structure
* @ent: entry into pci_device_table
*
* returns true if ME Interface is valid, false otherwise
*/
static bool mei_quirk_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
int mei_register(struct device *dev)
{
u32 reg;
if (ent->device == MEI_DEV_ID_PBG_1) {
pci_read_config_dword(pdev, 0x48, &reg);
/* make sure that bit 9 is up and bit 10 is down */
if ((reg & 0x600) == 0x200) {
dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
return false;
}
}
return true;
}
/**
* mei_probe - Device Initialization Routine
*
* @pdev: PCI device structure
* @ent: entry in kcs_pci_tbl
*
* returns 0 on success, <0 on failure.
*/
static int mei_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mei_device *dev;
int err;
mutex_lock(&mei_mutex);
if (!mei_quirk_probe(pdev, ent)) {
err = -ENODEV;
goto end;
}
if (mei_pdev) {
err = -EEXIST;
goto end;
}
/* enable pci dev */
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device.\n");
goto end;
}
/* set PCI host mastering */
pci_set_master(pdev);
/* pci request regions for mei driver */
err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err) {
dev_err(&pdev->dev, "failed to get pci regions.\n");
goto disable_device;
}
/* allocates and initializes the mei dev structure */
dev = mei_device_init(pdev);
if (!dev) {
err = -ENOMEM;
goto release_regions;
}
/* mapping IO device memory */
dev->mem_addr = pci_iomap(pdev, 0, 0);
if (!dev->mem_addr) {
dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
err = -ENOMEM;
goto free_device;
}
pci_enable_msi(pdev);
/* request and enable interrupt */
if (pci_dev_msi_enabled(pdev))
err = request_threaded_irq(pdev->irq,
NULL,
mei_interrupt_thread_handler,
IRQF_ONESHOT, KBUILD_MODNAME, dev);
else
err = request_threaded_irq(pdev->irq,
mei_interrupt_quick_handler,
mei_interrupt_thread_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
pdev->irq);
goto disable_msi;
}
INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
INIT_WORK(&dev->init_work, mei_host_client_init);
if (mei_hw_init(dev)) {
dev_err(&pdev->dev, "init hw failure.\n");
err = -ENODEV;
goto release_irq;
}
err = misc_register(&mei_misc_device);
if (err)
goto release_irq;
mei_pdev = pdev;
pci_set_drvdata(pdev, dev);
schedule_delayed_work(&dev->timer_work, HZ);
mutex_unlock(&mei_mutex);
pr_debug("initialization successful.\n");
return 0;
release_irq:
/* disable interrupts */
dev->host_hw_state = mei_hcsr_read(dev);
mei_disable_interrupts(dev);
flush_scheduled_work();
free_irq(pdev->irq, dev);
disable_msi:
pci_disable_msi(pdev);
pci_iounmap(pdev, dev->mem_addr);
free_device:
kfree(dev);
release_regions:
pci_release_regions(pdev);
disable_device:
pci_disable_device(pdev);
end:
mutex_unlock(&mei_mutex);
dev_err(&pdev->dev, "initialization failed.\n");
return err;
mei_misc_device.parent = dev;
return misc_register(&mei_misc_device);
}
/**
* mei_remove - Device Removal Routine
*
* @pdev: PCI device structure
*
* mei_remove is called by the PCI subsystem to alert the driver
* that it should release a PCI device.
*/
static void mei_remove(struct pci_dev *pdev)
void mei_deregister(void)
{
struct mei_device *dev;
if (mei_pdev != pdev)
return;
dev = pci_get_drvdata(pdev);
if (!dev)
return;
mutex_lock(&dev->device_lock);
cancel_delayed_work(&dev->timer_work);
mei_wd_stop(dev);
mei_pdev = NULL;
if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
mei_disconnect_host_client(dev, &dev->iamthif_cl);
}
if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
dev->wd_cl.state = MEI_FILE_DISCONNECTING;
mei_disconnect_host_client(dev, &dev->wd_cl);
}
/* Unregistering watchdog device */
mei_watchdog_unregister(dev);
/* remove entry if already in list */
dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
mei_me_cl_unlink(dev, &dev->wd_cl);
mei_me_cl_unlink(dev, &dev->iamthif_cl);
dev->iamthif_current_cb = NULL;
dev->me_clients_num = 0;
mutex_unlock(&dev->device_lock);
flush_scheduled_work();
/* disable interrupts */
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
pci_disable_msi(pdev);
pci_set_drvdata(pdev, NULL);
if (dev->mem_addr)
pci_iounmap(pdev, dev->mem_addr);
kfree(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev = pci_get_drvdata(pdev);
int err;
if (!dev)
return -ENODEV;
mutex_lock(&dev->device_lock);
cancel_delayed_work(&dev->timer_work);
/* Stop watchdog if exists */
err = mei_wd_stop(dev);
/* Set new mei state */
if (dev->dev_state == MEI_DEV_ENABLED ||
dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
dev->dev_state = MEI_DEV_POWER_DOWN;
mei_reset(dev, 0);
}
mutex_unlock(&dev->device_lock);
free_irq(pdev->irq, dev);
pci_disable_msi(pdev);
return err;
mei_misc_device.parent = NULL;
}
static int mei_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
int err;
dev = pci_get_drvdata(pdev);
if (!dev)
return -ENODEV;
pci_enable_msi(pdev);
/* request and enable interrupt */
if (pci_dev_msi_enabled(pdev))
err = request_threaded_irq(pdev->irq,
NULL,
mei_interrupt_thread_handler,
IRQF_ONESHOT, KBUILD_MODNAME, dev);
else
err = request_threaded_irq(pdev->irq,
mei_interrupt_quick_handler,
mei_interrupt_thread_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
pdev->irq);
return err;
}
mutex_lock(&dev->device_lock);
dev->dev_state = MEI_DEV_POWER_UP;
mei_reset(dev, 1);
mutex_unlock(&dev->device_lock);
/* Start timer if stopped in suspend */
schedule_delayed_work(&dev->timer_work, HZ);
return err;
}
static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS (&mei_pm_ops)
#else
#define MEI_PM_OPS NULL
#endif /* CONFIG_PM */
/*
* PCI driver structure
*/
static struct pci_driver mei_driver = {
.name = KBUILD_MODNAME,
.id_table = mei_pci_tbl,
.probe = mei_probe,
.remove = mei_remove,
.shutdown = mei_remove,
.driver.pm = MEI_PM_OPS,
};
module_pci_driver(mei_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");


@ -21,7 +21,9 @@
#include <linux/watchdog.h>
#include <linux/poll.h>
#include <linux/mei.h>
#include "hw.h"
#include "hw-me-regs.h"
/*
* watch dog definition
@ -44,7 +46,7 @@
/*
* AMTHI Client UUID
*/
extern const uuid_le mei_amthi_guid;
extern const uuid_le mei_amthif_guid;
/*
* Watchdog Client UUID
@ -65,12 +67,18 @@ extern const u8 mei_wd_state_independence_msg[3][4];
* Number of File descriptors/handles
* that can be opened to the driver.
*
* Limit to 253: 256 Total Clients
* Limit to 255: 256 Total Clients
* minus internal client for MEI Bus Messages
* minus internal client for AMTHI
* minus internal client for Watchdog
*/
#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 3)
#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
/*
* Internal Clients Number
*/
#define MEI_HOST_CLIENT_ID_ANY (-1)
#define MEI_HBM_HOST_CLIENT_ID 0 /* not used, just for documentation */
#define MEI_WD_HOST_CLIENT_ID 1
#define MEI_IAMTHIF_HOST_CLIENT_ID 2
/* File state */
@ -150,6 +158,19 @@ struct mei_message_data {
unsigned char *data;
};
/**
* struct mei_me_client - representation of me (fw) client
*
* @props - client properties
* @client_id - me client id
* @mei_flow_ctrl_creds - flow control credits
*/
struct mei_me_client {
struct mei_client_properties props;
u8 client_id;
u8 mei_flow_ctrl_creds;
};
struct mei_cl;
@ -178,7 +199,6 @@ struct mei_cl {
wait_queue_head_t tx_wait;
wait_queue_head_t rx_wait;
wait_queue_head_t wait;
int read_pending;
int status;
/* ID of client connected */
u8 host_client_id;
@ -191,10 +211,67 @@ struct mei_cl {
struct mei_cl_cb *read_cb;
};
/** struct mei_hw_ops
*
* @host_set_ready - notify FW that host side is ready
* @host_is_ready - query for host readiness
* @hw_is_ready - query if hw is ready
* @hw_reset - reset hw
* @hw_config - configure hw
* @intr_clear - clear pending interrupts
* @intr_enable - enable interrupts
* @intr_disable - disable interrupts
* @hbuf_free_slots - query for write buffer empty slots
* @hbuf_is_ready - query if write buffer is empty
* @hbuf_max_len - query for write buffer max len
* @write - write a message to FW
* @rdbuf_full_slots - query how many slots are filled
* @read_hdr - get first 4 bytes (header)
* @read - read a buffer from the FW
*/
struct mei_hw_ops {
void (*host_set_ready) (struct mei_device *dev);
bool (*host_is_ready) (struct mei_device *dev);
bool (*hw_is_ready) (struct mei_device *dev);
void (*hw_reset) (struct mei_device *dev, bool enable);
void (*hw_config) (struct mei_device *dev);
void (*intr_clear) (struct mei_device *dev);
void (*intr_enable) (struct mei_device *dev);
void (*intr_disable) (struct mei_device *dev);
int (*hbuf_free_slots) (struct mei_device *dev);
bool (*hbuf_is_ready) (struct mei_device *dev);
size_t (*hbuf_max_len) (const struct mei_device *dev);
int (*write)(struct mei_device *dev,
struct mei_msg_hdr *hdr,
unsigned char *buf);
int (*rdbuf_full_slots)(struct mei_device *dev);
u32 (*read_hdr)(const struct mei_device *dev);
int (*read) (struct mei_device *dev,
unsigned char *buf, unsigned long len);
};
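To make the vtable concrete: a hardware backend supplies its own callbacks and hands the table to the core at init time. A minimal hedged sketch (the my_* names are placeholders, not the in-tree ME backend from hw-me.c):

/* Illustrative backend sketch; my_* names are hypothetical. */
static void my_hw_config(struct mei_device *dev)
{
        /* e.g. read the write-buffer depth from a device register */
}

static bool my_hw_is_ready(struct mei_device *dev)
{
        return true;    /* real hardware would poll a ready bit */
}

static const struct mei_hw_ops my_hw_ops = {
        .hw_config   = my_hw_config,
        .hw_is_ready = my_hw_is_ready,
        /* remaining callbacks wired the same way */
};

The core then calls through the dev->ops wrappers defined later in this header (mei_hw_config(), mei_hw_is_ready(), and so on) without knowing which backend is attached.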
/**
* struct mei_device - MEI private device struct
* @hbuf_depth - depth of host(write) buffer
* @wr_ext_msg - buffer for hbm control responses (set in read cycle)
* @mem_addr - mem mapped base register address
* @hbuf_depth - depth of hardware host/write buffer in slots
* @hbuf_is_ready - query if the host/write buffer is ready
* @wr_msg - the buffer for hbm control messages
* @wr_ext_msg - the buffer for hbm control responses (set in read cycle)
*/
struct mei_device {
struct pci_dev *pdev; /* pointer to pci device struct */
@ -213,24 +290,14 @@ struct mei_device {
*/
struct list_head file_list;
long open_handle_count;
/*
* memory of device
*/
unsigned int mem_base;
unsigned int mem_length;
void __iomem *mem_addr;
/*
* lock for the device
*/
struct mutex device_lock; /* device lock */
struct delayed_work timer_work; /* MEI timer delayed work (timeouts) */
bool recvd_msg;
/*
* hw states of host and fw(ME)
*/
u32 host_hw_state;
u32 me_hw_state;
u8 hbuf_depth;
/*
* waiting queue for receive message from FW
*/
@ -243,11 +310,20 @@ struct mei_device {
enum mei_dev_state dev_state;
enum mei_init_clients_states init_clients_state;
u16 init_clients_timer;
bool need_reset;
unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */
u32 rd_msg_hdr;
u32 wr_msg_buf[128]; /* used for control messages */
/* write buffer */
u8 hbuf_depth;
bool hbuf_is_ready;
/* used for control messages */
struct {
struct mei_msg_hdr hdr;
unsigned char data[128];
} wr_msg;
struct {
struct mei_msg_hdr hdr;
unsigned char data[4]; /* All HBM messages are 4 bytes */
@ -261,7 +337,6 @@ struct mei_device {
u8 me_clients_num;
u8 me_client_presentation_num;
u8 me_client_index;
bool mei_host_buffer_is_empty;
struct mei_cl wd_cl;
enum mei_wd_states wd_state;
@ -289,6 +364,9 @@ struct mei_device {
bool iamthif_canceled;
struct work_struct init_work;
const struct mei_hw_ops *ops;
char hw[0] __aligned(sizeof(void *));
};
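The zero-length hw[] tail lets a backend co-allocate its private state with the generic device in a single allocation; the ME backend retrieves it via to_me_hw() in hw-me.h. A hedged sketch of the pattern with placeholder names (my_hw/to_my_hw stand in for the in-tree struct mei_me_hw/to_me_hw(); assumes <linux/slab.h>):

/* Illustrative only, not in-tree code. */
struct my_hw {
        void __iomem *regs;
};

#define to_my_hw(dev)   ((struct my_hw *)((dev)->hw))

static struct mei_device *my_dev_alloc(void)
{
        struct mei_device *dev;

        dev = kzalloc(sizeof(*dev) + sizeof(struct my_hw), GFP_KERNEL);
        if (!dev)
                return NULL;
        mei_device_init(dev);   /* generic init; see prototype below */
        return dev;             /* backend state lives at to_my_hw(dev) */
}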
static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
@ -300,96 +378,28 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
/*
* mei init function prototypes
*/
struct mei_device *mei_device_init(struct pci_dev *pdev);
void mei_device_init(struct mei_device *dev);
void mei_reset(struct mei_device *dev, int interrupts);
int mei_hw_init(struct mei_device *dev);
int mei_task_initialize_clients(void *data);
int mei_initialize_clients(struct mei_device *dev);
int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl);
void mei_allocate_me_clients_storage(struct mei_device *dev);
int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
const uuid_le *cguid, u8 host_client_id);
void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl);
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
/*
* MEI IO Functions
*/
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
void mei_io_cb_free(struct mei_cl_cb *priv_cb);
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
/**
* mei_io_list_init - Sets up a queue list.
*
* @list: An instance cl callback structure
*/
static inline void mei_io_list_init(struct mei_cl_cb *list)
{
INIT_LIST_HEAD(&list->list);
}
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
/*
* MEI ME Client Functions
*/
struct mei_cl *mei_cl_allocate(struct mei_device *dev);
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
int mei_cl_flush_queues(struct mei_cl *cl);
/**
* mei_cl_cmp_id - tells if file private data have same id
*
* @fe1: private data of 1. file object
* @fe2: private data of 2. file object
*
* returns true - if ids are the same and not NULL
*/
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
const struct mei_cl *cl2)
{
return cl1 && cl2 &&
(cl1->host_client_id == cl2->host_client_id) &&
(cl1->me_client_id == cl2->me_client_id);
}
/*
* MEI Host Client Functions
*/
void mei_host_start_message(struct mei_device *dev);
void mei_host_enum_clients_message(struct mei_device *dev);
int mei_host_client_enumerate(struct mei_device *dev);
void mei_host_client_init(struct work_struct *work);
/*
* MEI interrupt functions prototype
*/
irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id);
irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id);
void mei_timer(struct work_struct *work);
int mei_irq_read_handler(struct mei_device *dev,
struct mei_cl_cb *cmpl_list, s32 *slots);
/*
* MEI input output function prototype
*/
int mei_ioctl_connect_client(struct file *file,
struct mei_connect_client_data *data);
int mei_start_read(struct mei_device *dev, struct mei_cl *cl);
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos);
/*
* AMTHIF - AMT Host Interface Functions
*/
void mei_amthif_reset_params(struct mei_device *dev);
void mei_amthif_host_init(struct mei_device *dev);
int mei_amthif_host_init(struct mei_device *dev);
int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb);
@ -407,9 +417,6 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
void mei_amthif_run_next_cmd(struct mei_device *dev);
int mei_amthif_read_message(struct mei_cl_cb *complete_list,
struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list);
@ -418,92 +425,107 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
int mei_wd_send(struct mei_device *dev);
int mei_wd_stop(struct mei_device *dev);
int mei_wd_host_init(struct mei_device *dev);
/*
* mei_watchdog_register - Registering watchdog interface
* once we got connection to the WD Client
* @dev - mei device
*/
void mei_watchdog_register(struct mei_device *dev);
/*
* mei_watchdog_unregister - Unregistering watchdog interface
* @dev - mei device
*/
void mei_watchdog_unregister(struct mei_device *dev);
/*
* Register Access Function
*/
/**
* mei_reg_read - Reads 32bit data from the mei device
*
* @dev: the device structure
* @offset: offset from which to read the data
*
* returns register value (u32)
*/
static inline u32 mei_reg_read(const struct mei_device *dev,
unsigned long offset)
static inline void mei_hw_config(struct mei_device *dev)
{
return ioread32(dev->mem_addr + offset);
dev->ops->hw_config(dev);
}
static inline void mei_hw_reset(struct mei_device *dev, bool enable)
{
dev->ops->hw_reset(dev, enable);
}
/**
* mei_reg_write - Writes 32bit data to the mei device
*
* @dev: the device structure
* @offset: offset from which to write the data
* @value: register value to write (u32)
*/
static inline void mei_reg_write(const struct mei_device *dev,
unsigned long offset, u32 value)
static inline void mei_clear_interrupts(struct mei_device *dev)
{
iowrite32(value, dev->mem_addr + offset);
dev->ops->intr_clear(dev);
}
/**
* mei_hcsr_read - Reads 32bit data from the host CSR
*
* @dev: the device structure
*
* returns the byte read.
*/
static inline u32 mei_hcsr_read(const struct mei_device *dev)
static inline void mei_enable_interrupts(struct mei_device *dev)
{
return mei_reg_read(dev, H_CSR);
dev->ops->intr_enable(dev);
}
/**
* mei_mecsr_read - Reads 32bit data from the ME CSR
*
* @dev: the device structure
*
* returns ME_CSR_HA register value (u32)
*/
static inline u32 mei_mecsr_read(const struct mei_device *dev)
static inline void mei_disable_interrupts(struct mei_device *dev)
{
return mei_reg_read(dev, ME_CSR_HA);
dev->ops->intr_disable(dev);
}
/**
* get_me_cb_rw - Reads 32bit data from the mei ME_CB_RW register
*
* @dev: the device structure
*
* returns ME_CB_RW register value (u32)
*/
static inline u32 mei_mecbrw_read(const struct mei_device *dev)
static inline void mei_host_set_ready(struct mei_device *dev)
{
return mei_reg_read(dev, ME_CB_RW);
dev->ops->host_set_ready(dev);
}
static inline bool mei_host_is_ready(struct mei_device *dev)
{
return dev->ops->host_is_ready(dev);
}
static inline bool mei_hw_is_ready(struct mei_device *dev)
{
return dev->ops->hw_is_ready(dev);
}
/*
* mei interface function prototypes
*/
void mei_hcsr_set(struct mei_device *dev);
void mei_csr_clear_his(struct mei_device *dev);
void mei_enable_interrupts(struct mei_device *dev);
void mei_disable_interrupts(struct mei_device *dev);
static inline struct mei_msg_hdr *mei_hbm_hdr(u32 *buf, size_t length)
static inline bool mei_hbuf_is_ready(struct mei_device *dev)
{
struct mei_msg_hdr *hdr = (struct mei_msg_hdr *)buf;
hdr->host_addr = 0;
hdr->me_addr = 0;
hdr->length = length;
hdr->msg_complete = 1;
hdr->reserved = 0;
return hdr;
return dev->ops->hbuf_is_ready(dev);
}
static inline int mei_hbuf_empty_slots(struct mei_device *dev)
{
return dev->ops->hbuf_free_slots(dev);
}
static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
{
return dev->ops->hbuf_max_len(dev);
}
static inline int mei_write_message(struct mei_device *dev,
struct mei_msg_hdr *hdr,
unsigned char *buf)
{
return dev->ops->write(dev, hdr, buf);
}
static inline u32 mei_read_hdr(const struct mei_device *dev)
{
return dev->ops->read_hdr(dev);
}
static inline void mei_read_slots(struct mei_device *dev,
unsigned char *buf, unsigned long len)
{
dev->ops->read(dev, buf, len);
}
static inline int mei_count_full_read_slots(struct mei_device *dev)
{
return dev->ops->rdbuf_full_slots(dev);
}
int mei_register(struct device *dev);
void mei_deregister(void);
#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d"
#define MEI_HDR_PRM(hdr) \
(hdr)->host_addr, (hdr)->me_addr, \
(hdr)->length, (hdr)->msg_complete
#endif

drivers/misc/mei/pci-me.c (new file, 396 lines)

@ -0,0 +1,396 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hw-me.h"
#include "client.h"
/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;
/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
static DEFINE_MUTEX(mei_mutex);
/**
* mei_quirk_probe - probe for devices that don't have a valid ME interface
* @pdev: PCI device structure
* @ent: entry into pci_device_table
*
* returns true if ME Interface is valid, false otherwise
*/
static bool mei_quirk_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
u32 reg;
if (ent->device == MEI_DEV_ID_PBG_1) {
pci_read_config_dword(pdev, 0x48, &reg);
/* make sure that bit 9 is up and bit 10 is down */
if ((reg & 0x600) == 0x200) {
dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
return false;
}
}
return true;
}
/**
* mei_probe - Device Initialization Routine
*
* @pdev: PCI device structure
* @ent: entry in kcs_pci_tbl
*
* returns 0 on success, <0 on failure.
*/
static int mei_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct mei_device *dev;
struct mei_me_hw *hw;
int err;
mutex_lock(&mei_mutex);
if (!mei_quirk_probe(pdev, ent)) {
err = -ENODEV;
goto end;
}
if (mei_pdev) {
err = -EEXIST;
goto end;
}
/* enable pci dev */
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device.\n");
goto end;
}
/* set PCI host mastering */
pci_set_master(pdev);
/* pci request regions for mei driver */
err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err) {
dev_err(&pdev->dev, "failed to get pci regions.\n");
goto disable_device;
}
/* allocates and initializes the mei dev structure */
dev = mei_me_dev_init(pdev);
if (!dev) {
err = -ENOMEM;
goto release_regions;
}
hw = to_me_hw(dev);
/* mapping IO device memory */
hw->mem_addr = pci_iomap(pdev, 0, 0);
if (!hw->mem_addr) {
dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
err = -ENOMEM;
goto free_device;
}
pci_enable_msi(pdev);
/* request and enable interrupt */
if (pci_dev_msi_enabled(pdev))
err = request_threaded_irq(pdev->irq,
NULL,
mei_me_irq_thread_handler,
IRQF_ONESHOT, KBUILD_MODNAME, dev);
else
err = request_threaded_irq(pdev->irq,
mei_me_irq_quick_handler,
mei_me_irq_thread_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
pdev->irq);
goto disable_msi;
}
if (mei_hw_init(dev)) {
dev_err(&pdev->dev, "init hw failure.\n");
err = -ENODEV;
goto release_irq;
}
err = mei_register(&pdev->dev);
if (err)
goto release_irq;
mei_pdev = pdev;
pci_set_drvdata(pdev, dev);
schedule_delayed_work(&dev->timer_work, HZ);
mutex_unlock(&mei_mutex);
pr_debug("initialization successful.\n");
return 0;
release_irq:
mei_disable_interrupts(dev);
flush_scheduled_work();
free_irq(pdev->irq, dev);
disable_msi:
pci_disable_msi(pdev);
pci_iounmap(pdev, hw->mem_addr);
free_device:
kfree(dev);
release_regions:
pci_release_regions(pdev);
disable_device:
pci_disable_device(pdev);
end:
mutex_unlock(&mei_mutex);
dev_err(&pdev->dev, "initialization failed.\n");
return err;
}
/**
* mei_remove - Device Removal Routine
*
* @pdev: PCI device structure
*
* mei_remove is called by the PCI subsystem to alert the driver
* that it should release a PCI device.
*/
static void mei_remove(struct pci_dev *pdev)
{
struct mei_device *dev;
struct mei_me_hw *hw;
if (mei_pdev != pdev)
return;
dev = pci_get_drvdata(pdev);
if (!dev)
return;
hw = to_me_hw(dev);
mutex_lock(&dev->device_lock);
cancel_delayed_work(&dev->timer_work);
mei_wd_stop(dev);
mei_pdev = NULL;
if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
mei_cl_disconnect(&dev->iamthif_cl);
}
if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
dev->wd_cl.state = MEI_FILE_DISCONNECTING;
mei_cl_disconnect(&dev->wd_cl);
}
/* Unregistering watchdog device */
mei_watchdog_unregister(dev);
/* remove entry if already in list */
dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
if (dev->open_handle_count > 0)
dev->open_handle_count--;
mei_cl_unlink(&dev->wd_cl);
if (dev->open_handle_count > 0)
dev->open_handle_count--;
mei_cl_unlink(&dev->iamthif_cl);
dev->iamthif_current_cb = NULL;
dev->me_clients_num = 0;
mutex_unlock(&dev->device_lock);
flush_scheduled_work();
/* disable interrupts */
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
pci_disable_msi(pdev);
pci_set_drvdata(pdev, NULL);
if (hw->mem_addr)
pci_iounmap(pdev, hw->mem_addr);
kfree(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
mei_deregister();
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev = pci_get_drvdata(pdev);
int err;
if (!dev)
return -ENODEV;
mutex_lock(&dev->device_lock);
cancel_delayed_work(&dev->timer_work);
/* Stop watchdog if exists */
err = mei_wd_stop(dev);
/* Set new mei state */
if (dev->dev_state == MEI_DEV_ENABLED ||
dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
dev->dev_state = MEI_DEV_POWER_DOWN;
mei_reset(dev, 0);
}
mutex_unlock(&dev->device_lock);
free_irq(pdev->irq, dev);
pci_disable_msi(pdev);
return err;
}
static int mei_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
int err;
dev = pci_get_drvdata(pdev);
if (!dev)
return -ENODEV;
pci_enable_msi(pdev);
/* request and enable interrupt */
if (pci_dev_msi_enabled(pdev))
err = request_threaded_irq(pdev->irq,
NULL,
mei_me_irq_thread_handler,
IRQF_ONESHOT, KBUILD_MODNAME, dev);
else
err = request_threaded_irq(pdev->irq,
mei_me_irq_quick_handler,
mei_me_irq_thread_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
pdev->irq);
return err;
}
mutex_lock(&dev->device_lock);
dev->dev_state = MEI_DEV_POWER_UP;
mei_reset(dev, 1);
mutex_unlock(&dev->device_lock);
/* Start timer if stopped in suspend */
schedule_delayed_work(&dev->timer_work, HZ);
return err;
}
static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS (&mei_pm_ops)
#else
#define MEI_PM_OPS NULL
#endif /* CONFIG_PM */
/*
* PCI driver structure
*/
static struct pci_driver mei_driver = {
.name = KBUILD_MODNAME,
.id_table = mei_pci_tbl,
.probe = mei_probe,
.remove = mei_remove,
.shutdown = mei_remove,
.driver.pm = MEI_PM_OPS,
};
module_pci_driver(mei_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");


@ -21,11 +21,13 @@
#include <linux/sched.h>
#include <linux/watchdog.h>
#include "mei_dev.h"
#include "hw.h"
#include "interface.h"
#include <linux/mei.h>
#include "mei_dev.h"
#include "hbm.h"
#include "hw-me.h"
#include "client.h"
static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 };
@ -62,30 +64,41 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
*/
int mei_wd_host_init(struct mei_device *dev)
{
int id;
mei_cl_init(&dev->wd_cl, dev);
struct mei_cl *cl = &dev->wd_cl;
int i;
int ret;
mei_cl_init(cl, dev);
/* look for WD client and connect to it */
dev->wd_cl.state = MEI_FILE_DISCONNECTED;
dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
dev->wd_state = MEI_WD_IDLE;
/* Connect WD ME client to the host client */
id = mei_me_cl_link(dev, &dev->wd_cl,
&mei_wd_guid, MEI_WD_HOST_CLIENT_ID);
if (id < 0) {
/* check for valid client id */
i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
if (i < 0) {
dev_info(&dev->pdev->dev, "wd: failed to find the client\n");
return -ENOENT;
}
if (mei_connect(dev, &dev->wd_cl)) {
cl->me_client_id = dev->me_clients[i].client_id;
ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
if (ret < 0) {
dev_info(&dev->pdev->dev, "wd: failed link client\n");
return -ENOENT;
}
cl->state = MEI_FILE_CONNECTING;
if (mei_hbm_cl_connect_req(dev, cl)) {
dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n");
dev->wd_cl.state = MEI_FILE_DISCONNECTED;
dev->wd_cl.host_client_id = 0;
cl->state = MEI_FILE_DISCONNECTED;
cl->host_client_id = 0;
return -EIO;
}
dev->wd_cl.timer_count = MEI_CONNECT_TIMEOUT;
cl->timer_count = MEI_CONNECT_TIMEOUT;
return 0;
}
@ -101,22 +114,21 @@ int mei_wd_host_init(struct mei_device *dev)
*/
int mei_wd_send(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr;
struct mei_msg_hdr hdr;
mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
mei_hdr->host_addr = dev->wd_cl.host_client_id;
mei_hdr->me_addr = dev->wd_cl.me_client_id;
mei_hdr->msg_complete = 1;
mei_hdr->reserved = 0;
hdr.host_addr = dev->wd_cl.host_client_id;
hdr.me_addr = dev->wd_cl.me_client_id;
hdr.msg_complete = 1;
hdr.reserved = 0;
if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
mei_hdr->length = MEI_WD_START_MSG_SIZE;
hdr.length = MEI_WD_START_MSG_SIZE;
else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
mei_hdr->length = MEI_WD_STOP_MSG_SIZE;
hdr.length = MEI_WD_STOP_MSG_SIZE;
else
return -EINVAL;
return mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length);
return mei_write_message(dev, &hdr, dev->wd_data);
}
/**
@ -141,16 +153,16 @@ int mei_wd_stop(struct mei_device *dev)
dev->wd_state = MEI_WD_STOPPING;
ret = mei_flow_ctrl_creds(dev, &dev->wd_cl);
ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
if (ret < 0)
goto out;
if (ret && dev->mei_host_buffer_is_empty) {
if (ret && dev->hbuf_is_ready) {
ret = 0;
dev->mei_host_buffer_is_empty = false;
dev->hbuf_is_ready = false;
if (!mei_wd_send(dev)) {
ret = mei_flow_ctrl_reduce(dev, &dev->wd_cl);
ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl);
if (ret)
goto out;
} else {
@ -270,10 +282,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
dev->wd_state = MEI_WD_RUNNING;
/* Check if we can send the ping to HW*/
if (dev->mei_host_buffer_is_empty &&
mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
dev->mei_host_buffer_is_empty = false;
dev->hbuf_is_ready = false;
dev_dbg(&dev->pdev->dev, "wd: sending ping\n");
if (mei_wd_send(dev)) {
@ -282,9 +293,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
goto end;
}
if (mei_flow_ctrl_reduce(dev, &dev->wd_cl)) {
if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) {
dev_err(&dev->pdev->dev,
"wd: mei_flow_ctrl_reduce() failed.\n");
"wd: mei_cl_flow_ctrl_reduce() failed.\n");
ret = -EIO;
goto end;
}


@ -240,7 +240,8 @@ void st_int_recv(void *disc_data,
char *ptr;
struct st_proto_s *proto;
unsigned short payload_len = 0;
int len = 0, type = 0;
int len = 0;
unsigned char type = 0;
unsigned char *plen;
struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
unsigned long flags;


@ -0,0 +1,16 @@
#
# VMware VMCI device
#
config VMWARE_VMCI
tristate "VMware VMCI Driver"
depends on X86 && PCI
help
This is VMware's Virtual Machine Communication Interface. It enables
high-speed communication between host and guest in a virtual
environment via the VMCI virtual device.
If unsure, say N.
To compile this driver as a module, choose M here: the
module will be called vmw_vmci.


@ -0,0 +1,4 @@
obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci.o
vmw_vmci-y += vmci_context.o vmci_datagram.o vmci_doorbell.o \
vmci_driver.o vmci_event.o vmci_guest.o vmci_handle_array.o \
vmci_host.o vmci_queue_pair.o vmci_resource.o vmci_route.o

File diff suppressed because it is too large


@ -0,0 +1,182 @@
/*
* VMware VMCI driver (vmciContext.h)
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _VMCI_CONTEXT_H_
#define _VMCI_CONTEXT_H_
#include <linux/vmw_vmci_defs.h>
#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/types.h>
#include <linux/wait.h>
#include "vmci_handle_array.h"
#include "vmci_datagram.h"
/* Used to determine what checkpoint state to get and set. */
enum {
VMCI_NOTIFICATION_CPT_STATE = 1,
VMCI_WELLKNOWN_CPT_STATE = 2,
VMCI_DG_OUT_STATE = 3,
VMCI_DG_IN_STATE = 4,
VMCI_DG_IN_SIZE_STATE = 5,
VMCI_DOORBELL_CPT_STATE = 6,
};
/* Host specific struct used for signalling */
struct vmci_host {
wait_queue_head_t wait_queue;
};
struct vmci_handle_list {
struct list_head node;
struct vmci_handle handle;
};
struct vmci_ctx {
struct list_head list_item; /* For global VMCI list. */
u32 cid;
struct kref kref;
struct list_head datagram_queue; /* Head of per VM queue. */
u32 pending_datagrams;
size_t datagram_queue_size; /* Size of datagram queue in bytes. */
/*
* Version of the code that created
* this context; e.g., VMX.
*/
int user_version;
spinlock_t lock; /* Locks callQueue and handle_arrays. */
/*
* queue_pairs attached to. The array of
* handles for queue pairs is accessed
* from the code for QP API, and there
* it is protected by the QP lock. It
* is also accessed from the context
* clean up path, which does not
* require a lock. VMCILock is not
* used to protect the QP array field.
*/
struct vmci_handle_arr *queue_pair_array;
/* Doorbells created by context. */
struct vmci_handle_arr *doorbell_array;
/* Doorbells pending for context. */
struct vmci_handle_arr *pending_doorbell_array;
/* Contexts current context is subscribing to. */
struct list_head notifier_list;
unsigned int n_notifiers;
struct vmci_host host_context;
u32 priv_flags;
const struct cred *cred;
bool *notify; /* Notify flag pointer - hosted only. */
struct page *notify_page; /* Page backing the notify UVA. */
};
/* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. */
struct vmci_ctx_info {
u32 remote_cid;
int result;
};
/* VMCICptBufInfo: Used to set/get current context's checkpoint state. */
struct vmci_ctx_chkpt_buf_info {
u64 cpt_buf;
u32 cpt_type;
u32 buf_size;
s32 result;
u32 _pad;
};
/*
* VMCINotificationReceiveInfo: Used to receive pending notifications
* for doorbells and queue pairs.
*/
struct vmci_ctx_notify_recv_info {
u64 db_handle_buf_uva;
u64 db_handle_buf_size;
u64 qp_handle_buf_uva;
u64 qp_handle_buf_size;
s32 result;
u32 _pad;
};
/*
* Utility function that checks whether two entities are allowed
* to interact. If one of them is restricted, the other one must
* be trusted.
*/
static inline bool vmci_deny_interaction(u32 part_one, u32 part_two)
{
return ((part_one & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
!(part_two & VMCI_PRIVILEGE_FLAG_TRUSTED)) ||
((part_two & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
!(part_one & VMCI_PRIVILEGE_FLAG_TRUSTED));
}
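A worked example of the rule, with illustrative values: a restricted endpoint may only interact with a trusted peer, so restricted-vs-unprivileged is denied while restricted-vs-trusted is allowed:

/* Illustrative check, not in-tree code. */
static bool example_priv_rule_holds(void)
{
        u32 restricted = VMCI_PRIVILEGE_FLAG_RESTRICTED;
        u32 trusted = VMCI_PRIVILEGE_FLAG_TRUSTED;

        /* denied: peer is neither trusted nor restricted */
        bool denied = vmci_deny_interaction(restricted, 0);
        /* allowed: peer is trusted */
        bool allowed = !vmci_deny_interaction(restricted, trusted);

        return denied && allowed;       /* true */
}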
struct vmci_ctx *vmci_ctx_create(u32 cid, u32 flags,
uintptr_t event_hnd, int version,
const struct cred *cred);
void vmci_ctx_destroy(struct vmci_ctx *context);
bool vmci_ctx_supports_host_qp(struct vmci_ctx *context);
int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg);
int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
size_t *max_size, struct vmci_datagram **dg);
int vmci_ctx_pending_datagrams(u32 cid, u32 *pending);
struct vmci_ctx *vmci_ctx_get(u32 cid);
void vmci_ctx_put(struct vmci_ctx *context);
bool vmci_ctx_exists(u32 cid);
int vmci_ctx_add_notification(u32 context_id, u32 remote_cid);
int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid);
int vmci_ctx_get_chkpt_state(u32 context_id, u32 cpt_type,
u32 *num_cids, void **cpt_buf_ptr);
int vmci_ctx_set_chkpt_state(u32 context_id, u32 cpt_type,
u32 num_cids, void *cpt_buf);
int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle);
int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle);
bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle);
void vmci_ctx_check_signal_notify(struct vmci_ctx *context);
void vmci_ctx_unset_notify(struct vmci_ctx *context);
int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle);
int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle);
int vmci_ctx_dbell_destroy_all(u32 context_id);
int vmci_ctx_notify_dbell(u32 cid, struct vmci_handle handle,
u32 src_priv_flags);
int vmci_ctx_rcv_notifications_get(u32 context_id, struct vmci_handle_arr
**db_handle_array, struct vmci_handle_arr
**qp_handle_array);
void vmci_ctx_rcv_notifications_release(u32 context_id, struct vmci_handle_arr
*db_handle_array, struct vmci_handle_arr
*qp_handle_array, bool success);
static inline u32 vmci_ctx_get_id(struct vmci_ctx *context)
{
if (!context)
return VMCI_INVALID_ID;
return context->cid;
}
#endif /* _VMCI_CONTEXT_H_ */


@ -0,0 +1,500 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"
/*
* struct datagram_entry describes the datagram entity. It is used for datagram
* entities created only on the host.
*/
struct datagram_entry {
struct vmci_resource resource;
u32 flags;
bool run_delayed;
vmci_datagram_recv_cb recv_cb;
void *client_data;
u32 priv_flags;
};
struct delayed_datagram_info {
struct datagram_entry *entry;
struct vmci_datagram msg;
struct work_struct work;
bool in_dg_host_queue;
};
/* Number of in-flight host->host datagrams */
static atomic_t delayed_dg_host_queue_size = ATOMIC_INIT(0);
/*
* Create a datagram entry given a handle pointer.
*/
static int dg_create_handle(u32 resource_id,
u32 flags,
u32 priv_flags,
vmci_datagram_recv_cb recv_cb,
void *client_data, struct vmci_handle *out_handle)
{
int result;
u32 context_id;
struct vmci_handle handle;
struct datagram_entry *entry;
if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0)
return VMCI_ERROR_INVALID_ARGS;
if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) {
context_id = VMCI_INVALID_ID;
} else {
context_id = vmci_get_context_id();
if (context_id == VMCI_INVALID_ID)
return VMCI_ERROR_NO_RESOURCES;
}
handle = vmci_make_handle(context_id, resource_id);
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
pr_warn("Failed allocating memory for datagram entry\n");
return VMCI_ERROR_NO_MEM;
}
entry->run_delayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? true : false;
entry->flags = flags;
entry->recv_cb = recv_cb;
entry->client_data = client_data;
entry->priv_flags = priv_flags;
/* Make datagram resource live. */
result = vmci_resource_add(&entry->resource,
VMCI_RESOURCE_TYPE_DATAGRAM,
handle);
if (result != VMCI_SUCCESS) {
pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
handle.context, handle.resource, result);
kfree(entry);
return result;
}
*out_handle = vmci_resource_handle(&entry->resource);
return VMCI_SUCCESS;
}
/*
* Internal utility function with the same purpose as
* vmci_datagram_get_priv_flags that also takes a context_id.
*/
static int vmci_datagram_get_priv_flags(u32 context_id,
struct vmci_handle handle,
u32 *priv_flags)
{
if (context_id == VMCI_INVALID_ID)
return VMCI_ERROR_INVALID_ARGS;
if (context_id == VMCI_HOST_CONTEXT_ID) {
struct datagram_entry *src_entry;
struct vmci_resource *resource;
resource = vmci_resource_by_handle(handle,
VMCI_RESOURCE_TYPE_DATAGRAM);
if (!resource)
return VMCI_ERROR_INVALID_ARGS;
src_entry = container_of(resource, struct datagram_entry,
resource);
*priv_flags = src_entry->priv_flags;
vmci_resource_put(resource);
} else if (context_id == VMCI_HYPERVISOR_CONTEXT_ID)
*priv_flags = VMCI_MAX_PRIVILEGE_FLAGS;
else
*priv_flags = vmci_context_get_priv_flags(context_id);
return VMCI_SUCCESS;
}
/*
* Calls the specified callback in a delayed context.
*/
static void dg_delayed_dispatch(struct work_struct *work)
{
struct delayed_datagram_info *dg_info =
container_of(work, struct delayed_datagram_info, work);
dg_info->entry->recv_cb(dg_info->entry->client_data, &dg_info->msg);
vmci_resource_put(&dg_info->entry->resource);
if (dg_info->in_dg_host_queue)
atomic_dec(&delayed_dg_host_queue_size);
kfree(dg_info);
}
/*
* Dispatch datagram as a host, to the host, or other vm context. This
* function cannot dispatch to hypervisor context handlers. This should
* have been handled before we get here by vmci_datagram_dispatch.
* Returns number of bytes sent on success, error code otherwise.
*/
static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
{
int retval;
size_t dg_size;
u32 src_priv_flags;
dg_size = VMCI_DG_SIZE(dg);
/* Host cannot send to the hypervisor. */
if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID)
return VMCI_ERROR_DST_UNREACHABLE;
/* Check that source handle matches sending context. */
if (dg->src.context != context_id) {
pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n",
context_id, dg->src.context, dg->src.resource);
return VMCI_ERROR_NO_ACCESS;
}
/* Get hold of privileges of sending endpoint. */
retval = vmci_datagram_get_priv_flags(context_id, dg->src,
&src_priv_flags);
if (retval != VMCI_SUCCESS) {
pr_warn("Couldn't get privileges (handle=0x%x:0x%x)\n",
dg->src.context, dg->src.resource);
return retval;
}
/* Determine if we should route to host or guest destination. */
if (dg->dst.context == VMCI_HOST_CONTEXT_ID) {
/* Route to host datagram entry. */
struct datagram_entry *dst_entry;
struct vmci_resource *resource;
if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
dg->dst.resource == VMCI_EVENT_HANDLER) {
return vmci_event_dispatch(dg);
}
resource = vmci_resource_by_handle(dg->dst,
VMCI_RESOURCE_TYPE_DATAGRAM);
if (!resource) {
pr_devel("Sending to invalid destination (handle=0x%x:0x%x)\n",
dg->dst.context, dg->dst.resource);
return VMCI_ERROR_INVALID_RESOURCE;
}
dst_entry = container_of(resource, struct datagram_entry,
resource);
if (vmci_deny_interaction(src_priv_flags,
dst_entry->priv_flags)) {
vmci_resource_put(resource);
return VMCI_ERROR_NO_ACCESS;
}
/*
* If a VMCI datagram destined for the host is also sent by the
* host, we always run it delayed. This ensures that no locks
* are held when the datagram callback runs.
*/
if (dst_entry->run_delayed ||
dg->src.context == VMCI_HOST_CONTEXT_ID) {
struct delayed_datagram_info *dg_info;
if (atomic_add_return(1, &delayed_dg_host_queue_size)
== VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) {
atomic_dec(&delayed_dg_host_queue_size);
vmci_resource_put(resource);
return VMCI_ERROR_NO_MEM;
}
dg_info = kmalloc(sizeof(*dg_info) +
(size_t) dg->payload_size, GFP_ATOMIC);
if (!dg_info) {
atomic_dec(&delayed_dg_host_queue_size);
vmci_resource_put(resource);
return VMCI_ERROR_NO_MEM;
}
dg_info->in_dg_host_queue = true;
dg_info->entry = dst_entry;
memcpy(&dg_info->msg, dg, dg_size);
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
retval = VMCI_SUCCESS;
} else {
retval = dst_entry->recv_cb(dst_entry->client_data, dg);
vmci_resource_put(resource);
if (retval < VMCI_SUCCESS)
return retval;
}
} else {
/* Route to destination VM context. */
struct vmci_datagram *new_dg;
if (context_id != dg->dst.context) {
if (vmci_deny_interaction(src_priv_flags,
vmci_context_get_priv_flags
(dg->dst.context))) {
return VMCI_ERROR_NO_ACCESS;
} else if (VMCI_CONTEXT_IS_VM(context_id)) {
/*
* If the sending context is a VM, it
* cannot reach another VM.
*/
pr_devel("Datagram communication between VMs not supported (src=0x%x, dst=0x%x)\n",
context_id, dg->dst.context);
return VMCI_ERROR_DST_UNREACHABLE;
}
}
/* We make a copy to enqueue. */
new_dg = kmalloc(dg_size, GFP_KERNEL);
if (new_dg == NULL)
return VMCI_ERROR_NO_MEM;
memcpy(new_dg, dg, dg_size);
retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg);
if (retval < VMCI_SUCCESS) {
kfree(new_dg);
return retval;
}
}
/*
* We currently truncate the size to signed 32 bits. This doesn't
* matter for this handler as it only supports 4Kb messages.
*/
return (int)dg_size;
}
/*
* Dispatch datagram as a guest, down through the VMX and potentially to
* the host.
* Returns number of bytes sent on success, error code otherwise.
*/
static int dg_dispatch_as_guest(struct vmci_datagram *dg)
{
int retval;
struct vmci_resource *resource;
resource = vmci_resource_by_handle(dg->src,
VMCI_RESOURCE_TYPE_DATAGRAM);
if (!resource)
return VMCI_ERROR_NO_HANDLE;
retval = vmci_send_datagram(dg);
vmci_resource_put(resource);
return retval;
}
/*
* Dispatch datagram. This will determine the routing for the datagram
* and dispatch it accordingly.
* Returns number of bytes sent on success, error code otherwise.
*/
int vmci_datagram_dispatch(u32 context_id,
struct vmci_datagram *dg, bool from_guest)
{
int retval;
enum vmci_route route;
BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24);
if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) {
pr_devel("Payload (size=%llu bytes) too big to send\n",
(unsigned long long)dg->payload_size);
return VMCI_ERROR_INVALID_ARGS;
}
retval = vmci_route(&dg->src, &dg->dst, from_guest, &route);
if (retval < VMCI_SUCCESS) {
pr_devel("Failed to route datagram (src=0x%x, dst=0x%x, err=%d)\n",
dg->src.context, dg->dst.context, retval);
return retval;
}
if (VMCI_ROUTE_AS_HOST == route) {
if (VMCI_INVALID_ID == context_id)
context_id = VMCI_HOST_CONTEXT_ID;
return dg_dispatch_as_host(context_id, dg);
}
if (VMCI_ROUTE_AS_GUEST == route)
return dg_dispatch_as_guest(dg);
pr_warn("Unknown route (%d) for datagram\n", route);
return VMCI_ERROR_DST_UNREACHABLE;
}
/*
* Invoke the handler for the given datagram. This is intended to be
* called only when acting as a guest and receiving a datagram from the
* virtual device.
*/
int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
{
struct vmci_resource *resource;
struct datagram_entry *dst_entry;
resource = vmci_resource_by_handle(dg->dst,
VMCI_RESOURCE_TYPE_DATAGRAM);
if (!resource) {
pr_devel("destination (handle=0x%x:0x%x) doesn't exist\n",
dg->dst.context, dg->dst.resource);
return VMCI_ERROR_NO_HANDLE;
}
dst_entry = container_of(resource, struct datagram_entry, resource);
if (dst_entry->run_delayed) {
struct delayed_datagram_info *dg_info;
dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg->payload_size,
GFP_ATOMIC);
if (!dg_info) {
vmci_resource_put(resource);
return VMCI_ERROR_NO_MEM;
}
dg_info->in_dg_host_queue = false;
dg_info->entry = dst_entry;
memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
} else {
dst_entry->recv_cb(dst_entry->client_data, dg);
vmci_resource_put(resource);
}
return VMCI_SUCCESS;
}
/*
* vmci_datagram_create_handle_priv() - Create host context datagram endpoint
* @resource_id: The resource ID.
* @flags: Datagram Flags.
* @priv_flags: Privilege Flags.
* @recv_cb: Callback when receiving datagrams.
* @client_data: Pointer for a datagram_entry struct
* @out_handle: vmci_handle that is populated as a result of this function.
*
* Creates a host context datagram endpoint and returns a handle to it.
*/
int vmci_datagram_create_handle_priv(u32 resource_id,
u32 flags,
u32 priv_flags,
vmci_datagram_recv_cb recv_cb,
void *client_data,
struct vmci_handle *out_handle)
{
if (out_handle == NULL)
return VMCI_ERROR_INVALID_ARGS;
if (recv_cb == NULL) {
pr_devel("Client callback needed when creating datagram\n");
return VMCI_ERROR_INVALID_ARGS;
}
if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
return VMCI_ERROR_INVALID_ARGS;
return dg_create_handle(resource_id, flags, priv_flags, recv_cb,
client_data, out_handle);
}
EXPORT_SYMBOL_GPL(vmci_datagram_create_handle_priv);
/*
* vmci_datagram_create_handle() - Create host context datagram endpoint
* @resource_id: Resource ID.
* @flags: Datagram Flags.
* @recv_cb: Callback when receiving datagrams.
* @client_data: Pointer for a datagram_entry struct
* @out_handle: vmci_handle that is populated as a result of this function.
*
* Creates a host context datagram endpoint and returns a handle to
* it. Same as vmci_datagram_create_handle_priv without the privilege
* flags argument.
*/
int vmci_datagram_create_handle(u32 resource_id,
u32 flags,
vmci_datagram_recv_cb recv_cb,
void *client_data,
struct vmci_handle *out_handle)
{
return vmci_datagram_create_handle_priv(
resource_id, flags,
VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
recv_cb, client_data,
out_handle);
}
EXPORT_SYMBOL_GPL(vmci_datagram_create_handle);
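A hedged host-side usage sketch (the resource ID and names are hypothetical): register an endpoint with a receive callback, then tear it down with vmci_datagram_destroy_handle() when done:

/* Hypothetical caller, not in-tree code. */
static int my_recv_cb(void *client_data, struct vmci_datagram *dg)
{
        pr_info("datagram: %llu payload bytes\n",
                (unsigned long long)dg->payload_size);
        return 0;
}

static struct vmci_handle my_handle;

static int my_endpoint_init(void)
{
        int result = vmci_datagram_create_handle(42 /* resource id */,
                                                 VMCI_FLAG_DG_NONE,
                                                 my_recv_cb, NULL,
                                                 &my_handle);
        return result == VMCI_SUCCESS ? 0 : -ENODEV;
}

Teardown is symmetric: vmci_datagram_destroy_handle(my_handle) releases the resource once the handle is no longer needed.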
/*
* vmci_datagram_destroy_handle() - Destroys datagram handle
* @handle: vmci_handle to be destroyed and reaped.
*
* Use this function to destroy any datagram handles created by
* vmci_datagram_create_handle{,Priv} functions.
*/
int vmci_datagram_destroy_handle(struct vmci_handle handle)
{
struct datagram_entry *entry;
struct vmci_resource *resource;
resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM);
if (!resource) {
pr_devel("Failed to destroy datagram (handle=0x%x:0x%x)\n",
handle.context, handle.resource);
return VMCI_ERROR_NOT_FOUND;
}
entry = container_of(resource, struct datagram_entry, resource);
vmci_resource_put(&entry->resource);
vmci_resource_remove(&entry->resource);
kfree(entry);
return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_datagram_destroy_handle);
/*
* vmci_datagram_send() - Send a datagram
* @msg: The datagram to send.
*
* Sends the provided datagram on its merry way.
*/
int vmci_datagram_send(struct vmci_datagram *msg)
{
if (msg == NULL)
return VMCI_ERROR_INVALID_ARGS;
return vmci_datagram_dispatch(VMCI_INVALID_ID, msg, false);
}
EXPORT_SYMBOL_GPL(vmci_datagram_send);


@ -0,0 +1,52 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _VMCI_DATAGRAM_H_
#define _VMCI_DATAGRAM_H_
#include <linux/types.h>
#include <linux/list.h>
#include "vmci_context.h"
#define VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE 256
/*
* The struct vmci_datagram_queue_entry is a queue header for the in-kernel VMCI
* datagram queues. It is allocated in non-paged memory, as the
* content is accessed while holding a spinlock. The pending datagram
* itself may be allocated from paged memory. We shadow the size of
* the datagram in the non-paged queue entry as this size is used
* while holding the same spinlock as above.
*/
struct vmci_datagram_queue_entry {
struct list_head list_item; /* For queuing. */
size_t dg_size; /* Size of datagram. */
struct vmci_datagram *dg; /* Pending datagram. */
};
/* VMCIDatagramSendRecvInfo */
struct vmci_datagram_snd_rcv_info {
u64 addr;
u32 len;
s32 result;
};
/* Datagram API for non-public use. */
int vmci_datagram_dispatch(u32 context_id, struct vmci_datagram *dg,
bool from_guest);
int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg);
#endif /* _VMCI_DATAGRAM_H_ */


@@ -0,0 +1,604 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/completion.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_driver.h"
#include "vmci_route.h"
#define VMCI_DOORBELL_INDEX_BITS 6
#define VMCI_DOORBELL_INDEX_TABLE_SIZE (1 << VMCI_DOORBELL_INDEX_BITS)
#define VMCI_DOORBELL_HASH(_idx) hash_32(_idx, VMCI_DOORBELL_INDEX_BITS)
/*
* struct dbell_entry describes a doorbell notification handle allocated by the
* host.
*/
struct dbell_entry {
struct vmci_resource resource;
struct hlist_node node;
struct work_struct work;
vmci_callback notify_cb;
void *client_data;
u32 idx;
u32 priv_flags;
bool run_delayed;
atomic_t active; /* Only used by guest personality */
};
/* The VMCI index table keeps track of currently registered doorbells. */
struct dbell_index_table {
spinlock_t lock; /* Index table lock */
struct hlist_head entries[VMCI_DOORBELL_INDEX_TABLE_SIZE];
};
static struct dbell_index_table vmci_doorbell_it = {
.lock = __SPIN_LOCK_UNLOCKED(vmci_doorbell_it.lock),
};
/*
* The max_notify_idx is one larger than the currently known bitmap index in
* use, and is used to determine how much of the bitmap needs to be scanned.
*/
static u32 max_notify_idx;
/*
* The notify_idx_count is used for determining whether there are free entries
* within the bitmap (if notify_idx_count + 1 < max_notify_idx).
*/
static u32 notify_idx_count;
/*
* The last_notify_idx_reserved is used to track the last index handed out - in
* the case where multiple handles share a notification index, we hand out
* indexes round robin based on last_notify_idx_reserved.
*/
static u32 last_notify_idx_reserved;
/* This is a one-entry cache used by the index allocation. */
static u32 last_notify_idx_released = PAGE_SIZE;
/*
* Utility function that retrieves the privilege flags associated
* with a given doorbell handle. For guest endpoints, the
* privileges are determined by the context ID, but for host
* endpoints privileges are associated with the complete
* handle. Hypervisor endpoints are not yet supported.
*/
int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags)
{
if (priv_flags == NULL || handle.context == VMCI_INVALID_ID)
return VMCI_ERROR_INVALID_ARGS;
if (handle.context == VMCI_HOST_CONTEXT_ID) {
struct dbell_entry *entry;
struct vmci_resource *resource;
resource = vmci_resource_by_handle(handle,
VMCI_RESOURCE_TYPE_DOORBELL);
if (!resource)
return VMCI_ERROR_NOT_FOUND;
entry = container_of(resource, struct dbell_entry, resource);
*priv_flags = entry->priv_flags;
vmci_resource_put(resource);
} else if (handle.context == VMCI_HYPERVISOR_CONTEXT_ID) {
/*
* Hypervisor endpoints for notifications are not
* supported (yet).
*/
return VMCI_ERROR_INVALID_ARGS;
} else {
*priv_flags = vmci_context_get_priv_flags(handle.context);
}
return VMCI_SUCCESS;
}
/*
* Find doorbell entry by bitmap index.
*/
static struct dbell_entry *dbell_index_table_find(u32 idx)
{
u32 bucket = VMCI_DOORBELL_HASH(idx);
struct dbell_entry *dbell;
struct hlist_node *node;
hlist_for_each_entry(dbell, node, &vmci_doorbell_it.entries[bucket],
node) {
if (idx == dbell->idx)
return dbell;
}
return NULL;
}
/*
* Add the given entry to the index table. This will take a reference to the
* entry's resource so that the entry is not deleted before it is removed from
* the table.
*/
static void dbell_index_table_add(struct dbell_entry *entry)
{
u32 bucket;
u32 new_notify_idx;
vmci_resource_get(&entry->resource);
spin_lock_bh(&vmci_doorbell_it.lock);
/*
* Below we try to allocate an index in the notification
* bitmap with "not too much" sharing between resources. If we
* use less than the full bitmap, we either add to the end if
* there are no unused flags within the currently used area,
* or we search for unused ones. If we use the full bitmap, we
* allocate the index round robin.
*/
if (max_notify_idx < PAGE_SIZE || notify_idx_count < PAGE_SIZE) {
if (last_notify_idx_released < max_notify_idx &&
!dbell_index_table_find(last_notify_idx_released)) {
new_notify_idx = last_notify_idx_released;
last_notify_idx_released = PAGE_SIZE;
} else {
bool reused = false;
new_notify_idx = last_notify_idx_reserved;
if (notify_idx_count + 1 < max_notify_idx) {
do {
if (!dbell_index_table_find
(new_notify_idx)) {
reused = true;
break;
}
new_notify_idx = (new_notify_idx + 1) %
max_notify_idx;
} while (new_notify_idx !=
last_notify_idx_released);
}
if (!reused) {
new_notify_idx = max_notify_idx;
max_notify_idx++;
}
}
} else {
new_notify_idx = (last_notify_idx_reserved + 1) % PAGE_SIZE;
}
last_notify_idx_reserved = new_notify_idx;
notify_idx_count++;
entry->idx = new_notify_idx;
bucket = VMCI_DOORBELL_HASH(entry->idx);
hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);
spin_unlock_bh(&vmci_doorbell_it.lock);
}
/*
* Remove the given entry from the index table. This drops the reference
* taken on the entry's resource when it was added.
*/
static void dbell_index_table_remove(struct dbell_entry *entry)
{
spin_lock_bh(&vmci_doorbell_it.lock);
hlist_del_init(&entry->node);
notify_idx_count--;
if (entry->idx == max_notify_idx - 1) {
/*
* If we delete an entry with the maximum known
* notification index, we take the opportunity to
* prune the current max. As there might be other
* unused indices immediately below, we lower the
* maximum until we hit an index in use.
*/
while (max_notify_idx > 0 &&
!dbell_index_table_find(max_notify_idx - 1))
max_notify_idx--;
}
last_notify_idx_released = entry->idx;
spin_unlock_bh(&vmci_doorbell_it.lock);
vmci_resource_put(&entry->resource);
}
/*
* Creates a link between the given doorbell handle and the given
* index in the bitmap in the device backend. A notification state
* is created in the hypervisor.
*/
static int dbell_link(struct vmci_handle handle, u32 notify_idx)
{
struct vmci_doorbell_link_msg link_msg;
link_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_DOORBELL_LINK);
link_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
link_msg.hdr.payload_size = sizeof(link_msg) - VMCI_DG_HEADERSIZE;
link_msg.handle = handle;
link_msg.notify_idx = notify_idx;
return vmci_send_datagram(&link_msg.hdr);
}
/*
* Unlinks the given doorbell handle from an index in the bitmap in
* the device backend. The notification state is destroyed in the hypervisor.
*/
static int dbell_unlink(struct vmci_handle handle)
{
struct vmci_doorbell_unlink_msg unlink_msg;
unlink_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_DOORBELL_UNLINK);
unlink_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
unlink_msg.hdr.payload_size = sizeof(unlink_msg) - VMCI_DG_HEADERSIZE;
unlink_msg.handle = handle;
return vmci_send_datagram(&unlink_msg.hdr);
}
/*
* Notify another guest or the host. We send a datagram down to the
* host via the hypervisor with the notification info.
*/
static int dbell_notify_as_guest(struct vmci_handle handle, u32 priv_flags)
{
struct vmci_doorbell_notify_msg notify_msg;
notify_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_DOORBELL_NOTIFY);
notify_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
notify_msg.hdr.payload_size = sizeof(notify_msg) - VMCI_DG_HEADERSIZE;
notify_msg.handle = handle;
return vmci_send_datagram(&notify_msg.hdr);
}
/*
* Calls the specified callback in a delayed context.
*/
static void dbell_delayed_dispatch(struct work_struct *work)
{
struct dbell_entry *entry = container_of(work,
struct dbell_entry, work);
entry->notify_cb(entry->client_data);
vmci_resource_put(&entry->resource);
}
/*
* Dispatches a doorbell notification to the host context.
*/
int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
{
struct dbell_entry *entry;
struct vmci_resource *resource;
if (vmci_handle_is_invalid(handle)) {
pr_devel("Notifying an invalid doorbell (handle=0x%x:0x%x)\n",
handle.context, handle.resource);
return VMCI_ERROR_INVALID_ARGS;
}
resource = vmci_resource_by_handle(handle,
VMCI_RESOURCE_TYPE_DOORBELL);
if (!resource) {
pr_devel("Notifying an unknown doorbell (handle=0x%x:0x%x)\n",
handle.context, handle.resource);
return VMCI_ERROR_NOT_FOUND;
}
entry = container_of(resource, struct dbell_entry, resource);
if (entry->run_delayed) {
schedule_work(&entry->work);
} else {
entry->notify_cb(entry->client_data);
vmci_resource_put(resource);
}
return VMCI_SUCCESS;
}
/*
* Register the notification bitmap with the host.
*/
bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
{
int result;
struct vmci_notify_bm_set_msg bitmap_set_msg;
bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_SET_NOTIFY_BITMAP);
bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
VMCI_DG_HEADERSIZE;
bitmap_set_msg.bitmap_ppn = bitmap_ppn;
result = vmci_send_datagram(&bitmap_set_msg.hdr);
if (result != VMCI_SUCCESS) {
pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n",
bitmap_ppn, result);
return false;
}
return true;
}
/*
* Executes or schedules the handlers for a given notify index.
*/
static void dbell_fire_entries(u32 notify_idx)
{
u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
struct dbell_entry *dbell;
struct hlist_node *node;
spin_lock_bh(&vmci_doorbell_it.lock);
hlist_for_each_entry(dbell, node,
&vmci_doorbell_it.entries[bucket], node) {
if (dbell->idx == notify_idx &&
atomic_read(&dbell->active) == 1) {
if (dbell->run_delayed) {
vmci_resource_get(&dbell->resource);
schedule_work(&dbell->work);
} else {
dbell->notify_cb(dbell->client_data);
}
}
}
spin_unlock_bh(&vmci_doorbell_it.lock);
}
/*
* Scans the notification bitmap, collects pending notifications,
* resets the bitmap and invokes appropriate callbacks.
*/
void vmci_dbell_scan_notification_entries(u8 *bitmap)
{
u32 idx;
for (idx = 0; idx < max_notify_idx; idx++) {
if (bitmap[idx] & 0x1) {
bitmap[idx] &= ~1;
dbell_fire_entries(idx);
}
}
}
/*
* vmci_doorbell_create() - Creates a doorbell
* @handle: A handle used to track the resource. Can be invalid.
* @flags: Flag that determines context of callback.
* @priv_flags: Privileges flags.
* @notify_cb: The callback to be invoked when the doorbell fires.
* @client_data: A parameter to be passed to the callback.
*
* Creates a doorbell with the given callback. If the handle is
* VMCI_INVALID_HANDLE, a free handle will be assigned, if
* possible. The callback can be run immediately (potentially with
* locks held - the default) or delayed (in a kernel thread) by
* specifying the flag VMCI_FLAG_DELAYED_CB. If delayed execution
* is selected, a given callback may not be run if the kernel is
* unable to allocate memory for the delayed execution (highly
* unlikely).
*/
int vmci_doorbell_create(struct vmci_handle *handle,
u32 flags,
u32 priv_flags,
vmci_callback notify_cb, void *client_data)
{
struct dbell_entry *entry;
struct vmci_handle new_handle;
int result;
if (!handle || !notify_cb || flags & ~VMCI_FLAG_DELAYED_CB ||
priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
return VMCI_ERROR_INVALID_ARGS;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (entry == NULL) {
pr_warn("Failed allocating memory for datagram entry\n");
return VMCI_ERROR_NO_MEM;
}
if (vmci_handle_is_invalid(*handle)) {
u32 context_id = vmci_get_context_id();
/* Let resource code allocate a free ID for us */
new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
} else {
bool valid_context = false;
/*
* Validate the handle. We must do both of the checks below
* because we can be acting as both a host and a guest at the
* same time. We always allow the host context ID, since the
* host functionality is in practice always there with the
* unified driver.
*/
if (handle->context == VMCI_HOST_CONTEXT_ID ||
(vmci_guest_code_active() &&
vmci_get_context_id() == handle->context)) {
valid_context = true;
}
if (!valid_context || handle->resource == VMCI_INVALID_ID) {
pr_devel("Invalid argument (handle=0x%x:0x%x)\n",
handle->context, handle->resource);
result = VMCI_ERROR_INVALID_ARGS;
goto free_mem;
}
new_handle = *handle;
}
entry->idx = 0;
INIT_HLIST_NODE(&entry->node);
entry->priv_flags = priv_flags;
INIT_WORK(&entry->work, dbell_delayed_dispatch);
entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB;
entry->notify_cb = notify_cb;
entry->client_data = client_data;
atomic_set(&entry->active, 0);
result = vmci_resource_add(&entry->resource,
VMCI_RESOURCE_TYPE_DOORBELL,
new_handle);
if (result != VMCI_SUCCESS) {
pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
new_handle.context, new_handle.resource, result);
goto free_mem;
}
new_handle = vmci_resource_handle(&entry->resource);
if (vmci_guest_code_active()) {
dbell_index_table_add(entry);
result = dbell_link(new_handle, entry->idx);
if (VMCI_SUCCESS != result)
goto destroy_resource;
atomic_set(&entry->active, 1);
}
*handle = new_handle;
return result;
destroy_resource:
dbell_index_table_remove(entry);
vmci_resource_remove(&entry->resource);
free_mem:
kfree(entry);
return result;
}
EXPORT_SYMBOL_GPL(vmci_doorbell_create);
/*
* vmci_doorbell_destroy() - Destroy a doorbell.
* @handle: The handle tracking the resource.
*
* Destroys a doorbell previously created with vmci_doorbell_create(). This
* operation may block waiting for a callback to finish.
*/
int vmci_doorbell_destroy(struct vmci_handle handle)
{
struct dbell_entry *entry;
struct vmci_resource *resource;
if (vmci_handle_is_invalid(handle))
return VMCI_ERROR_INVALID_ARGS;
resource = vmci_resource_by_handle(handle,
VMCI_RESOURCE_TYPE_DOORBELL);
if (!resource) {
pr_devel("Failed to destroy doorbell (handle=0x%x:0x%x)\n",
handle.context, handle.resource);
return VMCI_ERROR_NOT_FOUND;
}
entry = container_of(resource, struct dbell_entry, resource);
if (vmci_guest_code_active()) {
int result;
dbell_index_table_remove(entry);
result = dbell_unlink(handle);
if (VMCI_SUCCESS != result) {
/*
* The only reason this should fail would be
* an inconsistency between guest and
* hypervisor state, where the guest believes
* it has an active registration whereas the
* hypervisor doesn't. One case where this may
* happen is if a doorbell is unregistered
* following a hibernation at a time where the
* doorbell state hasn't been restored on the
* hypervisor side yet. Since the handle has
* now been removed in the guest, we just
* print a warning and return success.
*/
pr_devel("Unlink of doorbell (handle=0x%x:0x%x) unknown by hypervisor (error=%d)\n",
handle.context, handle.resource, result);
}
}
/*
* Now remove the resource from the table. It might still be in use
* after this, in a callback or still on the delayed work queue.
*/
vmci_resource_put(&entry->resource);
vmci_resource_remove(&entry->resource);
kfree(entry);
return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_doorbell_destroy);
/*
* vmci_doorbell_notify() - Ring the doorbell (and hide in the bushes).
* @dst: The handle identifying the doorbell resource
* @priv_flags: Privilege flags.
*
* Generates a notification on the doorbell identified by the
* handle. For host side generation of notifications, the caller
* can specify what the privilege of the calling side is.
*/
int vmci_doorbell_notify(struct vmci_handle dst, u32 priv_flags)
{
int retval;
enum vmci_route route;
struct vmci_handle src;
if (vmci_handle_is_invalid(dst) ||
(priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS))
return VMCI_ERROR_INVALID_ARGS;
src = VMCI_INVALID_HANDLE;
retval = vmci_route(&src, &dst, false, &route);
if (retval < VMCI_SUCCESS)
return retval;
if (VMCI_ROUTE_AS_HOST == route)
return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID,
dst, priv_flags);
if (VMCI_ROUTE_AS_GUEST == route)
return dbell_notify_as_guest(dst, priv_flags);
pr_warn("Unknown route (%d) for doorbell\n", route);
return VMCI_ERROR_DST_UNREACHABLE;
}
EXPORT_SYMBOL_GPL(vmci_doorbell_notify);
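/*
 * Illustrative sketch (not part of the original file): typical client use
 * of the doorbell API above. example_dbell_cb is a hypothetical name; the
 * sketch requests VMCI_FLAG_DELAYED_CB so the callback runs from a work
 * queue rather than in atomic context.
 */
static void example_dbell_cb(void *client_data)
{
pr_info("example: doorbell rang\n");
}
static int example_doorbell_roundtrip(void)
{
struct vmci_handle handle = VMCI_INVALID_HANDLE;
int result;
result = vmci_doorbell_create(&handle, VMCI_FLAG_DELAYED_CB,
VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
example_dbell_cb, NULL);
if (result != VMCI_SUCCESS)
return result;
/* Ring our own doorbell; normally a peer would do this. */
result = vmci_doorbell_notify(handle,
VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS);
vmci_doorbell_destroy(handle);
return result;
}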


@@ -0,0 +1,51 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef VMCI_DOORBELL_H
#define VMCI_DOORBELL_H
#include <linux/vmw_vmci_defs.h>
#include <linux/types.h>
#include "vmci_driver.h"
/*
* VMCINotifyResourceInfo: Used to create and destroy doorbells, and
* generate a notification for a doorbell or queue pair.
*/
struct vmci_dbell_notify_resource_info {
struct vmci_handle handle;
u16 resource;
u16 action;
s32 result;
};
/*
* Structure used for checkpointing the doorbell mappings. It is
* written to the checkpoint as is, so changing this structure will
* break checkpoint compatibility.
*/
struct dbell_cpt_state {
struct vmci_handle handle;
u64 bitmap_idx;
};
int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle);
int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags);
bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn);
void vmci_dbell_scan_notification_entries(u8 *bitmap);
#endif /* VMCI_DOORBELL_H */


@@ -0,0 +1,117 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include "vmci_driver.h"
#include "vmci_event.h"
static bool vmci_disable_host;
module_param_named(disable_host, vmci_disable_host, bool, 0);
MODULE_PARM_DESC(disable_host,
"Disable driver host personality (default=enabled)");
static bool vmci_disable_guest;
module_param_named(disable_guest, vmci_disable_guest, bool, 0);
MODULE_PARM_DESC(disable_guest,
"Disable driver guest personality (default=enabled)");
static bool vmci_guest_personality_initialized;
static bool vmci_host_personality_initialized;
/*
* vmci_get_context_id() - Gets the current context ID.
*
* Returns the guest context ID when the guest personality is active,
* the host context ID when only the host personality is active, and
* VMCI_INVALID_ID when neither is available.
*/
u32 vmci_get_context_id(void)
{
if (vmci_guest_code_active())
return vmci_get_vm_context_id();
else if (vmci_host_code_active())
return VMCI_HOST_CONTEXT_ID;
return VMCI_INVALID_ID;
}
EXPORT_SYMBOL_GPL(vmci_get_context_id);
static int __init vmci_drv_init(void)
{
int vmci_err;
int error;
vmci_err = vmci_event_init();
if (vmci_err < VMCI_SUCCESS) {
pr_err("Failed to initialize VMCIEvent (result=%d)\n",
vmci_err);
return -EINVAL;
}
if (!vmci_disable_guest) {
error = vmci_guest_init();
if (error) {
pr_warn("Failed to initialize guest personality (err=%d)\n",
error);
} else {
vmci_guest_personality_initialized = true;
pr_info("Guest personality initialized and is %s\n",
vmci_guest_code_active() ?
"active" : "inactive");
}
}
if (!vmci_disable_host) {
error = vmci_host_init();
if (error) {
pr_warn("Unable to initialize host personality (err=%d)\n",
error);
} else {
vmci_host_personality_initialized = true;
pr_info("Initialized host personality\n");
}
}
if (!vmci_guest_personality_initialized &&
!vmci_host_personality_initialized) {
vmci_event_exit();
return -ENODEV;
}
return 0;
}
module_init(vmci_drv_init);
static void __exit vmci_drv_exit(void)
{
if (vmci_guest_personality_initialized)
vmci_guest_exit();
if (vmci_host_personality_initialized)
vmci_host_exit();
vmci_event_exit();
}
module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
MODULE_VERSION("1.0.0.0-k");
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,50 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _VMCI_DRIVER_H_
#define _VMCI_DRIVER_H_
#include <linux/vmw_vmci_defs.h>
#include <linux/wait.h>
#include "vmci_queue_pair.h"
#include "vmci_context.h"
enum vmci_obj_type {
VMCIOBJ_VMX_VM = 10,
VMCIOBJ_CONTEXT,
VMCIOBJ_SOCKET,
VMCIOBJ_NOT_SET,
};
/* For storing VMCI structures in file handles. */
struct vmci_obj {
void *ptr;
enum vmci_obj_type type;
};
u32 vmci_get_context_id(void);
int vmci_send_datagram(struct vmci_datagram *dg);
int vmci_host_init(void);
void vmci_host_exit(void);
bool vmci_host_code_active(void);
int vmci_guest_init(void);
void vmci_guest_exit(void);
bool vmci_guest_code_active(void);
u32 vmci_get_vm_context_id(void);
#endif /* _VMCI_DRIVER_H_ */


@@ -0,0 +1,224 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "vmci_driver.h"
#include "vmci_event.h"
#define EVENT_MAGIC 0xEABE0000
#define VMCI_EVENT_MAX_ATTEMPTS 10
struct vmci_subscription {
u32 id;
u32 event;
vmci_event_cb callback;
void *callback_data;
struct list_head node; /* on one of subscriber lists */
};
static struct list_head subscriber_array[VMCI_EVENT_MAX];
static DEFINE_MUTEX(subscriber_mutex);
int __init vmci_event_init(void)
{
int i;
for (i = 0; i < VMCI_EVENT_MAX; i++)
INIT_LIST_HEAD(&subscriber_array[i]);
return VMCI_SUCCESS;
}
void vmci_event_exit(void)
{
int e;
/* We free all memory at exit. */
for (e = 0; e < VMCI_EVENT_MAX; e++) {
struct vmci_subscription *cur, *p2;
list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
/*
* We should never get here because all events
* should have been unregistered before we try
* to unload the driver module.
*/
pr_warn("Unexpected free events occurring\n");
list_del(&cur->node);
kfree(cur);
}
}
}
/*
* Find entry. Assumes subscriber_mutex is held.
*/
static struct vmci_subscription *event_find(u32 sub_id)
{
int e;
for (e = 0; e < VMCI_EVENT_MAX; e++) {
struct vmci_subscription *cur;
list_for_each_entry(cur, &subscriber_array[e], node) {
if (cur->id == sub_id)
return cur;
}
}
return NULL;
}
/*
* Actually delivers the events to the subscribers.
* The callback function for each subscriber is invoked.
*/
static void event_deliver(struct vmci_event_msg *event_msg)
{
struct vmci_subscription *cur;
struct list_head *subscriber_list;
rcu_read_lock();
subscriber_list = &subscriber_array[event_msg->event_data.event];
list_for_each_entry_rcu(cur, subscriber_list, node) {
cur->callback(cur->id, &event_msg->event_data,
cur->callback_data);
}
rcu_read_unlock();
}
/*
* Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
* subscribers for given event.
*/
int vmci_event_dispatch(struct vmci_datagram *msg)
{
struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
if (msg->payload_size < sizeof(u32) ||
msg->payload_size > sizeof(struct vmci_event_data_max))
return VMCI_ERROR_INVALID_ARGS;
if (!VMCI_EVENT_VALID(event_msg->event_data.event))
return VMCI_ERROR_EVENT_UNKNOWN;
event_deliver(event_msg);
return VMCI_SUCCESS;
}
/*
* vmci_event_subscribe() - Subscribe to a given event.
* @event: The event to subscribe to.
* @callback: The callback to invoke upon the event.
* @callback_data: Data to pass to the callback.
* @new_subscription_id: ID used to track the subscription; used with
* vmci_event_unsubscribe().
*
* Subscribes to the provided event. The specified callback is invoked
* from an RCU critical section and therefore must not sleep.
*/
int vmci_event_subscribe(u32 event,
vmci_event_cb callback,
void *callback_data,
u32 *new_subscription_id)
{
struct vmci_subscription *sub;
int attempts;
int retval;
bool have_new_id = false;
if (!new_subscription_id) {
pr_devel("%s: Invalid subscription (NULL)\n", __func__);
return VMCI_ERROR_INVALID_ARGS;
}
if (!VMCI_EVENT_VALID(event) || !callback) {
pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
__func__, event, callback, callback_data);
return VMCI_ERROR_INVALID_ARGS;
}
sub = kzalloc(sizeof(*sub), GFP_KERNEL);
if (!sub)
return VMCI_ERROR_NO_MEM;
sub->id = VMCI_EVENT_MAX;
sub->event = event;
sub->callback = callback;
sub->callback_data = callback_data;
INIT_LIST_HEAD(&sub->node);
mutex_lock(&subscriber_mutex);
/* Creation of a new event is always allowed. */
for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
static u32 subscription_id;
/*
* We try to get an ID a few times before
* claiming we are out of resources.
*/
/* Test for duplicate id. */
if (!event_find(++subscription_id)) {
sub->id = subscription_id;
have_new_id = true;
break;
}
}
if (have_new_id) {
list_add_rcu(&sub->node, &subscriber_array[event]);
retval = VMCI_SUCCESS;
} else {
retval = VMCI_ERROR_NO_RESOURCES;
}
mutex_unlock(&subscriber_mutex);
if (retval != VMCI_SUCCESS) {
/* The entry was never added to a list; free it here. */
kfree(sub);
return retval;
}
*new_subscription_id = sub->id;
return retval;
}
EXPORT_SYMBOL_GPL(vmci_event_subscribe);
/*
* vmci_event_unsubscribe() - unsubscribe from an event.
* @sub_id: A subscription ID as provided by vmci_event_subscribe()
*
* Unsubscribe from the given event. Removes the subscription from the
* list and frees it once any in-flight callbacks have finished (via
* synchronize_rcu()).
*/
int vmci_event_unsubscribe(u32 sub_id)
{
struct vmci_subscription *s;
mutex_lock(&subscriber_mutex);
s = event_find(sub_id);
if (s)
list_del_rcu(&s->node);
mutex_unlock(&subscriber_mutex);
if (!s)
return VMCI_ERROR_NOT_FOUND;
synchronize_rcu();
kfree(s);
return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
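/*
 * Illustrative sketch (not part of the original file): subscribing to
 * context-ID-update events. example_event_cb is a hypothetical name; as
 * noted above, the callback runs inside an RCU read-side section and must
 * not sleep.
 */
static void example_event_cb(u32 sub_id,
const struct vmci_event_data *ed,
void *client_data)
{
pr_info("example: event %d fired\n", ed->event);
}
static int example_watch_cid_updates(u32 *sub_id)
{
int result;
result = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
example_event_cb, NULL, sub_id);
if (result < VMCI_SUCCESS)
return result;
/* ... and once updates are no longer interesting: */
return vmci_event_unsubscribe(*sub_id);
}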


@@ -0,0 +1,25 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef __VMCI_EVENT_H__
#define __VMCI_EVENT_H__
#include <linux/vmw_vmci_api.h>
int vmci_event_init(void);
void vmci_event_exit(void);
int vmci_event_dispatch(struct vmci_datagram *msg);
#endif /*__VMCI_EVENT_H__ */


@@ -0,0 +1,759 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740
#define VMCI_UTIL_NUM_RESOURCES 1
static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
static bool vmci_disable_msix;
module_param_named(disable_msix, vmci_disable_msix, bool, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
static u32 ctx_update_sub_id = VMCI_INVALID_ID;
static u32 vm_context_id = VMCI_INVALID_ID;
struct vmci_guest_device {
struct device *dev; /* PCI device we are attached to */
void __iomem *iobase;
unsigned int irq;
unsigned int intr_type;
bool exclusive_vectors;
struct msix_entry msix_entries[VMCI_MAX_INTRS];
struct tasklet_struct datagram_tasklet;
struct tasklet_struct bm_tasklet;
void *data_buffer;
void *notification_bitmap;
};
/* vmci_dev singleton device and supporting data */
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);
static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);
bool vmci_guest_code_active(void)
{
return atomic_read(&vmci_num_guest_devices) != 0;
}
u32 vmci_get_vm_context_id(void)
{
if (vm_context_id == VMCI_INVALID_ID) {
struct vmci_datagram get_cid_msg;
get_cid_msg.dst =
vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_GET_CONTEXT_ID);
get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
get_cid_msg.payload_size = 0;
vm_context_id = vmci_send_datagram(&get_cid_msg);
}
return vm_context_id;
}
/*
* VM to hypervisor call mechanism. We use the standard VMware naming
* convention since shared code is calling this function as well.
*/
int vmci_send_datagram(struct vmci_datagram *dg)
{
unsigned long flags;
int result;
/* Check args. */
if (dg == NULL)
return VMCI_ERROR_INVALID_ARGS;
/*
* Need to acquire spinlock on the device because the datagram
* data may be spread over multiple pages and the monitor may
* interleave device user rpc calls from multiple
* VCPUs. Acquiring the spinlock precludes that
* possibility. Interrupts are disabled so that incoming
* datagrams cannot arrive during a "rep out" and re-enter
* this function.
*/
spin_lock_irqsave(&vmci_dev_spinlock, flags);
if (vmci_dev_g) {
iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
dg, VMCI_DG_SIZE(dg));
result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
} else {
result = VMCI_ERROR_UNAVAILABLE;
}
spin_unlock_irqrestore(&vmci_dev_spinlock, flags);
return result;
}
EXPORT_SYMBOL_GPL(vmci_send_datagram);
/*
* Called with the new context ID when the VM's context ID is updated,
* e.g. after a resume.
*/
static void vmci_guest_cid_update(u32 sub_id,
const struct vmci_event_data *event_data,
void *client_data)
{
const struct vmci_event_payld_ctx *ev_payload =
vmci_event_data_const_payload(event_data);
if (sub_id != ctx_update_sub_id) {
pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
return;
}
if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
pr_devel("Invalid event data\n");
return;
}
pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
vm_context_id, ev_payload->context_id, event_data->event);
vm_context_id = ev_payload->context_id;
}
/*
* Verify that the host supports the hypercalls we need. If it does not,
* try to find fallback hypercalls and use those instead. Returns
* true if required hypercalls (or fallback hypercalls) are
* supported by the host, false otherwise.
*/
static bool vmci_check_host_caps(struct pci_dev *pdev)
{
bool result;
struct vmci_resource_query_msg *msg;
u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
struct vmci_datagram *check_msg;
check_msg = kmalloc(msg_size, GFP_KERNEL);
if (!check_msg) {
dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
return false;
}
check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_RESOURCES_QUERY);
check_msg->src = VMCI_ANON_SRC_HANDLE;
check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);
msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
msg->resources[0] = VMCI_GET_CONTEXT_ID;
/* Check that hypercalls are supported */
result = vmci_send_datagram(check_msg) == 0x01;
kfree(check_msg);
dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
__func__, result ? "PASSED" : "FAILED");
/* We need this hypercall. There are no fallbacks. */
return result;
}
/*
* Reads datagrams from the data in port and dispatches them. We
* always start reading datagrams into only the first page of the
* datagram buffer. If the datagrams don't fit into one page, we
* use the maximum datagram buffer size for the remainder of the
* invocation. This is a simple heuristic for not penalizing
* small datagrams.
*
* This function assumes that it has exclusive access to the data
* in port for the duration of the call.
*/
static void vmci_dispatch_dgs(unsigned long data)
{
struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
u8 *dg_in_buffer = vmci_dev->data_buffer;
struct vmci_datagram *dg;
size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
size_t current_dg_in_buffer_size = PAGE_SIZE;
size_t remaining_bytes;
BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);
ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
vmci_dev->data_buffer, current_dg_in_buffer_size);
dg = (struct vmci_datagram *)dg_in_buffer;
remaining_bytes = current_dg_in_buffer_size;
while (dg->dst.resource != VMCI_INVALID_ID ||
remaining_bytes > PAGE_SIZE) {
unsigned dg_in_size;
/*
* When the input buffer spans multiple pages, a datagram can
* start on any page boundary in the buffer.
*/
if (dg->dst.resource == VMCI_INVALID_ID) {
dg = (struct vmci_datagram *)roundup(
(uintptr_t)dg + 1, PAGE_SIZE);
remaining_bytes =
(size_t)(dg_in_buffer +
current_dg_in_buffer_size -
(u8 *)dg);
continue;
}
dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);
if (dg_in_size <= dg_in_buffer_size) {
int result;
/*
* If the remaining bytes in the datagram
* buffer don't contain the complete
* datagram, we first make sure we have enough
* room for it and then we read the remainder
* of the datagram and possibly any following
* datagrams.
*/
if (dg_in_size > remaining_bytes) {
if (remaining_bytes !=
current_dg_in_buffer_size) {
/*
* We move the partial
* datagram to the front and
* read the reminder of the
* datagram and possibly
* following calls into the
* following bytes.
*/
memmove(dg_in_buffer, dg_in_buffer +
current_dg_in_buffer_size -
remaining_bytes,
remaining_bytes);
dg = (struct vmci_datagram *)
dg_in_buffer;
}
if (current_dg_in_buffer_size !=
dg_in_buffer_size)
current_dg_in_buffer_size =
dg_in_buffer_size;
ioread8_rep(vmci_dev->iobase +
VMCI_DATA_IN_ADDR,
vmci_dev->data_buffer +
remaining_bytes,
current_dg_in_buffer_size -
remaining_bytes);
}
/*
* We special case event datagrams from the
* hypervisor.
*/
if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
dg->dst.resource == VMCI_EVENT_HANDLER) {
result = vmci_event_dispatch(dg);
} else {
result = vmci_datagram_invoke_guest_handler(dg);
}
if (result < VMCI_SUCCESS)
dev_dbg(vmci_dev->dev,
"Datagram with resource (ID=0x%x) failed (err=%d)\n",
dg->dst.resource, result);
/* On to the next datagram. */
dg = (struct vmci_datagram *)((u8 *)dg +
dg_in_size);
} else {
size_t bytes_to_skip;
/*
* Datagram doesn't fit in datagram buffer of maximal
* size. We drop it.
*/
dev_dbg(vmci_dev->dev,
"Failed to receive datagram (size=%u bytes)\n",
dg_in_size);
bytes_to_skip = dg_in_size - remaining_bytes;
if (current_dg_in_buffer_size != dg_in_buffer_size)
current_dg_in_buffer_size = dg_in_buffer_size;
for (;;) {
ioread8_rep(vmci_dev->iobase +
VMCI_DATA_IN_ADDR,
vmci_dev->data_buffer,
current_dg_in_buffer_size);
if (bytes_to_skip <= current_dg_in_buffer_size)
break;
bytes_to_skip -= current_dg_in_buffer_size;
}
dg = (struct vmci_datagram *)(dg_in_buffer +
bytes_to_skip);
}
remaining_bytes =
(size_t) (dg_in_buffer + current_dg_in_buffer_size -
(u8 *)dg);
if (remaining_bytes < VMCI_DG_HEADERSIZE) {
/* Get the next batch of datagrams. */
ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
vmci_dev->data_buffer,
current_dg_in_buffer_size);
dg = (struct vmci_datagram *)dg_in_buffer;
remaining_bytes = current_dg_in_buffer_size;
}
}
}
/*
* Scans the notification bitmap for raised flags, clears them
* and handles the notifications.
*/
static void vmci_process_bitmap(unsigned long data)
{
struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
if (!dev->notification_bitmap) {
dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
return;
}
vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}
/*
* Enable MSI-X. Try exclusive vectors first, then shared vectors.
*/
static int vmci_enable_msix(struct pci_dev *pdev,
struct vmci_guest_device *vmci_dev)
{
int i;
int result;
for (i = 0; i < VMCI_MAX_INTRS; ++i) {
vmci_dev->msix_entries[i].entry = i;
vmci_dev->msix_entries[i].vector = i;
}
result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
if (result == 0)
vmci_dev->exclusive_vectors = true;
else if (result > 0)
result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);
return result;
}
/*
* Interrupt handler for legacy or MSI interrupt, or for first MSI-X
* interrupt (vector VMCI_INTR_DATAGRAM).
*/
static irqreturn_t vmci_interrupt(int irq, void *_dev)
{
struct vmci_guest_device *dev = _dev;
/*
* If we are using MSI-X with exclusive vectors then we simply schedule
* the datagram tasklet, since we know the interrupt was meant for us.
* Otherwise we must read the ICR to determine what to do.
*/
if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
tasklet_schedule(&dev->datagram_tasklet);
} else {
unsigned int icr;
/* Acknowledge interrupt and determine what needs doing. */
icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
if (icr == 0 || icr == ~0)
return IRQ_NONE;
if (icr & VMCI_ICR_DATAGRAM) {
tasklet_schedule(&dev->datagram_tasklet);
icr &= ~VMCI_ICR_DATAGRAM;
}
if (icr & VMCI_ICR_NOTIFICATION) {
tasklet_schedule(&dev->bm_tasklet);
icr &= ~VMCI_ICR_NOTIFICATION;
}
if (icr != 0)
dev_warn(dev->dev,
"Ignoring unknown interrupt cause (%d)\n",
icr);
}
return IRQ_HANDLED;
}
/*
* Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
* which is for the notification bitmap. Will only get called if we are
* using MSI-X with exclusive vectors.
*/
static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
{
struct vmci_guest_device *dev = _dev;
/* For MSI-X we can just assume it was meant for us. */
tasklet_schedule(&dev->bm_tasklet);
return IRQ_HANDLED;
}
/*
* Most of the initialization at module load time is done here.
*/
static int vmci_guest_probe_device(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct vmci_guest_device *vmci_dev;
void __iomem *iobase;
unsigned int capabilities;
unsigned long cmd;
int vmci_err;
int error;
dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");
error = pcim_enable_device(pdev);
if (error) {
dev_err(&pdev->dev,
"Failed to enable VMCI device: %d\n", error);
return error;
}
error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
if (error) {
dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
return error;
}
iobase = pcim_iomap_table(pdev)[0];
dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
(unsigned long)iobase, pdev->irq);
vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
if (!vmci_dev) {
dev_err(&pdev->dev,
"Can't allocate memory for VMCI device\n");
return -ENOMEM;
}
vmci_dev->dev = &pdev->dev;
vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
vmci_dev->exclusive_vectors = false;
vmci_dev->iobase = iobase;
tasklet_init(&vmci_dev->datagram_tasklet,
vmci_dispatch_dgs, (unsigned long)vmci_dev);
tasklet_init(&vmci_dev->bm_tasklet,
vmci_process_bitmap, (unsigned long)vmci_dev);
vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
if (!vmci_dev->data_buffer) {
dev_err(&pdev->dev,
"Can't allocate memory for datagram buffer\n");
return -ENOMEM;
}
pci_set_master(pdev); /* To enable queue_pair functionality. */
/*
* Verify that the VMCI Device supports the capabilities that
* we need. If the device is missing capabilities that we would
* like to use, check for fallback capabilities and use those
* instead (so we can run a new VM on old hosts). Fail the load if
* a required capability is missing and there is no fallback.
*
* Right now, we need datagrams. There are no fallbacks.
*/
capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
dev_err(&pdev->dev, "Device does not support datagrams\n");
error = -ENXIO;
goto err_free_data_buffer;
}
/*
* If the hardware supports notifications, we will use that as
* well.
*/
if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
if (!vmci_dev->notification_bitmap) {
dev_warn(&pdev->dev,
"Unable to allocate notification bitmap\n");
} else {
memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
capabilities |= VMCI_CAPS_NOTIFICATIONS;
}
}
dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);
/* Let the host know which capabilities we intend to use. */
iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);
/* Set up global device so that we can start sending datagrams */
spin_lock_irq(&vmci_dev_spinlock);
vmci_dev_g = vmci_dev;
spin_unlock_irq(&vmci_dev_spinlock);
/*
* Register notification bitmap with device if that capability is
* used.
*/
if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
struct page *page =
vmalloc_to_page(vmci_dev->notification_bitmap);
unsigned long bitmap_ppn = page_to_pfn(page);
if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
dev_warn(&pdev->dev,
"VMCI device unable to register notification bitmap with PPN 0x%x\n",
(u32) bitmap_ppn);
goto err_remove_vmci_dev_g;
}
}
/* Check host capabilities. */
if (!vmci_check_host_caps(pdev))
goto err_remove_bitmap;
/* Enable device. */
/*
* We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
* update the internal context id when needed.
*/
vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
vmci_guest_cid_update, NULL,
&ctx_update_sub_id);
if (vmci_err < VMCI_SUCCESS)
dev_warn(&pdev->dev,
"Failed to subscribe to event (type=%d): %d\n",
VMCI_EVENT_CTX_ID_UPDATE, vmci_err);
/*
* Enable interrupts. Try MSI-X first, then MSI, and then fallback on
* legacy interrupts.
*/
if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
vmci_dev->irq = vmci_dev->msix_entries[0].vector;
} else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
vmci_dev->irq = pdev->irq;
} else {
vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
vmci_dev->irq = pdev->irq;
}
/*
* Request IRQ for legacy or MSI interrupts, or for first
* MSI-X vector.
*/
error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
KBUILD_MODNAME, vmci_dev);
if (error) {
dev_err(&pdev->dev, "Irq %u in use: %d\n",
vmci_dev->irq, error);
goto err_disable_msi;
}
/*
* For MSI-X with exclusive vectors we need to request an
* interrupt for each vector so that we get a separate
* interrupt handler routine. This allows us to distinguish
* between the vectors.
*/
if (vmci_dev->exclusive_vectors) {
error = request_irq(vmci_dev->msix_entries[1].vector,
vmci_interrupt_bm, 0, KBUILD_MODNAME,
vmci_dev);
if (error) {
dev_err(&pdev->dev,
"Failed to allocate irq %u: %d\n",
vmci_dev->msix_entries[1].vector, error);
goto err_free_irq;
}
}
dev_dbg(&pdev->dev, "Registered device\n");
atomic_inc(&vmci_num_guest_devices);
/* Enable specific interrupt bits. */
cmd = VMCI_IMR_DATAGRAM;
if (capabilities & VMCI_CAPS_NOTIFICATIONS)
cmd |= VMCI_IMR_NOTIFICATION;
iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);
/* Enable interrupts. */
iowrite32(VMCI_CONTROL_INT_ENABLE,
vmci_dev->iobase + VMCI_CONTROL_ADDR);
pci_set_drvdata(pdev, vmci_dev);
return 0;
err_free_irq:
free_irq(vmci_dev->irq, vmci_dev);
tasklet_kill(&vmci_dev->datagram_tasklet);
tasklet_kill(&vmci_dev->bm_tasklet);
err_disable_msi:
if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
pci_disable_msix(pdev);
else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
pci_disable_msi(pdev);
vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
if (vmci_err < VMCI_SUCCESS)
dev_warn(&pdev->dev,
"Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
err_remove_bitmap:
if (vmci_dev->notification_bitmap) {
iowrite32(VMCI_CONTROL_RESET,
vmci_dev->iobase + VMCI_CONTROL_ADDR);
vfree(vmci_dev->notification_bitmap);
}
err_remove_vmci_dev_g:
spin_lock_irq(&vmci_dev_spinlock);
vmci_dev_g = NULL;
spin_unlock_irq(&vmci_dev_spinlock);
err_free_data_buffer:
vfree(vmci_dev->data_buffer);
/* The rest are managed resources and will be freed by PCI core */
return error;
}
static void vmci_guest_remove_device(struct pci_dev *pdev)
{
struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
int vmci_err;
dev_dbg(&pdev->dev, "Removing device\n");
atomic_dec(&vmci_num_guest_devices);
vmci_qp_guest_endpoints_exit();
vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
if (vmci_err < VMCI_SUCCESS)
dev_warn(&pdev->dev,
"Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
spin_lock_irq(&vmci_dev_spinlock);
vmci_dev_g = NULL;
spin_unlock_irq(&vmci_dev_spinlock);
dev_dbg(&pdev->dev, "Resetting vmci device\n");
iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);
/*
* Free IRQ and then disable MSI/MSI-X as appropriate. For
* MSI-X, we might have multiple vectors, each with their own
* IRQ, which we must free too.
*/
free_irq(vmci_dev->irq, vmci_dev);
if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
if (vmci_dev->exclusive_vectors)
free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
pci_disable_msix(pdev);
} else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
pci_disable_msi(pdev);
}
tasklet_kill(&vmci_dev->datagram_tasklet);
tasklet_kill(&vmci_dev->bm_tasklet);
if (vmci_dev->notification_bitmap) {
/*
* The device reset above cleared the bitmap state of the
* device, so we can safely free it here.
*/
vfree(vmci_dev->notification_bitmap);
}
vfree(vmci_dev->data_buffer);
/* The rest are managed resources and will be freed by PCI core */
}
static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);
static struct pci_driver vmci_guest_driver = {
.name = KBUILD_MODNAME,
.id_table = vmci_ids,
.probe = vmci_guest_probe_device,
.remove = vmci_guest_remove_device,
};
int __init vmci_guest_init(void)
{
return pci_register_driver(&vmci_guest_driver);
}
void __exit vmci_guest_exit(void)
{
pci_unregister_driver(&vmci_guest_driver);
}


@@ -0,0 +1,142 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/slab.h>
#include "vmci_handle_array.h"
static size_t handle_arr_calc_size(size_t capacity)
{
return sizeof(struct vmci_handle_arr) +
capacity * sizeof(struct vmci_handle);
}
struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity)
{
struct vmci_handle_arr *array;
if (capacity == 0)
capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
if (!array)
return NULL;
array->capacity = capacity;
array->size = 0;
return array;
}
void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
{
kfree(array);
}
void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
struct vmci_handle handle)
{
struct vmci_handle_arr *array = *array_ptr;
if (unlikely(array->size >= array->capacity)) {
/* reallocate. */
struct vmci_handle_arr *new_array;
size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT;
size_t new_size = handle_arr_calc_size(new_capacity);
new_array = krealloc(array, new_size, GFP_ATOMIC);
if (!new_array)
return;
new_array->capacity = new_capacity;
*array_ptr = array = new_array;
}
array->entries[array->size] = handle;
array->size++;
}
/*
* Handle that was removed, VMCI_INVALID_HANDLE if entry not found.
*/
struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
struct vmci_handle entry_handle)
{
struct vmci_handle handle = VMCI_INVALID_HANDLE;
size_t i;
for (i = 0; i < array->size; i++) {
if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
handle = array->entries[i];
array->size--;
array->entries[i] = array->entries[array->size];
array->entries[array->size] = VMCI_INVALID_HANDLE;
break;
}
}
return handle;
}
/*
* Handle that was removed, VMCI_INVALID_HANDLE if array was empty.
*/
struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
{
struct vmci_handle handle = VMCI_INVALID_HANDLE;
if (array->size) {
array->size--;
handle = array->entries[array->size];
array->entries[array->size] = VMCI_INVALID_HANDLE;
}
return handle;
}
/*
* Handle at given index, VMCI_INVALID_HANDLE if invalid index.
*/
struct vmci_handle
vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
{
if (unlikely(index >= array->size))
return VMCI_INVALID_HANDLE;
return array->entries[index];
}
bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
struct vmci_handle entry_handle)
{
size_t i;
for (i = 0; i < array->size; i++)
if (vmci_handle_is_equal(array->entries[i], entry_handle))
return true;
return false;
}
/*
* NULL if the array is empty. Otherwise, a pointer to the array
* of VMCI handles in the handle array.
*/
struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array)
{
if (array->size)
return array->entries;
return NULL;
}
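/*
 * Illustrative sketch (not part of the original file): typical use of the
 * handle array. The resource ID 7 is arbitrary. Note that append may
 * krealloc() and move the array, which is why it takes a pointer to the
 * caller's array pointer.
 */
static void example_handle_array_use(void)
{
struct vmci_handle_arr *arr;
struct vmci_handle h = vmci_make_handle(VMCI_HOST_CONTEXT_ID, 7);
arr = vmci_handle_arr_create(0); /* 0 selects the default capacity */
if (!arr)
return;
vmci_handle_arr_append_entry(&arr, h); /* may move the array */
if (vmci_handle_arr_has_entry(arr, h))
vmci_handle_arr_remove_entry(arr, h);
vmci_handle_arr_destroy(arr);
}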


@@ -0,0 +1,52 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _VMCI_HANDLE_ARRAY_H_
#define _VMCI_HANDLE_ARRAY_H_
#include <linux/vmw_vmci_defs.h>
#include <linux/types.h>
#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
#define VMCI_ARR_CAP_MULT 2 /* Array capacity multiplier */
struct vmci_handle_arr {
size_t capacity;
size_t size;
struct vmci_handle entries[];
};
struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity);
void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
struct vmci_handle handle);
struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
struct vmci_handle
entry_handle);
struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array);
struct vmci_handle
vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index);
bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
struct vmci_handle entry_handle);
struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array);
static inline size_t vmci_handle_arr_get_size(
const struct vmci_handle_arr *array)
{
return array->size;
}
#endif /* _VMCI_HANDLE_ARRAY_H_ */

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,191 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _VMCI_QUEUE_PAIR_H_
#define _VMCI_QUEUE_PAIR_H_
#include <linux/vmw_vmci_defs.h>
#include <linux/types.h>
#include "vmci_context.h"
/* Callback needed for correctly waiting on events. */
typedef int (*vmci_event_release_cb) (void *client_data);
/* Guest device port I/O. */
struct ppn_set {
u64 num_produce_pages;
u64 num_consume_pages;
u32 *produce_ppns;
u32 *consume_ppns;
bool initialized;
};
/* VMCIqueue_pairAllocInfo */
struct vmci_qp_alloc_info {
struct vmci_handle handle;
u32 peer;
u32 flags;
u64 produce_size;
u64 consume_size;
u64 ppn_va; /* Start VA of queue pair PPNs. */
u64 num_ppns;
s32 result;
u32 version;
};
/* VMCIqueue_pairSetVAInfo */
struct vmci_qp_set_va_info {
struct vmci_handle handle;
u64 va; /* Start VA of queue pair PPNs. */
u64 num_ppns;
u32 version;
s32 result;
};
/*
* For backwards compatibility, here is a version of the
* VMCIqueue_pairPageFileInfo from before host end point support was added.
* Note that the current version of that structure requires VMX to
* pass down the VA of the mapped file. Before host support was added
* there was nothing of the sort. So, when the driver sees the ioctl
* with a parameter that is the sizeof
* VMCIqueue_pairPageFileInfo_NoHostQP then it can infer that the version
* of VMX running can't attach to host end points because it doesn't
* provide the VA of the mapped files.
*
* The Linux driver doesn't get an indication of the size of the
* structure passed down from user space. So, to fix a long standing
* but unfiled bug, the _pad field has been renamed to version.
* Existing versions of VMX always initialize the PageFileInfo
* structure so that _pad, er, version is set to 0.
*
* A version value of 1 indicates that the size of the structure has
* been increased to include two UVA's: produce_uva and consume_uva.
* These UVA's are of the mmap()'d queue contents backing files.
*
* In addition, if when VMX is sending down the
* VMCIqueue_pairPageFileInfo structure it gets an error then it will
* try again with the _NoHostQP version of the file to see if an older
* VMCI kernel module is running.
*/
/* VMCIqueue_pairPageFileInfo */
struct vmci_qp_page_file_info {
struct vmci_handle handle;
u64 produce_page_file; /* User VA. */
u64 consume_page_file; /* User VA. */
u64 produce_page_file_size; /* Size of the file name array. */
u64 consume_page_file_size; /* Size of the file name array. */
s32 result;
u32 version; /* Was _pad. */
u64 produce_va; /* User VA of the mapped file. */
u64 consume_va; /* User VA of the mapped file. */
};
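/*
* Illustrative only (not part of this change; names are placeholders):
* a handler can tell the two VMX generations apart purely from the
* ioctl payload size and the version field, e.g.
*
*       if (io_size == sizeof(struct_no_host_qp))
*               -> old VMX: no mapped-file VAs, no host end points
*       else if (info->version >= 1)
*               -> current VMX: produce_va/consume_va are valid
*/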
/* vmci queuepair detach info */
struct vmci_qp_dtch_info {
struct vmci_handle handle;
s32 result;
u32 _pad;
};
/*
* struct vmci_qp_page_store describes how the memory of a given queue pair
* is backed. When the queue pair is between the host and a guest, the
* page store consists of references to the guest pages. On vmkernel,
* this is a list of PPNs, and on hosted, it is a user VA where the
* queue pair is mapped into the VMX address space.
*/
struct vmci_qp_page_store {
/* Reference to pages backing the queue pair. */
u64 pages;
/* Length of pageList/virtual address range (in pages). */
u32 len;
};
/*
* This data type contains the information about a queue.
* There are two queues (hence, queue pairs) per transaction model between a
* pair of end points, A & B. One queue is used by end point A to transmit
* commands and responses to B. The other queue is used by B to transmit
* commands and responses.
*
* struct vmci_queue_kern_if is a per-OS defined Queue structure. It contains
* either a direct pointer to the linear address of the buffer contents or a
* pointer to structures which help the OS locate those data pages. See
* vmciKernelIf.c for each platform for its definition.
*/
struct vmci_queue {
struct vmci_queue_header *q_header;
struct vmci_queue_header *saved_header;
struct vmci_queue_kern_if *kernel_if;
};
/*
* Utility function that checks whether the fields of the page
* store contain valid values.
* Result:
* true if the page store is well-formed; false otherwise.
*/
static inline bool
VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
{
return page_store->len >= 2;
}
/*
* Helper function to check if the non-blocking flag
* is set for a given queue pair.
*/
static inline bool vmci_can_block(u32 flags)
{
return !(flags & VMCI_QPFLAG_NONBLOCK);
}
/*
* Helper function to check if the queue pair is pinned
* into memory.
*/
static inline bool vmci_qp_pinned(u32 flags)
{
return flags & VMCI_QPFLAG_PINNED;
}
void vmci_qp_broker_exit(void);
int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
u32 flags, u32 priv_flags,
u64 produce_size, u64 consume_size,
struct vmci_qp_page_store *page_store,
struct vmci_ctx *context);
int vmci_qp_broker_set_page_store(struct vmci_handle handle,
u64 produce_uva, u64 consume_uva,
struct vmci_ctx *context);
int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context);
void vmci_qp_guest_endpoints_exit(void);
int vmci_qp_alloc(struct vmci_handle *handle,
struct vmci_queue **produce_q, u64 produce_size,
struct vmci_queue **consume_q, u64 consume_size,
u32 peer, u32 flags, u32 priv_flags,
bool guest_endpoint, vmci_event_release_cb wakeup_cb,
void *client_data);
int vmci_qp_broker_map(struct vmci_handle handle,
struct vmci_ctx *context, u64 guest_mem);
int vmci_qp_broker_unmap(struct vmci_handle handle,
struct vmci_ctx *context, u32 gid);
#endif /* _VMCI_QUEUE_PAIR_H_ */
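For orientation, a guest-side allocation through this interface might look roughly as follows (a hedged sketch: the sizes, flags and "peer_cid" are illustrative, and VMCI errors are negative, so success is tested against VMCI_SUCCESS):

        struct vmci_handle handle = VMCI_INVALID_HANDLE;
        struct vmci_queue *produce_q, *consume_q;
        int rc;

        rc = vmci_qp_alloc(&handle, &produce_q, PAGE_SIZE,
                           &consume_q, PAGE_SIZE, peer_cid,
                           0 /* flags */, 0 /* priv_flags */,
                           true /* guest_endpoint */, NULL, NULL);
        if (rc < VMCI_SUCCESS)
                return rc;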


@ -0,0 +1,229 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/rculist.h>
#include "vmci_resource.h"
#include "vmci_driver.h"
#define VMCI_RESOURCE_HASH_BITS 7
#define VMCI_RESOURCE_HASH_BUCKETS (1 << VMCI_RESOURCE_HASH_BITS)
struct vmci_hash_table {
spinlock_t lock;
struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS];
};
static struct vmci_hash_table vmci_resource_table = {
.lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock),
};
static unsigned int vmci_resource_hash(struct vmci_handle handle)
{
return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS);
}
/*
* Gets a resource (if one exists) matching given handle from the hash table.
*/
static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
enum vmci_resource_type type)
{
struct vmci_resource *r, *resource = NULL;
struct hlist_node *node;
unsigned int idx = vmci_resource_hash(handle);
rcu_read_lock();
hlist_for_each_entry_rcu(r, node,
&vmci_resource_table.entries[idx], node) {
u32 cid = r->handle.context;
u32 rid = r->handle.resource;
if (r->type == type &&
rid == handle.resource &&
(cid == handle.context || cid == VMCI_INVALID_ID)) {
resource = r;
break;
}
}
rcu_read_unlock();
return resource;
}
/*
* Find an unused resource ID and return it. The first
* VMCI_RESERVED_RESOURCE_ID_MAX are reserved so we start from
* its value + 1.
* Returns VMCI resource id on success, VMCI_INVALID_ID on failure.
*/
static u32 vmci_resource_find_id(u32 context_id,
enum vmci_resource_type resource_type)
{
static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
u32 old_rid = resource_id;
u32 current_rid;
/*
* Generate a unique resource ID. Keep on trying until we wrap around
* in the RID space.
*/
do {
struct vmci_handle handle;
current_rid = resource_id;
resource_id++;
if (unlikely(resource_id == VMCI_INVALID_ID)) {
/* Skip the reserved rids. */
resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
}
handle = vmci_make_handle(context_id, current_rid);
if (!vmci_resource_lookup(handle, resource_type))
return current_rid;
} while (resource_id != old_rid);
return VMCI_INVALID_ID;
}
int vmci_resource_add(struct vmci_resource *resource,
enum vmci_resource_type resource_type,
struct vmci_handle handle)
{
unsigned int idx;
int result;
spin_lock(&vmci_resource_table.lock);
if (handle.resource == VMCI_INVALID_ID) {
handle.resource = vmci_resource_find_id(handle.context,
resource_type);
if (handle.resource == VMCI_INVALID_ID) {
result = VMCI_ERROR_NO_HANDLE;
goto out;
}
} else if (vmci_resource_lookup(handle, resource_type)) {
result = VMCI_ERROR_ALREADY_EXISTS;
goto out;
}
resource->handle = handle;
resource->type = resource_type;
INIT_HLIST_NODE(&resource->node);
kref_init(&resource->kref);
init_completion(&resource->done);
idx = vmci_resource_hash(resource->handle);
hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);
result = VMCI_SUCCESS;
out:
spin_unlock(&vmci_resource_table.lock);
return result;
}
void vmci_resource_remove(struct vmci_resource *resource)
{
struct vmci_handle handle = resource->handle;
unsigned int idx = vmci_resource_hash(handle);
struct vmci_resource *r;
struct hlist_node *node;
/* Remove resource from hash table. */
spin_lock(&vmci_resource_table.lock);
hlist_for_each_entry(r, node, &vmci_resource_table.entries[idx], node) {
if (vmci_handle_is_equal(r->handle, resource->handle)) {
hlist_del_init_rcu(&r->node);
break;
}
}
spin_unlock(&vmci_resource_table.lock);
synchronize_rcu();
vmci_resource_put(resource);
wait_for_completion(&resource->done);
}
struct vmci_resource *
vmci_resource_by_handle(struct vmci_handle resource_handle,
enum vmci_resource_type resource_type)
{
struct vmci_resource *r, *resource = NULL;
rcu_read_lock();
r = vmci_resource_lookup(resource_handle, resource_type);
if (r &&
(resource_type == r->type ||
resource_type == VMCI_RESOURCE_TYPE_ANY)) {
resource = vmci_resource_get(r);
}
rcu_read_unlock();
return resource;
}
/*
* Get a reference to given resource.
*/
struct vmci_resource *vmci_resource_get(struct vmci_resource *resource)
{
kref_get(&resource->kref);
return resource;
}
static void vmci_release_resource(struct kref *kref)
{
struct vmci_resource *resource =
container_of(kref, struct vmci_resource, kref);
/* Verify the resource has been unlinked from hash table */
WARN_ON(!hlist_unhashed(&resource->node));
/* Signal that container of this resource can now be destroyed */
complete(&resource->done);
}
/*
* The resource's release function is called when the last reference
* is dropped. At that point we are sure that nobody else
* can increment the count again (it's gone from the resource hash
* table), so there's no need for locking here.
*/
int vmci_resource_put(struct vmci_resource *resource)
{
/*
* We propagate the information back to caller in case it wants to know
* whether entry was freed.
*/
return kref_put(&resource->kref, vmci_release_resource) ?
VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS;
}
struct vmci_handle vmci_resource_handle(struct vmci_resource *resource)
{
return resource->handle;
}
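Taken together, clients of this table follow a short lifecycle. A rough sketch (illustrative: "db" stands for a hypothetical object embedding the kref-managed vmci_resource):

        /* Register; an invalid resource id requests auto-assignment. */
        err = vmci_resource_add(&db->resource, VMCI_RESOURCE_TYPE_DOORBELL,
                                vmci_make_handle(cid, VMCI_INVALID_ID));

        /* Lookup takes a reference that the caller must drop. */
        r = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DOORBELL);
        if (r)
                vmci_resource_put(r);

        /* Teardown blocks until the last reference is gone. */
        vmci_resource_remove(&db->resource);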


@ -0,0 +1,59 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _VMCI_RESOURCE_H_
#define _VMCI_RESOURCE_H_
#include <linux/vmw_vmci_defs.h>
#include <linux/types.h>
#include "vmci_context.h"
enum vmci_resource_type {
VMCI_RESOURCE_TYPE_ANY,
VMCI_RESOURCE_TYPE_API,
VMCI_RESOURCE_TYPE_GROUP,
VMCI_RESOURCE_TYPE_DATAGRAM,
VMCI_RESOURCE_TYPE_DOORBELL,
VMCI_RESOURCE_TYPE_QPAIR_GUEST,
VMCI_RESOURCE_TYPE_QPAIR_HOST
};
struct vmci_resource {
struct vmci_handle handle;
enum vmci_resource_type type;
struct hlist_node node;
struct kref kref;
struct completion done;
};
int vmci_resource_add(struct vmci_resource *resource,
enum vmci_resource_type resource_type,
struct vmci_handle handle);
void vmci_resource_remove(struct vmci_resource *resource);
struct vmci_resource *
vmci_resource_by_handle(struct vmci_handle resource_handle,
enum vmci_resource_type resource_type);
struct vmci_resource *vmci_resource_get(struct vmci_resource *resource);
int vmci_resource_put(struct vmci_resource *resource);
struct vmci_handle vmci_resource_handle(struct vmci_resource *resource);
#endif /* _VMCI_RESOURCE_H_ */


@ -0,0 +1,226 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_route.h"
/*
* Make a routing decision for the given source and destination handles.
* This will try to determine the route using the handles and the available
* devices. Will set the source context if it is invalid.
*/
int vmci_route(struct vmci_handle *src,
const struct vmci_handle *dst,
bool from_guest,
enum vmci_route *route)
{
bool has_host_device = vmci_host_code_active();
bool has_guest_device = vmci_guest_code_active();
*route = VMCI_ROUTE_NONE;
/*
* "from_guest" is only ever set to true by
* IOCTL_VMCI_DATAGRAM_SEND (or by the vmkernel equivalent),
* which comes from the VMX, so we know it is coming from a
* guest.
*
* To avoid inconsistencies, test these once. We will test
* them again when we do the actual send to ensure that we do
* not touch a non-existent device.
*/
/* Must have a valid destination context. */
if (VMCI_INVALID_ID == dst->context)
return VMCI_ERROR_INVALID_ARGS;
/* Anywhere to hypervisor. */
if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) {
/*
* If this message already came from a guest then we
* cannot send it to the hypervisor. It must come
* from a local client.
*/
if (from_guest)
return VMCI_ERROR_DST_UNREACHABLE;
/*
* We must be acting as a guest in order to send to
* the hypervisor.
*/
if (!has_guest_device)
return VMCI_ERROR_DEVICE_NOT_FOUND;
/* And we cannot send if the source is the host context. */
if (VMCI_HOST_CONTEXT_ID == src->context)
return VMCI_ERROR_INVALID_ARGS;
/*
* If the client passed the ANON source handle then
* respect it (both context and resource are invalid).
* However, if they passed only an invalid context,
* then they probably mean ANY, in which case we
* should set the real context here before passing it
* down.
*/
if (VMCI_INVALID_ID == src->context &&
VMCI_INVALID_ID != src->resource)
src->context = vmci_get_context_id();
/* Send from local client down to the hypervisor. */
*route = VMCI_ROUTE_AS_GUEST;
return VMCI_SUCCESS;
}
/* Anywhere to local client on host. */
if (VMCI_HOST_CONTEXT_ID == dst->context) {
/*
* If it is not from a guest but we are acting as a
* guest, then we need to send it down to the host.
* Note that if we are also acting as a host then this
* will prevent us from sending from local client to
* local client, but we accept that restriction as a
* way to remove any ambiguity from the host context.
*/
if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) {
/*
* If the hypervisor is the source, this is
* host local communication. The hypervisor
* may send vmci event datagrams to the host
* itself, but it will never send datagrams to
* an "outer host" through the guest device.
*/
if (has_host_device) {
*route = VMCI_ROUTE_AS_HOST;
return VMCI_SUCCESS;
} else {
return VMCI_ERROR_DEVICE_NOT_FOUND;
}
}
if (!from_guest && has_guest_device) {
/* If no source context then use the current. */
if (VMCI_INVALID_ID == src->context)
src->context = vmci_get_context_id();
/* Send it from local client down to the host. */
*route = VMCI_ROUTE_AS_GUEST;
return VMCI_SUCCESS;
}
/*
* Otherwise we already received it from a guest and
* it is destined for a local client on this host, or
* it is from another local client on this host. We
* must be acting as a host to service it.
*/
if (!has_host_device)
return VMCI_ERROR_DEVICE_NOT_FOUND;
if (VMCI_INVALID_ID == src->context) {
/*
* If it came from a guest then it must have a
* valid context. Otherwise we can use the
* host context.
*/
if (from_guest)
return VMCI_ERROR_INVALID_ARGS;
src->context = VMCI_HOST_CONTEXT_ID;
}
/* Route to local client. */
*route = VMCI_ROUTE_AS_HOST;
return VMCI_SUCCESS;
}
/*
* If we are acting as a host then this might be destined for
* a guest.
*/
if (has_host_device) {
/* It will have a context if it is meant for a guest. */
if (vmci_ctx_exists(dst->context)) {
if (VMCI_INVALID_ID == src->context) {
/*
* If it came from a guest then it
* must have a valid context.
* Otherwise we can use the host
* context.
*/
if (from_guest)
return VMCI_ERROR_INVALID_ARGS;
src->context = VMCI_HOST_CONTEXT_ID;
} else if (VMCI_CONTEXT_IS_VM(src->context) &&
src->context != dst->context) {
/*
* VM to VM communication is not
* allowed. Since we catch all
* communication destined for the host
* above, this must be destined for a
* VM since there is a valid context.
*/
return VMCI_ERROR_DST_UNREACHABLE;
}
/* Pass it up to the guest. */
*route = VMCI_ROUTE_AS_HOST;
return VMCI_SUCCESS;
} else if (!has_guest_device) {
/*
* The host is attempting to reach a CID
* without an active context, and we can't
* send it down, since we have no guest
* device.
*/
return VMCI_ERROR_DST_UNREACHABLE;
}
}
/*
* We must be a guest trying to send to another guest, which means
* we need to send it down to the host. We do not filter out VM to
* VM communication here, since we want to be able to use the guest
* driver on older versions that do support VM to VM communication.
*/
if (!has_guest_device) {
/*
* Ending up here means we have neither guest nor host
* device.
*/
return VMCI_ERROR_DEVICE_NOT_FOUND;
}
/* If no source context then use the current context. */
if (VMCI_INVALID_ID == src->context)
src->context = vmci_get_context_id();
/*
* Send it from local client down to the host, which will
* route it to the other guest for us.
*/
*route = VMCI_ROUTE_AS_GUEST;
return VMCI_SUCCESS;
}
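As a usage sketch (an illustrative datagram-send path, not code from this series; the two delivery helpers are hypothetical):

        enum vmci_route route;
        int err;

        err = vmci_route(&dg->src, &dg->dst, false /* from_guest */, &route);
        if (err < VMCI_SUCCESS)
                return err;

        if (route == VMCI_ROUTE_AS_HOST)
                err = deliver_to_local_context(dg);     /* hypothetical */
        else if (route == VMCI_ROUTE_AS_GUEST)
                err = send_via_guest_device(dg);        /* hypothetical */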


@ -0,0 +1,30 @@
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _VMCI_ROUTE_H_
#define _VMCI_ROUTE_H_
#include <linux/vmw_vmci_defs.h>
enum vmci_route {
VMCI_ROUTE_NONE,
VMCI_ROUTE_AS_HOST,
VMCI_ROUTE_AS_GUEST,
};
int vmci_route(struct vmci_handle *src, const struct vmci_handle *dst,
bool from_guest, enum vmci_route *route);
#endif /* _VMCI_ROUTE_H_ */


@ -457,7 +457,7 @@ config MMC_SDHI
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
depends on PCI
depends on PCI && GENERIC_HARDIRQS
select CB710_CORE
help
This option enables support for MMC/SD part of ENE CB710/720 Flash


@ -188,6 +188,10 @@ config NETPOLL_TRAP
config NET_POLL_CONTROLLER
def_bool NETPOLL
config NTB_NETDEV
tristate "Virtual Ethernet over NTB"
depends on NTB
config RIONET
tristate "RapidIO Ethernet over messaging driver support"
depends on RAPIDIO


@ -71,3 +71,4 @@ obj-$(CONFIG_USB_IPHETH) += usb/
obj-$(CONFIG_USB_CDC_PHONET) += usb/
obj-$(CONFIG_HYPERV_NET) += hyperv/
obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o


@ -498,8 +498,7 @@ static int netvsc_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Network guid */
{ VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
{ HV_NIC_GUID, },
{ },
};

drivers/net/ntb_netdev.c

@ -0,0 +1,408 @@
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* BSD LICENSE
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Intel PCIe NTB Network Linux driver
*
* Contact Information:
* Jon Mason <jon.mason@intel.com>
*/
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ntb.h>
#define NTB_NETDEV_VER "0.7"
MODULE_DESCRIPTION(KBUILD_MODNAME);
MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
struct ntb_netdev {
struct list_head list;
struct pci_dev *pdev;
struct net_device *ndev;
struct ntb_transport_qp *qp;
};
#define NTB_TX_TIMEOUT_MS 1000
#define NTB_RXQ_SIZE 100
static LIST_HEAD(dev_list);
static void ntb_netdev_event_handler(void *data, int status)
{
struct net_device *ndev = data;
struct ntb_netdev *dev = netdev_priv(ndev);
netdev_dbg(ndev, "Event %x, Link %x\n", status,
ntb_transport_link_query(dev->qp));
/* Currently, only link status event is supported */
if (status)
netif_carrier_on(ndev);
else
netif_carrier_off(ndev);
}
static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
void *data, int len)
{
struct net_device *ndev = qp_data;
struct sk_buff *skb;
int rc;
skb = data;
if (!skb)
return;
netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
if (netif_rx(skb) == NET_RX_DROP) {
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
} else {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += len;
}
skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
if (!skb) {
ndev->stats.rx_errors++;
ndev->stats.rx_frame_errors++;
return;
}
rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
if (rc) {
dev_kfree_skb(skb);
ndev->stats.rx_errors++;
ndev->stats.rx_fifo_errors++;
}
}
static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
void *data, int len)
{
struct net_device *ndev = qp_data;
struct sk_buff *skb;
skb = data;
if (!skb || !ndev)
return;
if (len > 0) {
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
} else {
ndev->stats.tx_errors++;
ndev->stats.tx_aborted_errors++;
}
dev_kfree_skb(skb);
}
static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct ntb_netdev *dev = netdev_priv(ndev);
int rc;
netdev_dbg(ndev, "%s: skb len %d\n", __func__, skb->len);
rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
if (rc)
goto err;
return NETDEV_TX_OK;
err:
ndev->stats.tx_dropped++;
ndev->stats.tx_errors++;
return NETDEV_TX_BUSY;
}
static int ntb_netdev_open(struct net_device *ndev)
{
struct ntb_netdev *dev = netdev_priv(ndev);
struct sk_buff *skb;
int rc, i, len;
/* Add some empty rx bufs */
for (i = 0; i < NTB_RXQ_SIZE; i++) {
skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
if (!skb) {
rc = -ENOMEM;
goto err;
}
rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
ndev->mtu + ETH_HLEN);
if (rc == -EINVAL)
goto err;
}
netif_carrier_off(ndev);
ntb_transport_link_up(dev->qp);
return 0;
err:
while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
dev_kfree_skb(skb);
return rc;
}
static int ntb_netdev_close(struct net_device *ndev)
{
struct ntb_netdev *dev = netdev_priv(ndev);
struct sk_buff *skb;
int len;
ntb_transport_link_down(dev->qp);
while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
dev_kfree_skb(skb);
return 0;
}
static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
{
struct ntb_netdev *dev = netdev_priv(ndev);
struct sk_buff *skb;
int len, rc;
if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
return -EINVAL;
if (!netif_running(ndev)) {
ndev->mtu = new_mtu;
return 0;
}
/* Bring down the link and dispose of posted rx entries */
ntb_transport_link_down(dev->qp);
if (ndev->mtu < new_mtu) {
int i;
for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
dev_kfree_skb(skb);
for (; i; i--) {
skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
if (!skb) {
rc = -ENOMEM;
goto err;
}
rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
new_mtu + ETH_HLEN);
if (rc) {
dev_kfree_skb(skb);
goto err;
}
}
}
ndev->mtu = new_mtu;
ntb_transport_link_up(dev->qp);
return 0;
err:
ntb_transport_link_down(dev->qp);
while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
dev_kfree_skb(skb);
netdev_err(ndev, "Error changing MTU, device inoperable\n");
return rc;
}
static const struct net_device_ops ntb_netdev_ops = {
.ndo_open = ntb_netdev_open,
.ndo_stop = ntb_netdev_close,
.ndo_start_xmit = ntb_netdev_start_xmit,
.ndo_change_mtu = ntb_netdev_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
};
static void ntb_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
struct ntb_netdev *dev = netdev_priv(ndev);
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}
static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
cmd->supported = SUPPORTED_Backplane;
cmd->advertising = ADVERTISED_Backplane;
cmd->speed = SPEED_UNKNOWN;
ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
cmd->duplex = DUPLEX_FULL;
cmd->port = PORT_OTHER;
cmd->phy_address = 0;
cmd->transceiver = XCVR_DUMMY1;
cmd->autoneg = AUTONEG_ENABLE;
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
return 0;
}
static const struct ethtool_ops ntb_ethtool_ops = {
.get_drvinfo = ntb_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_settings = ntb_get_settings,
};
static const struct ntb_queue_handlers ntb_netdev_handlers = {
.tx_handler = ntb_netdev_tx_handler,
.rx_handler = ntb_netdev_rx_handler,
.event_handler = ntb_netdev_event_handler,
};
static int ntb_netdev_probe(struct pci_dev *pdev)
{
struct net_device *ndev;
struct ntb_netdev *dev;
int rc;
ndev = alloc_etherdev(sizeof(struct ntb_netdev));
if (!ndev)
return -ENOMEM;
dev = netdev_priv(ndev);
dev->ndev = ndev;
dev->pdev = pdev;
BUG_ON(!dev->pdev);
ndev->features = NETIF_F_HIGHDMA;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ndev->hw_features = ndev->features;
ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
random_ether_addr(ndev->perm_addr);
memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
ndev->netdev_ops = &ntb_netdev_ops;
SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops);
dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
if (!dev->qp) {
rc = -EIO;
goto err;
}
ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;
rc = register_netdev(ndev);
if (rc)
goto err1;
list_add(&dev->list, &dev_list);
dev_info(&pdev->dev, "%s created\n", ndev->name);
return 0;
err1:
ntb_transport_free_queue(dev->qp);
err:
free_netdev(ndev);
return rc;
}
static void ntb_netdev_remove(struct pci_dev *pdev)
{
struct net_device *ndev;
struct ntb_netdev *dev;
list_for_each_entry(dev, &dev_list, list) {
if (dev->pdev == pdev)
break;
}
if (dev == NULL)
return;
ndev = dev->ndev;
unregister_netdev(ndev);
ntb_transport_free_queue(dev->qp);
free_netdev(ndev);
}
static struct ntb_client ntb_netdev_client = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.probe = ntb_netdev_probe,
.remove = ntb_netdev_remove,
};
static int __init ntb_netdev_init_module(void)
{
int rc;
rc = ntb_register_client_dev(KBUILD_MODNAME);
if (rc)
return rc;
return ntb_register_client(&ntb_netdev_client);
}
module_init(ntb_netdev_init_module);
static void __exit ntb_netdev_exit_module(void)
{
ntb_unregister_client(&ntb_netdev_client);
ntb_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);

drivers/ntb/Kconfig

@ -0,0 +1,13 @@
config NTB
tristate "Intel Non-Transparent Bridge support"
depends on PCI
depends on X86_64
help
The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
connecting 2 systems. When configured, writes to the device's PCI
mapped memory will be mirrored to a buffer on the remote system. The
ntb Linux driver uses this point-to-point communication as a method to
transfer data from one system to the other.
If unsure, say N.

drivers/ntb/Makefile

@ -0,0 +1,3 @@
obj-$(CONFIG_NTB) += ntb.o
ntb-objs := ntb_hw.o ntb_transport.o

drivers/ntb/ntb_hw.c

File diff suppressed because it is too large

drivers/ntb/ntb_hw.h

@ -0,0 +1,181 @@
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* BSD LICENSE
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Intel PCIe NTB Linux driver
*
* Contact Information:
* Jon Mason <jon.mason@intel.com>
*/
#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF 0x3726
#define PCI_DEVICE_ID_INTEL_NTB_RP_JSF 0x3727
#define PCI_DEVICE_ID_INTEL_NTB_RP_SNB 0x3C08
#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB 0x3C0D
#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB 0x3C0E
#define PCI_DEVICE_ID_INTEL_NTB_2ND_SNB 0x3C0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
#define NTB_BAR_MMIO 0
#define NTB_BAR_23 2
#define NTB_BAR_45 4
#define NTB_BAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
(1 << NTB_BAR_45))
#define NTB_LINK_DOWN 0
#define NTB_LINK_UP 1
#define NTB_HB_TIMEOUT msecs_to_jiffies(1000)
#define NTB_NUM_MW 2
enum ntb_hw_event {
NTB_EVENT_SW_EVENT0 = 0,
NTB_EVENT_SW_EVENT1,
NTB_EVENT_SW_EVENT2,
NTB_EVENT_HW_ERROR,
NTB_EVENT_HW_LINK_UP,
NTB_EVENT_HW_LINK_DOWN,
};
struct ntb_mw {
dma_addr_t phys_addr;
void __iomem *vbase;
resource_size_t bar_sz;
};
struct ntb_db_cb {
void (*callback) (void *data, int db_num);
unsigned int db_num;
void *data;
struct ntb_device *ndev;
};
struct ntb_device {
struct pci_dev *pdev;
struct msix_entry *msix_entries;
void __iomem *reg_base;
struct ntb_mw mw[NTB_NUM_MW];
struct {
unsigned int max_spads;
unsigned int max_db_bits;
unsigned int msix_cnt;
} limits;
struct {
void __iomem *pdb;
void __iomem *pdb_mask;
void __iomem *sdb;
void __iomem *sbar2_xlat;
void __iomem *sbar4_xlat;
void __iomem *spad_write;
void __iomem *spad_read;
void __iomem *lnk_cntl;
void __iomem *lnk_stat;
void __iomem *spci_cmd;
} reg_ofs;
struct ntb_transport *ntb_transport;
void (*event_cb)(void *handle, enum ntb_hw_event event);
struct ntb_db_cb *db_cb;
unsigned char hw_type;
unsigned char conn_type;
unsigned char dev_type;
unsigned char num_msix;
unsigned char bits_per_vector;
unsigned char max_cbs;
unsigned char link_status;
struct delayed_work hb_timer;
unsigned long last_ts;
};
/**
* ntb_hw_link_status() - return the hardware link status
* @ndev: pointer to ntb_device instance
*
* Returns true if the hardware is connected to the remote system
*
* RETURNS: true or false based on the hardware link state
*/
static inline bool ntb_hw_link_status(struct ntb_device *ndev)
{
return ndev->link_status == NTB_LINK_UP;
}
/**
* ntb_query_pdev() - return the pci_dev pointer
* @ndev: pointer to ntb_device instance
*
* Given the ntb pointer return the pci_dev pointer for the NTB hardware device
*
* RETURNS: a pointer to the ntb pci_dev
*/
static inline struct pci_dev *ntb_query_pdev(struct ntb_device *ndev)
{
return ndev->pdev;
}
struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
void *transport);
void ntb_unregister_transport(struct ntb_device *ndev);
void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
void *data, void (*db_cb_func) (void *data,
int db_num));
void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
int ntb_register_event_callback(struct ntb_device *ndev,
void (*event_cb_func) (void *handle,
enum ntb_hw_event event));
void ntb_unregister_event_callback(struct ntb_device *ndev);
int ntb_get_max_spads(struct ntb_device *ndev);
int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw);
resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw);
void ntb_ring_sdb(struct ntb_device *ndev, unsigned int idx);
void *ntb_find_transport(struct pci_dev *pdev);
int ntb_transport_init(struct pci_dev *pdev);
void ntb_transport_free(void *transport);

drivers/ntb/ntb_regs.h

@ -0,0 +1,139 @@
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* BSD LICENSE
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Intel PCIe NTB Linux driver
*
* Contact Information:
* Jon Mason <jon.mason@intel.com>
*/
#define NTB_LINK_ENABLE 0x0000
#define NTB_LINK_DISABLE 0x0002
#define NTB_LINK_STATUS_ACTIVE 0x2000
#define NTB_LINK_SPEED_MASK 0x000f
#define NTB_LINK_WIDTH_MASK 0x03f0
#define SNB_MSIX_CNT 4
#define SNB_MAX_SPADS 16
#define SNB_MAX_COMPAT_SPADS 8
/* Reserve the uppermost bit for link interrupt */
#define SNB_MAX_DB_BITS 15
#define SNB_DB_BITS_PER_VEC 5
#define SNB_DB_HW_LINK 0x8000
#define SNB_PCICMD_OFFSET 0x0504
#define SNB_DEVCTRL_OFFSET 0x0598
#define SNB_LINK_STATUS_OFFSET 0x01A2
#define SNB_PBAR2LMT_OFFSET 0x0000
#define SNB_PBAR4LMT_OFFSET 0x0008
#define SNB_PBAR2XLAT_OFFSET 0x0010
#define SNB_PBAR4XLAT_OFFSET 0x0018
#define SNB_SBAR2LMT_OFFSET 0x0020
#define SNB_SBAR4LMT_OFFSET 0x0028
#define SNB_SBAR2XLAT_OFFSET 0x0030
#define SNB_SBAR4XLAT_OFFSET 0x0038
#define SNB_SBAR0BASE_OFFSET 0x0040
#define SNB_SBAR2BASE_OFFSET 0x0048
#define SNB_SBAR4BASE_OFFSET 0x0050
#define SNB_NTBCNTL_OFFSET 0x0058
#define SNB_SBDF_OFFSET 0x005C
#define SNB_PDOORBELL_OFFSET 0x0060
#define SNB_PDBMSK_OFFSET 0x0062
#define SNB_SDOORBELL_OFFSET 0x0064
#define SNB_SDBMSK_OFFSET 0x0066
#define SNB_USMEMMISS 0x0070
#define SNB_SPAD_OFFSET 0x0080
#define SNB_SPADSEMA4_OFFSET 0x00c0
#define SNB_WCCNTRL_OFFSET 0x00e0
#define SNB_B2B_SPAD_OFFSET 0x0100
#define SNB_B2B_DOORBELL_OFFSET 0x0140
#define SNB_B2B_XLAT_OFFSET 0x0144
#define BWD_MSIX_CNT 34
#define BWD_MAX_SPADS 16
#define BWD_MAX_COMPAT_SPADS 16
#define BWD_MAX_DB_BITS 34
#define BWD_DB_BITS_PER_VEC 1
#define BWD_PCICMD_OFFSET 0xb004
#define BWD_MBAR23_OFFSET 0xb018
#define BWD_MBAR45_OFFSET 0xb020
#define BWD_DEVCTRL_OFFSET 0xb048
#define BWD_LINK_STATUS_OFFSET 0xb052
#define BWD_SBAR2XLAT_OFFSET 0x0008
#define BWD_SBAR4XLAT_OFFSET 0x0010
#define BWD_PDOORBELL_OFFSET 0x0020
#define BWD_PDBMSK_OFFSET 0x0028
#define BWD_NTBCNTL_OFFSET 0x0060
#define BWD_EBDF_OFFSET 0x0064
#define BWD_SPAD_OFFSET 0x0080
#define BWD_SPADSEMA_OFFSET 0x00c0
#define BWD_STKYSPAD_OFFSET 0x00c4
#define BWD_PBAR2XLAT_OFFSET 0x8008
#define BWD_PBAR4XLAT_OFFSET 0x8010
#define BWD_B2B_DOORBELL_OFFSET 0x8020
#define BWD_B2B_SPAD_OFFSET 0x8080
#define BWD_B2B_SPADSEMA_OFFSET 0x80c0
#define BWD_B2B_STKYSPAD_OFFSET 0x80c4
#define NTB_CNTL_BAR23_SNOOP (1 << 2)
#define NTB_CNTL_BAR45_SNOOP (1 << 6)
#define BWD_CNTL_LINK_DOWN (1 << 16)
#define NTB_PPD_OFFSET 0x00D4
#define SNB_PPD_CONN_TYPE 0x0003
#define SNB_PPD_DEV_TYPE 0x0010
#define BWD_PPD_INIT_LINK 0x0008
#define BWD_PPD_CONN_TYPE 0x0300
#define BWD_PPD_DEV_TYPE 0x1000
#define BWD_PBAR2XLAT_USD_ADDR 0x0000004000000000
#define BWD_PBAR4XLAT_USD_ADDR 0x0000008000000000
#define BWD_MBAR23_USD_ADDR 0x000000410000000C
#define BWD_MBAR45_USD_ADDR 0x000000810000000C
#define BWD_PBAR2XLAT_DSD_ADDR 0x0000004100000000
#define BWD_PBAR4XLAT_DSD_ADDR 0x0000008100000000
#define BWD_MBAR23_DSD_ADDR 0x000000400000000C
#define BWD_MBAR45_DSD_ADDR 0x000000800000000C

drivers/ntb/ntb_transport.c

File diff suppressed because it is too large


@ -133,8 +133,6 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
goto err_out_free_res;
}
pci_set_drvdata(dev, &sockets[i].socket);
for (i = 0; i<socket_count; i++) {
sockets[i].socket.dev.parent = &dev->dev;
sockets[i].socket.ops = &i82092aa_operations;
@ -164,14 +162,14 @@ err_out_disable:
static void i82092aa_pci_remove(struct pci_dev *dev)
{
struct pcmcia_socket *socket = pci_get_drvdata(dev);
int i;
enter("i82092aa_pci_remove");
free_irq(dev->irq, i82092aa_interrupt);
if (socket)
pcmcia_unregister_socket(socket);
for (i = 0; i < socket_count; i++)
pcmcia_unregister_socket(&sockets[i].socket);
leave("i82092aa_pci_remove");
}


@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock)
socket = &vrc4171_sockets[slot];
socket->csc_irq = search_nonuse_irq();
socket->io_irq = search_nonuse_irq();
spin_lock_init(&socket->lock);
return 0;
}


@ -1410,13 +1410,13 @@ enum {
static const struct hv_vmbus_device_id id_table[] = {
/* SCSI guid */
{ VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
.driver_data = SCSI_GUID },
{ HV_SCSI_GUID,
.driver_data = SCSI_GUID
},
/* IDE guid */
{ VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
.driver_data = IDE_GUID },
{ HV_IDE_GUID,
.driver_data = IDE_GUID
},
{ },
};


@ -1376,6 +1376,7 @@ static int __vme_register_driver_bus(struct vme_driver *drv,
return 0;
err_reg:
put_device(&vdev->dev);
kfree(vdev);
err_devalloc:
list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {


@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
@ -459,43 +460,34 @@ static int ds1wm_probe(struct platform_device *pdev)
if (!pdev)
return -ENODEV;
ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL);
ds1wm_data = devm_kzalloc(&pdev->dev, sizeof(*ds1wm_data), GFP_KERNEL);
if (!ds1wm_data)
return -ENOMEM;
platform_set_drvdata(pdev, ds1wm_data);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENXIO;
goto err0;
}
ds1wm_data->map = ioremap(res->start, resource_size(res));
if (!ds1wm_data->map) {
ret = -ENOMEM;
goto err0;
}
if (!res)
return -ENXIO;
ds1wm_data->map = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!ds1wm_data->map)
return -ENOMEM;
/* calculate bus shift from mem resource */
ds1wm_data->bus_shift = resource_size(res) >> 3;
ds1wm_data->pdev = pdev;
ds1wm_data->cell = mfd_get_cell(pdev);
if (!ds1wm_data->cell) {
ret = -ENODEV;
goto err1;
}
if (!ds1wm_data->cell)
return -ENODEV;
plat = pdev->dev.platform_data;
if (!plat) {
ret = -ENODEV;
goto err1;
}
if (!plat)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
ret = -ENXIO;
goto err1;
}
if (!res)
return -ENXIO;
ds1wm_data->irq = res->start;
ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
@ -505,10 +497,10 @@ static int ds1wm_probe(struct platform_device *pdev)
if (res->flags & IORESOURCE_IRQ_LOWEDGE)
irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
ret = request_irq(ds1wm_data->irq, ds1wm_isr,
ret = devm_request_irq(&pdev->dev, ds1wm_data->irq, ds1wm_isr,
IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data);
if (ret)
goto err1;
return ret;
ds1wm_up(ds1wm_data);
@ -516,17 +508,12 @@ static int ds1wm_probe(struct platform_device *pdev)
ret = w1_add_master_device(&ds1wm_master);
if (ret)
goto err2;
goto err;
return 0;
err2:
err:
ds1wm_down(ds1wm_data);
free_irq(ds1wm_data->irq, ds1wm_data);
err1:
iounmap(ds1wm_data->map);
err0:
kfree(ds1wm_data);
return ret;
}
@ -560,9 +547,6 @@ static int ds1wm_remove(struct platform_device *pdev)
w1_remove_master_device(&ds1wm_master);
ds1wm_down(ds1wm_data);
free_irq(ds1wm_data->irq, ds1wm_data);
iounmap(ds1wm_data->map);
kfree(ds1wm_data);
return 0;
}


@ -51,10 +51,10 @@
* The top 4 bits always read 0.
* To write, the top nibble must be the 1's compl. of the low nibble.
*/
#define DS2482_REG_CFG_1WS 0x08
#define DS2482_REG_CFG_SPU 0x04
#define DS2482_REG_CFG_PPM 0x02
#define DS2482_REG_CFG_APU 0x01
#define DS2482_REG_CFG_1WS 0x08 /* 1-wire speed */
#define DS2482_REG_CFG_SPU 0x04 /* strong pull-up */
#define DS2482_REG_CFG_PPM 0x02 /* presence pulse masking */
#define DS2482_REG_CFG_APU 0x01 /* active pull-up */
/**
@ -131,6 +131,17 @@ struct ds2482_data {
};
/**
* Helper to calculate values for configuration register
* @param conf the raw config value
* @return the value w/ complements that can be written to register
*/
static inline u8 ds2482_calculate_config(u8 conf)
{
return conf | ((~conf & 0x0f) << 4);
}
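/*
* Worked example (illustrative): enabling only the active pull-up,
* conf = DS2482_REG_CFG_APU = 0x01, gives
* 0x01 | ((~0x01 & 0x0f) << 4) = 0x01 | 0xe0 = 0xe1,
* i.e. the low nibble plus its one's complement in the high nibble,
* exactly as the register requires.
*/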
/**
* Sets the read pointer.
* @param pdev The ds2482 client pointer
@ -399,7 +410,7 @@ static u8 ds2482_w1_reset_bus(void *data)
/* If the chip did reset since detect, re-config it */
if (err & DS2482_REG_STS_RST)
ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
0xF0);
ds2482_calculate_config(0x00));
}
mutex_unlock(&pdev->access_lock);
@ -407,6 +418,32 @@ static u8 ds2482_w1_reset_bus(void *data)
return retval;
}
static u8 ds2482_w1_set_pullup(void *data, int delay)
{
struct ds2482_w1_chan *pchan = data;
struct ds2482_data *pdev = pchan->pdev;
u8 retval = 1;
/* if delay is non-zero, activate the pullup;
* the strong pullup will be automatically deactivated
* by the master, so do not explicitly deactivate it
*/
if (delay) {
/* both waits are crucial, otherwise devices might not be
* powered long enough, causing e.g. a w1_therm sensor to
* provide wrong conversion results
*/
ds2482_wait_1wire_idle(pdev);
/* note: it seems like both SPU and APU have to be set! */
retval = ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
ds2482_calculate_config(DS2482_REG_CFG_SPU |
DS2482_REG_CFG_APU));
ds2482_wait_1wire_idle(pdev);
}
return retval;
}
static int ds2482_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@ -452,7 +489,8 @@ static int ds2482_probe(struct i2c_client *client,
data->w1_count = 8;
/* Set all config items to 0 (off) */
ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG, 0xF0);
ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG,
ds2482_calculate_config(0x00));
mutex_init(&data->access_lock);
@ -468,6 +506,7 @@ static int ds2482_probe(struct i2c_client *client,
data->w1_ch[idx].w1_bm.touch_bit = ds2482_w1_touch_bit;
data->w1_ch[idx].w1_bm.triplet = ds2482_w1_triplet;
data->w1_ch[idx].w1_bm.reset_bus = ds2482_w1_reset_bus;
data->w1_ch[idx].w1_bm.set_pullup = ds2482_w1_set_pullup;
err = w1_add_master_device(&data->w1_ch[idx].w1_bm);
if (err) {


@ -109,34 +109,21 @@ static int mxc_w1_probe(struct platform_device *pdev)
struct resource *res;
int err = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
mdev = kzalloc(sizeof(struct mxc_w1_device), GFP_KERNEL);
mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device),
GFP_KERNEL);
if (!mdev)
return -ENOMEM;
mdev->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(mdev->clk)) {
err = PTR_ERR(mdev->clk);
goto failed_clk;
}
mdev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(mdev->clk))
return PTR_ERR(mdev->clk);
mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1;
res = request_mem_region(res->start, resource_size(res),
"mxc_w1");
if (!res) {
err = -EBUSY;
goto failed_req;
}
mdev->regs = ioremap(res->start, resource_size(res));
if (!mdev->regs) {
dev_err(&pdev->dev, "Cannot map mxc_w1 registers\n");
goto failed_ioremap;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdev->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!mdev->regs)
return -EBUSY;
clk_prepare_enable(mdev->clk);
__raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
@ -148,20 +135,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
err = w1_add_master_device(&mdev->bus_master);
if (err)
goto failed_add;
return err;
platform_set_drvdata(pdev, mdev);
return 0;
failed_add:
iounmap(mdev->regs);
failed_ioremap:
release_mem_region(res->start, resource_size(res));
failed_req:
clk_put(mdev->clk);
failed_clk:
kfree(mdev);
return err;
}
/*
@ -170,16 +147,10 @@ failed_clk:
static int mxc_w1_remove(struct platform_device *pdev)
{
struct mxc_w1_device *mdev = platform_get_drvdata(pdev);
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
w1_remove_master_device(&mdev->bus_master);
iounmap(mdev->regs);
release_mem_region(res->start, resource_size(res));
clk_disable_unprepare(mdev->clk);
clk_put(mdev->clk);
platform_set_drvdata(pdev, NULL);


@ -72,7 +72,7 @@ static int w1_gpio_probe_dt(struct platform_device *pdev)
return 0;
}
static int __init w1_gpio_probe(struct platform_device *pdev)
static int w1_gpio_probe(struct platform_device *pdev)
{
struct w1_bus_master *master;
struct w1_gpio_platform_data *pdata;


@ -41,14 +41,18 @@ MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature famil
* If it was disabled, a parasite-powered device might not get the required
* current to do a temperature conversion. If it is enabled, parasite-powered
* devices have a better chance of getting the current they require.
* In case the parasite power-detection is not working (seems to be the case
* for some DS18S20) the strong pullup can also be forced, regardless of the
* power state of the devices.
*
* Summary of options:
* - strong_pullup = 0 Disable strong pullup completely
* - strong_pullup = 1 Enable automatic strong pullup detection
* - strong_pullup = 2 Force strong pullup
*/
static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
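/*
* Example (not part of this change): the mode can be chosen at load
* time, e.g. "modprobe w1_therm strong_pullup=2" to force the strong
* pullup for sensors whose parasite-power detection is unreliable.
*/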
static u8 bad_roms[][9] = {
{0xaa, 0x00, 0x4b, 0x46, 0xff, 0xff, 0x0c, 0x10, 0x87},
{}
};
static ssize_t w1_therm_read(struct device *device,
struct device_attribute *attr, char *buf);
@ -168,16 +172,6 @@ static inline int w1_convert_temp(u8 rom[9], u8 fid)
return 0;
}
static int w1_therm_check_rom(u8 rom[9])
{
int i;
for (i=0; i<sizeof(bad_roms)/9; ++i)
if (!memcmp(bad_roms[i], rom, 9))
return 1;
return 0;
}
static ssize_t w1_therm_read(struct device *device,
struct device_attribute *attr, char *buf)
@ -194,10 +188,11 @@ static ssize_t w1_therm_read(struct device *device,
memset(rom, 0, sizeof(rom));
verdict = 0;
crc = 0;
while (max_trying--) {
verdict = 0;
crc = 0;
if (!w1_reset_select_slave(sl)) {
int count = 0;
unsigned int tm = 750;
@ -210,7 +205,8 @@ static ssize_t w1_therm_read(struct device *device,
continue;
/* 750ms strong pullup (or delay) after the convert */
if (!external_power && w1_strong_pullup)
if (w1_strong_pullup == 2 ||
(!external_power && w1_strong_pullup))
w1_next_pullup(dev, tm);
w1_write_8(dev, W1_CONVERT_TEMP);
@ -249,7 +245,7 @@ static ssize_t w1_therm_read(struct device *device,
}
}
if (!w1_therm_check_rom(rom))
if (verdict)
break;
}
@ -260,7 +256,7 @@ static ssize_t w1_therm_read(struct device *device,
if (verdict)
memcpy(sl->rom, rom, sizeof(sl->rom));
else
dev_warn(device, "18S20 doesn't respond to CONVERT_TEMP.\n");
dev_warn(device, "Read failed CRC check\n");
for (i = 0; i < 9; ++i)
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", sl->rom[i]);


@ -325,14 +325,28 @@ struct hv_ring_buffer {
u32 interrupt_mask;
/* Pad it to PAGE_SIZE so that data starts on page boundary */
u8 reserved[4084];
/* NOTE:
* The interrupt_mask field is used only for channels but since our
* vmbus connection also uses this data structure and its data starts
* here, we commented out this field.
/*
* Win8 uses some of the reserved bits to implement
* interrupt driven flow management. On the send side
* we can request that the receiver interrupt the sender
* when the ring transitions from being full to being able
* to handle a message of size "pending_send_sz".
*
* Add necessary state for this enhancement.
*/
u32 pending_send_sz;
u32 reserved1[12];
union {
struct {
u32 feat_pending_send_sz:1;
};
u32 value;
} feature_bits;
/* Pad it to PAGE_SIZE so that data starts on page boundary */
u8 reserved2[4028];
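/*
* Illustrative only: with feat_pending_send_sz advertised by the
* reader, a writer that finds the ring full can record the size of
* the blocked write in pending_send_sz; the reader then signals the
* writer once at least that much space is free again, instead of
* after every read.
*/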
/*
* Ring data starts here + RingDataStartOffset
@ -405,12 +419,22 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
*/
#define HV_DRV_VERSION "3.1"
/*
* A revision number of vmbus that is used for ensuring both ends on a
* partition are using compatible versions.
* VMBUS version is 32 bit entity broken up into
* two 16 bit quantities: major_number. minor_number.
*
* 0 . 13 (Windows Server 2008)
* 1 . 1 (Windows 7)
* 2 . 4 (Windows 8)
*/
#define VMBUS_REVISION_NUMBER 13
#define VERSION_WS2008 ((0 << 16) | (13))
#define VERSION_WIN7 ((1 << 16) | (1))
#define VERSION_WIN8 ((2 << 16) | (4))
#define VERSION_INVAL -1
#define VERSION_CURRENT VERSION_WIN8
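/*
* Illustrative decode (not part of this change):
*
*       major = version >> 16;     e.g. VERSION_WIN8 >> 16    == 2
*       minor = version & 0xffff;  e.g. VERSION_WIN8 & 0xffff == 4
*/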
/* Maximum size of pipe payload is 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@ -432,9 +456,13 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
struct vmbus_channel_offer {
uuid_le if_type;
uuid_le if_instance;
u64 int_latency; /* in 100ns units */
u32 if_revision;
u32 server_ctx_size; /* in bytes */
/*
* These two fields are not currently used.
*/
u64 reserved1;
u64 reserved2;
u16 chn_flags;
u16 mmio_megabytes; /* in bytes * 1024 * 1024 */
@ -456,7 +484,11 @@ struct vmbus_channel_offer {
unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
} pipe;
} u;
u32 padding;
/*
* The sub_channel_index is defined in win8.
*/
u16 sub_channel_index;
u16 reserved3;
} __packed;
/* Server Flags */
@ -652,7 +684,25 @@ struct vmbus_channel_offer_channel {
struct vmbus_channel_offer offer;
u32 child_relid;
u8 monitorid;
u8 monitor_allocated;
/*
* win7 and beyond splits this field into a bit field.
*/
u8 monitor_allocated:1;
u8 reserved:7;
/*
* These are new fields added in win7 and later.
* Do not access these fields without checking the
* negotiated protocol.
*
* If "is_dedicated_interrupt" is set, we must not set the
* associated bit in the channel bitmap while sending the
* interrupt to the host.
*
* connection_id is to be used in signaling the host.
*/
u16 is_dedicated_interrupt:1;
u16 reserved1:15;
u32 connection_id;
} __packed;
/* Rescind Offer parameters */
@ -683,8 +733,15 @@ struct vmbus_channel_open_channel {
/* GPADL for the channel's ring buffer. */
u32 ringbuffer_gpadlhandle;
/* GPADL for the channel's server context save area. */
u32 server_contextarea_gpadlhandle;
/*
* Starting with win8, this field will be used to specify
* the target virtual processor on which to deliver the interrupt for
* the host to guest communication.
* Prior to win8, incoming channel interrupts would only
* be delivered on cpu 0. Setting this value to 0 would
* preserve the earlier behavior.
*/
u32 target_vp;
/*
* The upstream ring buffer begins at offset zero in the memory
@ -848,6 +905,27 @@ struct vmbus_close_msg {
struct vmbus_channel_close_channel msg;
};
/* Define connection identifier type. */
union hv_connection_id {
u32 asu32;
struct {
u32 id:24;
u32 reserved:8;
} u;
};
/* Definition of the hv_signal_event hypercall input structure. */
struct hv_input_signal_event {
union hv_connection_id connectionid;
u16 flag_number;
u16 rsvdz;
};
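/*
* Hypercall input must be 8-byte aligned; the leading u64 in the
* buffer below appears to exist purely to guarantee that alignment
* for "event" (an inference from the layout, not text from this
* patch).
*/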
struct hv_input_signal_event_buffer {
u64 align8;
struct hv_input_signal_event event;
};
struct vmbus_channel {
struct list_head listentry;
@ -882,8 +960,42 @@ struct vmbus_channel {
void (*onchannel_callback)(void *context);
void *channel_callback_context;
/*
* A channel can be marked for efficient (batched)
* reading:
* If batched_reading is set to "true", we read until the
* channel is empty and hold off interrupts from the host
* during the entire read process.
* If batched_reading is set to "false", the client is not
* going to perform batched reading.
*
* By default we will enable batched reading; specific
* drivers that don't want this behavior can turn it off.
*/
bool batched_reading;
bool is_dedicated_interrupt;
struct hv_input_signal_event_buffer sig_buf;
struct hv_input_signal_event *sig_event;
/*
* Starting with win8, this field will be used to specify
* the target virtual processor on which to deliver the interrupt for
* the host to guest communication.
* Prior to win8, incoming channel interrupts would only
* be delivered on cpu 0. Setting this value to 0 would
* preserve the earlier behavior.
*/
u32 target_vp;
};
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
c->batched_reading = state;
}
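/*
* Example (illustrative): a driver that prefers per-message interrupts
* can opt out of batched reading when it sets up its channel, e.g.
*
*       set_channel_read_state(channel, false);
*/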
void vmbus_onmessage(void *context);
int vmbus_request_offers(void);
@ -1046,6 +1158,100 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver);
.guid = { g0, g1, g2, g3, g4, g5, g6, g7, \
g8, g9, ga, gb, gc, gd, ge, gf },
/*
* GUID definitions of various offer types - services offered to the guest.
*/
/*
* Network GUID
* {f8615163-df3e-46c5-913f-f2d2f965ed0e}
*/
#define HV_NIC_GUID \
.guid = { \
0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \
0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \
}
/*
* IDE GUID
* {32412632-86cb-44a2-9b5c-50d1417354f5}
*/
#define HV_IDE_GUID \
.guid = { \
0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \
0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \
}
/*
* SCSI GUID
* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
*/
#define HV_SCSI_GUID \
.guid = { \
0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \
0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \
}
/*
* Shutdown GUID
* {0e0b6031-5213-4934-818b-38d90ced39db}
*/
#define HV_SHUTDOWN_GUID \
.guid = { \
0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \
0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \
}
/*
* Time Synch GUID
* {9527e630-d0ae-497b-adce-e80ab0175caf}
*/
#define HV_TS_GUID \
.guid = { \
0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \
0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \
}
/*
* Heartbeat GUID
* {57164f39-9115-4e78-ab55-382f3bd5422d}
*/
#define HV_HEART_BEAT_GUID \
.guid = { \
0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \
0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \
}
/*
* KVP GUID
* {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
*/
#define HV_KVP_GUID \
.guid = { \
0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \
0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6 \
}
/*
* Dynamic memory GUID
* {525074dc-8985-46e2-8057-a307dc18a502}
*/
#define HV_DM_GUID \
.guid = { \
0xdc, 0x74, 0x50, 0x52, 0x85, 0x89, 0xe2, 0x46, \
0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \
}
/*
* Mouse GUID
* {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
*/
#define HV_MOUSE_GUID \
.guid = { \
0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \
0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \
}
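Note the byte ordering: the first three groups of the textual GUID are stored little-endian, which is why {f8615163-df3e-46c5-...} begins 0x63, 0x51, 0x61, 0xf8. A sketch of how a driver might match one of these offers, modeled on the usual VMBus id-table usage:

/* Sketch: id table matching the synthetic network offer. */
static const struct hv_vmbus_device_id example_id_table[] = {
	{ HV_NIC_GUID, },
	{ },	/* terminator */
};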
/*
* Common header for Hyper-V ICs
*/
@@ -1150,5 +1356,11 @@ int hv_kvp_init(struct hv_util_service *);
void hv_kvp_deinit(void);
void hv_kvp_onchannelcallback(void *);
/*
* Negotiated version with the Host.
*/
extern __u32 vmbus_proto_version;
#endif /* __KERNEL__ */
#endif /* _HYPERV_H */

@@ -75,8 +75,10 @@ enum arizona_type {
#define ARIZONA_IRQ_DCS_HP_DONE 47
#define ARIZONA_IRQ_FLL2_CLOCK_OK 48
#define ARIZONA_IRQ_FLL1_CLOCK_OK 49
#define ARIZONA_IRQ_MICD_CLAMP_RISE 50
#define ARIZONA_IRQ_MICD_CLAMP_FALL 51
#define ARIZONA_NUM_IRQ 50
#define ARIZONA_NUM_IRQ 52
struct snd_soc_dapm_context;

@@ -105,9 +105,30 @@ struct arizona_pdata {
*/
int max_channels_clocked[ARIZONA_MAX_AIF];
/** GPIO5 is used for jack detection */
bool jd_gpio5;
/** Use the headphone detect circuit to identify the accessory */
bool hpdet_acc_id;
/** GPIO used for mic isolation with HPDET */
int hpdet_id_gpio;
/** GPIO for mic detection polarity */
int micd_pol_gpio;
/** Mic detect ramp rate */
int micd_bias_start_time;
/** Mic detect sample rate */
int micd_rate;
/** Mic detect debounce level */
int micd_dbtime;
/** Force MICBIAS on for mic detect */
bool micd_force_micbias;
/** Headset polarity configurations */
struct arizona_micd_config *micd_configs;
int num_micd_configs;
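A sketch of board code populating a few of these new fields (all values are illustrative, not taken from any real machine):

/* Sketch: illustrative accessory-detect platform data. */
static struct arizona_pdata example_arizona_pdata = {
	.hpdet_acc_id		= true,
	.hpdet_id_gpio		= 103,	/* example GPIO numbers */
	.micd_pol_gpio		= 104,
	.micd_force_micbias	= true,
};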

@@ -119,6 +119,8 @@
#define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293
#define ARIZONA_HEADPHONE_DETECT_1 0x29B
#define ARIZONA_HEADPHONE_DETECT_2 0x29C
#define ARIZONA_HP_DACVAL 0x29F
#define ARIZONA_MICD_CLAMP_CONTROL 0x2A2
#define ARIZONA_MIC_DETECT_1 0x2A3
#define ARIZONA_MIC_DETECT_2 0x2A4
#define ARIZONA_MIC_DETECT_3 0x2A5
@@ -1194,6 +1196,14 @@
/*
* R64 (0x40) - Wake control
*/
#define ARIZONA_WKUP_MICD_CLAMP_FALL 0x0080 /* WKUP_MICD_CLAMP_FALL */
#define ARIZONA_WKUP_MICD_CLAMP_FALL_MASK 0x0080 /* WKUP_MICD_CLAMP_FALL */
#define ARIZONA_WKUP_MICD_CLAMP_FALL_SHIFT 7 /* WKUP_MICD_CLAMP_FALL */
#define ARIZONA_WKUP_MICD_CLAMP_FALL_WIDTH 1 /* WKUP_MICD_CLAMP_FALL */
#define ARIZONA_WKUP_MICD_CLAMP_RISE 0x0040 /* WKUP_MICD_CLAMP_RISE */
#define ARIZONA_WKUP_MICD_CLAMP_RISE_MASK 0x0040 /* WKUP_MICD_CLAMP_RISE */
#define ARIZONA_WKUP_MICD_CLAMP_RISE_SHIFT 6 /* WKUP_MICD_CLAMP_RISE */
#define ARIZONA_WKUP_MICD_CLAMP_RISE_WIDTH 1 /* WKUP_MICD_CLAMP_RISE */
#define ARIZONA_WKUP_GP5_FALL 0x0020 /* WKUP_GP5_FALL */
#define ARIZONA_WKUP_GP5_FALL_MASK 0x0020 /* WKUP_GP5_FALL */
#define ARIZONA_WKUP_GP5_FALL_SHIFT 5 /* WKUP_GP5_FALL */
@@ -2035,6 +2045,9 @@
/*
* R667 (0x29B) - Headphone Detect 1
*/
#define ARIZONA_HP_IMPEDANCE_RANGE_MASK 0x0600 /* HP_IMPEDANCE_RANGE - [10:9] */
#define ARIZONA_HP_IMPEDANCE_RANGE_SHIFT 9 /* HP_IMPEDANCE_RANGE - [10:9] */
#define ARIZONA_HP_IMPEDANCE_RANGE_WIDTH 2 /* HP_IMPEDANCE_RANGE - [10:9] */
#define ARIZONA_HP_STEP_SIZE 0x0100 /* HP_STEP_SIZE */
#define ARIZONA_HP_STEP_SIZE_MASK 0x0100 /* HP_STEP_SIZE */
#define ARIZONA_HP_STEP_SIZE_SHIFT 8 /* HP_STEP_SIZE */
@@ -2069,6 +2082,21 @@
#define ARIZONA_HP_LVL_SHIFT 0 /* HP_LVL - [6:0] */
#define ARIZONA_HP_LVL_WIDTH 7 /* HP_LVL - [6:0] */
#define ARIZONA_HP_DONE_B 0x8000 /* HP_DONE */
#define ARIZONA_HP_DONE_B_MASK 0x8000 /* HP_DONE */
#define ARIZONA_HP_DONE_B_SHIFT 15 /* HP_DONE */
#define ARIZONA_HP_DONE_B_WIDTH 1 /* HP_DONE */
#define ARIZONA_HP_LVL_B_MASK 0x7FFF /* HP_LVL - [14:0] */
#define ARIZONA_HP_LVL_B_SHIFT 0 /* HP_LVL - [14:0] */
#define ARIZONA_HP_LVL_B_WIDTH 15 /* HP_LVL - [14:0] */
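A sketch of how these fields might be consumed through the MFD's regmap (the helper is hypothetical; error handling abbreviated):

/* Sketch: fetch the raw HPDET level once the measurement completes. */
static int example_read_hpdet(struct arizona *arizona, unsigned int *lvl)
{
	unsigned int val;
	int ret;

	ret = regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_2, &val);
	if (ret)
		return ret;
	if (!(val & ARIZONA_HP_DONE_B))
		return -EAGAIN;	/* measurement still in progress */

	*lvl = (val & ARIZONA_HP_LVL_B_MASK) >> ARIZONA_HP_LVL_B_SHIFT;
	return 0;
}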
/*
* R674 (0x2A2) - MICD clamp control
*/
#define ARIZONA_MICD_CLAMP_MODE_MASK 0x000F /* MICD_CLAMP_MODE - [3:0] */
#define ARIZONA_MICD_CLAMP_MODE_SHIFT 0 /* MICD_CLAMP_MODE - [3:0] */
#define ARIZONA_MICD_CLAMP_MODE_WIDTH 4 /* MICD_CLAMP_MODE - [3:0] */
/*
* R675 (0x2A3) - Mic Detect 1
*/
@@ -5239,6 +5267,14 @@
/*
* R3408 (0xD50) - AOD wkup and trig
*/
#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS 0x0080 /* MICD_CLAMP_FALL_TRIG_STS */
#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_MASK 0x0080 /* MICD_CLAMP_FALL_TRIG_STS */
#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_SHIFT 7 /* MICD_CLAMP_FALL_TRIG_STS */
#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_WIDTH 1 /* MICD_CLAMP_FALL_TRIG_STS */
#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS 0x0040 /* MICD_CLAMP_RISE_TRIG_STS */
#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_MASK 0x0040 /* MICD_CLAMP_RISE_TRIG_STS */
#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_SHIFT 6 /* MICD_CLAMP_RISE_TRIG_STS */
#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_WIDTH 1 /* MICD_CLAMP_RISE_TRIG_STS */
#define ARIZONA_GP5_FALL_TRIG_STS 0x0020 /* GP5_FALL_TRIG_STS */
#define ARIZONA_GP5_FALL_TRIG_STS_MASK 0x0020 /* GP5_FALL_TRIG_STS */
#define ARIZONA_GP5_FALL_TRIG_STS_SHIFT 5 /* GP5_FALL_TRIG_STS */
@@ -5267,6 +5303,12 @@
/*
* R3409 (0xD51) - AOD IRQ1
*/
#define ARIZONA_MICD_CLAMP_FALL_EINT1 0x0080 /* MICD_CLAMP_FALL_EINT1 */
#define ARIZONA_MICD_CLAMP_FALL_EINT1_MASK 0x0080 /* MICD_CLAMP_FALL_EINT1 */
#define ARIZONA_MICD_CLAMP_FALL_EINT1_SHIFT 7 /* MICD_CLAMP_FALL_EINT1 */
#define ARIZONA_MICD_CLAMP_RISE_EINT1 0x0040 /* MICD_CLAMP_RISE_EINT1 */
#define ARIZONA_MICD_CLAMP_RISE_EINT1_MASK 0x0040 /* MICD_CLAMP_RISE_EINT1 */
#define ARIZONA_MICD_CLAMP_RISE_EINT1_SHIFT 6 /* MICD_CLAMP_RISE_EINT1 */
#define ARIZONA_GP5_FALL_EINT1 0x0020 /* GP5_FALL_EINT1 */
#define ARIZONA_GP5_FALL_EINT1_MASK 0x0020 /* GP5_FALL_EINT1 */
#define ARIZONA_GP5_FALL_EINT1_SHIFT 5 /* GP5_FALL_EINT1 */
@@ -5295,6 +5337,12 @@
/*
* R3410 (0xD52) - AOD IRQ2
*/
#define ARIZONA_MICD_CLAMP_FALL_EINT2 0x0080 /* MICD_CLAMP_FALL_EINT2 */
#define ARIZONA_MICD_CLAMP_FALL_EINT2_MASK 0x0080 /* MICD_CLAMP_FALL_EINT2 */
#define ARIZONA_MICD_CLAMP_FALL_EINT2_SHIFT 7 /* MICD_CLAMP_FALL_EINT2 */
#define ARIZONA_MICD_CLAMP_RISE_EINT2 0x0040 /* MICD_CLAMP_RISE_EINT2 */
#define ARIZONA_MICD_CLAMP_RISE_EINT2_MASK 0x0040 /* MICD_CLAMP_RISE_EINT2 */
#define ARIZONA_MICD_CLAMP_RISE_EINT2_SHIFT 6 /* MICD_CLAMP_RISE_EINT2 */
#define ARIZONA_GP5_FALL_EINT2 0x0020 /* GP5_FALL_EINT2 */
#define ARIZONA_GP5_FALL_EINT2_MASK 0x0020 /* GP5_FALL_EINT2 */
#define ARIZONA_GP5_FALL_EINT2_SHIFT 5 /* GP5_FALL_EINT2 */
@@ -5379,6 +5427,10 @@
/*
* R3413 (0xD55) - AOD IRQ Raw Status
*/
#define ARIZONA_MICD_CLAMP_STS 0x0008 /* MICD_CLAMP_STS */
#define ARIZONA_MICD_CLAMP_STS_MASK 0x0008 /* MICD_CLAMP_STS */
#define ARIZONA_MICD_CLAMP_STS_SHIFT 3 /* MICD_CLAMP_STS */
#define ARIZONA_MICD_CLAMP_STS_WIDTH 1 /* MICD_CLAMP_STS */
#define ARIZONA_GP5_STS 0x0004 /* GP5_STS */
#define ARIZONA_GP5_STS_MASK 0x0004 /* GP5_STS */
#define ARIZONA_GP5_STS_SHIFT 2 /* GP5_STS */
@@ -5395,6 +5447,10 @@
/*
* R3414 (0xD56) - Jack detect debounce
*/
#define ARIZONA_MICD_CLAMP_DB 0x0008 /* MICD_CLAMP_DB */
#define ARIZONA_MICD_CLAMP_DB_MASK 0x0008 /* MICD_CLAMP_DB */
#define ARIZONA_MICD_CLAMP_DB_SHIFT 3 /* MICD_CLAMP_DB */
#define ARIZONA_MICD_CLAMP_DB_WIDTH 1 /* MICD_CLAMP_DB */
#define ARIZONA_JD2_DB 0x0002 /* JD2_DB */
#define ARIZONA_JD2_DB_MASK 0x0002 /* JD2_DB */
#define ARIZONA_JD2_DB_SHIFT 1 /* JD2_DB */

@@ -106,6 +106,92 @@ enum max77693_muic_reg {
MAX77693_MUIC_REG_END,
};
/* MAX77693 MUIC - STATUS1~3 Register */
#define STATUS1_ADC_SHIFT (0)
#define STATUS1_ADCLOW_SHIFT (5)
#define STATUS1_ADCERR_SHIFT (6)
#define STATUS1_ADC1K_SHIFT (7)
#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT)
#define STATUS2_CHGTYP_SHIFT (0)
#define STATUS2_CHGDETRUN_SHIFT (3)
#define STATUS2_DCDTMR_SHIFT (4)
#define STATUS2_DXOVP_SHIFT (5)
#define STATUS2_VBVOLT_SHIFT (6)
#define STATUS2_VIDRM_SHIFT (7)
#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT)
#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT)
#define STATUS3_OVP_SHIFT (2)
#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
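A sketch of decoding the cable-detect fields from a raw STATUS1 read (helper names are hypothetical):

/* Sketch: extract the 5-bit ADC code and the ADCLOW flag. */
static inline u8 example_status1_adc(u8 status1)
{
	return (status1 & STATUS1_ADC_MASK) >> STATUS1_ADC_SHIFT;
}

static inline bool example_status1_adclow(u8 status1)
{
	return !!(status1 & STATUS1_ADCLOW_MASK);
}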
/* MAX77693 CDETCTRL1~2 register */
#define CDETCTRL1_CHGDETEN_SHIFT (0)
#define CDETCTRL1_CHGTYPMAN_SHIFT (1)
#define CDETCTRL1_DCDEN_SHIFT (2)
#define CDETCTRL1_DCD2SCT_SHIFT (3)
#define CDETCTRL1_CDDELAY_SHIFT (4)
#define CDETCTRL1_DCDCPL_SHIFT (5)
#define CDETCTRL1_CDPDET_SHIFT (7)
#define CDETCTRL1_CHGDETEN_MASK (0x1 << CDETCTRL1_CHGDETEN_SHIFT)
#define CDETCTRL1_CHGTYPMAN_MASK (0x1 << CDETCTRL1_CHGTYPMAN_SHIFT)
#define CDETCTRL1_DCDEN_MASK (0x1 << CDETCTRL1_DCDEN_SHIFT)
#define CDETCTRL1_DCD2SCT_MASK (0x1 << CDETCTRL1_DCD2SCT_SHIFT)
#define CDETCTRL1_CDDELAY_MASK (0x1 << CDETCTRL1_CDDELAY_SHIFT)
#define CDETCTRL1_DCDCPL_MASK (0x1 << CDETCTRL1_DCDCPL_SHIFT)
#define CDETCTRL1_CDPDET_MASK (0x1 << CDETCTRL1_CDPDET_SHIFT)
#define CDETCTRL2_VIDRMEN_SHIFT (1)
#define CDETCTRL2_DXOVPEN_SHIFT (3)
#define CDETCTRL2_VIDRMEN_MASK (0x1 << CDETCTRL2_VIDRMEN_SHIFT)
#define CDETCTRL2_DXOVPEN_MASK (0x1 << CDETCTRL2_DXOVPEN_SHIFT)
/* MAX77693 MUIC - CONTROL1~3 register */
#define COMN1SW_SHIFT (0)
#define COMP2SW_SHIFT (3)
#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
| (1 << COMN1SW_SHIFT))
#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
| (2 << COMN1SW_SHIFT))
#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
| (3 << COMN1SW_SHIFT))
#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
| (0 << COMN1SW_SHIFT))
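COMP2 and COMN1 are programmed in lockstep, which is why each CONTROL1_SW_* value combines both fields. A sketch of selecting the UART line (assuming MAX77693_MUIC_REG_CTRL1 from this header's register enum and a regmap handle for the MUIC block):

/* Sketch: route the MUIC switches to the UART path. */
static int example_set_uart_path(struct regmap *muic_regmap)
{
	return regmap_update_bits(muic_regmap, MAX77693_MUIC_REG_CTRL1,
				  COMP_SW_MASK, CONTROL1_SW_UART);
}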
#define CONTROL2_LOWPWR_SHIFT (0)
#define CONTROL2_ADCEN_SHIFT (1)
#define CONTROL2_CPEN_SHIFT (2)
#define CONTROL2_SFOUTASRT_SHIFT (3)
#define CONTROL2_SFOUTORD_SHIFT (4)
#define CONTROL2_ACCDET_SHIFT (5)
#define CONTROL2_USBCPINT_SHIFT (6)
#define CONTROL2_RCPS_SHIFT (7)
#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
#define CONTROL3_JIGSET_SHIFT (0)
#define CONTROL3_BTLDSET_SHIFT (2)
#define CONTROL3_ADCDBSET_SHIFT (4)
#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
/* Slave addr = 0x90: Haptic */
enum max77693_haptic_reg {
MAX77693_HAPTIC_REG_STATUS = 0x00,

@@ -38,6 +38,15 @@ struct max77693_reg_data {
struct max77693_muic_platform_data {
struct max77693_reg_data *init_data;
int num_init_data;
int detcable_delay_ms;
/*
* Default USB/UART path: selects either the UART/USB or the
* AUX_UART/AUX_USB h/w line via the COMP2/COMN1 switches of the
* CONTROL1 register.
*/
int path_usb;
int path_uart;
};
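A sketch of platform data exercising the new fields (values illustrative; the CONTROL1_SW_* constants come from the MUIC register definitions above):

/* Sketch: default the H/W line path to the normal USB/UART pins. */
static struct max77693_muic_platform_data example_muic_pdata = {
	.detcable_delay_ms	= 500,	/* example debounce delay */
	.path_usb		= CONTROL1_SW_USB,
	.path_uart		= CONTROL1_SW_UART,
};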
struct max77693_platform_data {

Some files were not shown because too many files have changed in this diff.