Commit 2106955d authored Jun 21, 2016 by Oleg Dzhimiev
switched card detect to DAT3 (was CD pin)
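This commit brings the SDHCI driver into the Elphel tree with card detection taken from the DAT3 line level in the controller's present-state register instead of the dedicated CD pin. A minimal sketch of the detection read used throughout the file below (same register and bit names as in the imported source):

    /* card considered present when the DAT3 level bit is set */
    present = sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_DAT3_PRESENT;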
parent cc11cd87
Showing 2 changed files with 4045 additions and 0 deletions
sdhci.c (src/drivers/mmc/host/sdhci.c): +3582, -0
sdhci.h (src/drivers/mmc/host/sdhci.h): +463, -0
src/drivers/mmc/host/sdhci.c (new file, 0 → 100644)
/*
* linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
*
* Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* Thanks to the following companies for their support:
*
* - JMicron (hardware and technical support)
*/
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/leds.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include "sdhci.h"
#define DRIVER_NAME "sdhci"
#define DBG(f, x...) \
pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif
#define MAX_TUNING_LOOP 40
static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);
static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
                                  struct mmc_data *data,
                                  struct sdhci_host_next *next);
#ifdef CONFIG_PM
static int sdhci_runtime_pm_get(struct sdhci_host *host);
static int sdhci_runtime_pm_put(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
#else
static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
{
    return 0;
}

static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
{
    return 0;
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
}
#endif
static void sdhci_dumpregs(struct sdhci_host *host)
{
    pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
        mmc_hostname(host->mmc));

    pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
        sdhci_readl(host, SDHCI_DMA_ADDRESS),
        sdhci_readw(host, SDHCI_HOST_VERSION));
    pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
        sdhci_readw(host, SDHCI_BLOCK_SIZE),
        sdhci_readw(host, SDHCI_BLOCK_COUNT));
    pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
        sdhci_readl(host, SDHCI_ARGUMENT),
        sdhci_readw(host, SDHCI_TRANSFER_MODE));
    pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
        sdhci_readl(host, SDHCI_PRESENT_STATE),
        sdhci_readb(host, SDHCI_HOST_CONTROL));
    pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
        sdhci_readb(host, SDHCI_POWER_CONTROL),
        sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
    pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
        sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
        sdhci_readw(host, SDHCI_CLOCK_CONTROL));
    pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
        sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
        sdhci_readl(host, SDHCI_INT_STATUS));
    pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
        sdhci_readl(host, SDHCI_INT_ENABLE),
        sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
    pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
        sdhci_readw(host, SDHCI_ACMD12_ERR),
        sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
    pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
        sdhci_readl(host, SDHCI_CAPABILITIES),
        sdhci_readl(host, SDHCI_CAPABILITIES_1));
    pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
        sdhci_readw(host, SDHCI_COMMAND),
        sdhci_readl(host, SDHCI_MAX_CURRENT));
    pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
        sdhci_readw(host, SDHCI_HOST_CONTROL2));

    if (host->flags & SDHCI_USE_ADMA) {
        if (host->flags & SDHCI_USE_64_BIT_DMA)
            pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
                readl(host->ioaddr + SDHCI_ADMA_ERROR),
                readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
                readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
        else
            pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
                readl(host->ioaddr + SDHCI_ADMA_ERROR),
                readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
    }

    pr_debug(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
* *
* Low level functions *
* *
\*****************************************************************************/
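/*
 * Note (per this commit): card presence below is sensed from the DAT3 line
 * level bit (SDHCI_DAT3_PRESENT) in SDHCI_PRESENT_STATE rather than the
 * CD-pin card-present bit used by the stock driver.
 */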
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
    u32 present;

    if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
        (host->mmc->caps & MMC_CAP_NONREMOVABLE))
        return;

    if (enable) {
        present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
                  SDHCI_DAT3_PRESENT;

        host->ier |= present ? SDHCI_INT_CARD_REMOVE :
                               SDHCI_INT_CARD_INSERT;
    } else {
        host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
    }

    sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
    sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
    sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
    sdhci_set_card_detection(host, false);
}
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
    unsigned long timeout;

    sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

    if (mask & SDHCI_RESET_ALL) {
        host->clock = 0;
        /* Reset-all turns off SD Bus Power */
        if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
            sdhci_runtime_pm_bus_off(host);
    }

    /* Wait max 100 ms */
    timeout = 100;

    /* hw clears the bit when it's done */
    while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
        if (timeout == 0) {
            pr_err("%s: Reset 0x%x never completed.\n",
                mmc_hostname(host->mmc), (int)mask);
            sdhci_dumpregs(host);
            return;
        }
        timeout--;
        mdelay(1);
    }
}
EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
    if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
        if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
              SDHCI_DAT3_PRESENT))
            return;
    }

    host->ops->reset(host, mask);

    if (mask & SDHCI_RESET_ALL) {
        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
            if (host->ops->enable_dma)
                host->ops->enable_dma(host);
        }

        /* Resetting the controller clears many */
        host->preset_enabled = false;
    }
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);

static void sdhci_init(struct sdhci_host *host, int soft)
{
    if (soft)
        sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
    else
        sdhci_do_reset(host, SDHCI_RESET_ALL);

    host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
                SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
                SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
                SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
                SDHCI_INT_RESPONSE;

    sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
    sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

    if (soft) {
        /* force clock reconfiguration */
        host->clock = 0;
        sdhci_set_ios(host->mmc, &host->mmc->ios);
    }
}
static void sdhci_reinit(struct sdhci_host *host)
{
    sdhci_init(host, 0);
    /*
     * Retuning stuffs are affected by different cards inserted and only
     * applicable to UHS-I cards. So reset these fields to their initial
     * value when card is removed.
     */
    if (host->flags & SDHCI_USING_RETUNING_TIMER) {
        host->flags &= ~SDHCI_USING_RETUNING_TIMER;

        del_timer_sync(&host->tuning_timer);
        host->flags &= ~SDHCI_NEEDS_RETUNING;
    }
    sdhci_enable_card_detection(host);
}
static void sdhci_activate_led(struct sdhci_host *host)
{
    u8 ctrl;

    ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
    ctrl |= SDHCI_CTRL_LED;
    sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
    u8 ctrl;

    ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
    ctrl &= ~SDHCI_CTRL_LED;
    sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
#ifdef SDHCI_USE_LEDS_CLASS
static void sdhci_led_control(struct led_classdev *led,
    enum led_brightness brightness)
{
    struct sdhci_host *host = container_of(led, struct sdhci_host, led);
    unsigned long flags;

    spin_lock_irqsave(&host->lock, flags);

    if (host->runtime_suspended)
        goto out;

    if (brightness == LED_OFF)
        sdhci_deactivate_led(host);
    else
        sdhci_activate_led(host);
out:
    spin_unlock_irqrestore(&host->lock, flags);
}
#endif
/*****************************************************************************\
* *
* Core functions *
* *
\*****************************************************************************/
static void sdhci_read_block_pio(struct sdhci_host *host)
{
    unsigned long flags;
    size_t blksize, len, chunk;
    u32 uninitialized_var(scratch);
    u8 *buf;

    DBG("PIO reading\n");

    blksize = host->data->blksz;
    chunk = 0;

    local_irq_save(flags);

    while (blksize) {
        if (!sg_miter_next(&host->sg_miter))
            BUG();

        len = min(host->sg_miter.length, blksize);

        blksize -= len;
        host->sg_miter.consumed = len;

        buf = host->sg_miter.addr;

        while (len) {
            if (chunk == 0) {
                scratch = sdhci_readl(host, SDHCI_BUFFER);
                chunk = 4;
            }

            *buf = scratch & 0xFF;

            buf++;
            scratch >>= 8;
            chunk--;
            len--;
        }
    }

    sg_miter_stop(&host->sg_miter);

    local_irq_restore(flags);
}
static void sdhci_write_block_pio(struct sdhci_host *host)
{
    unsigned long flags;
    size_t blksize, len, chunk;
    u32 scratch;
    u8 *buf;

    DBG("PIO writing\n");

    blksize = host->data->blksz;
    chunk = 0;
    scratch = 0;

    local_irq_save(flags);

    while (blksize) {
        if (!sg_miter_next(&host->sg_miter))
            BUG();

        len = min(host->sg_miter.length, blksize);

        blksize -= len;
        host->sg_miter.consumed = len;

        buf = host->sg_miter.addr;

        while (len) {
            scratch |= (u32)*buf << (chunk * 8);

            buf++;
            chunk++;
            len--;

            if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
                sdhci_writel(host, scratch, SDHCI_BUFFER);
                chunk = 0;
                scratch = 0;
            }
        }
    }

    sg_miter_stop(&host->sg_miter);

    local_irq_restore(flags);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
{
    u32 mask;

    BUG_ON(!host->data);

    if (host->blocks == 0)
        return;

    if (host->data->flags & MMC_DATA_READ)
        mask = SDHCI_DATA_AVAILABLE;
    else
        mask = SDHCI_SPACE_AVAILABLE;

    /*
     * Some controllers (JMicron JMB38x) mess up the buffer bits
     * for transfers < 4 bytes. As long as it is just one block,
     * we can ignore the bits.
     */
    if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
        (host->data->blocks == 1))
        mask = ~0;

    while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
        if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
            udelay(100);

        if (host->data->flags & MMC_DATA_READ)
            sdhci_read_block_pio(host);
        else
            sdhci_write_block_pio(host);

        host->blocks--;
        if (host->blocks == 0)
            break;
    }

    DBG("PIO transfer complete.\n");
}
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
    local_irq_save(*flags);
    return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
    kunmap_atomic(buffer);
    local_irq_restore(*flags);
}
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
                                  dma_addr_t addr, int len, unsigned cmd)
{
    struct sdhci_adma2_64_desc *dma_desc = desc;

    /* 32-bit and 64-bit descriptors have these members in same position */
    dma_desc->cmd = cpu_to_le16(cmd);
    dma_desc->len = cpu_to_le16(len);
    dma_desc->addr_lo = cpu_to_le32((u32)addr);

    if (host->flags & SDHCI_USE_64_BIT_DMA)
        dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}
static void sdhci_adma_mark_end(void *desc)
{
    struct sdhci_adma2_64_desc *dma_desc = desc;

    /* 32-bit and 64-bit descriptors have 'cmd' in same position */
    dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
static int sdhci_adma_table_pre(struct sdhci_host *host,
    struct mmc_data *data)
{
    int direction;

    void *desc;
    void *align;
    dma_addr_t addr;
    dma_addr_t align_addr;
    int len, offset;

    struct scatterlist *sg;
    int i;
    char *buffer;
    unsigned long flags;

    /*
     * The spec does not specify endianness of descriptor table.
     * We currently guess that it is LE.
     */

    if (data->flags & MMC_DATA_READ)
        direction = DMA_FROM_DEVICE;
    else
        direction = DMA_TO_DEVICE;

    host->align_addr = dma_map_single(mmc_dev(host->mmc),
        host->align_buffer, host->align_buffer_sz, direction);
    if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
        goto fail;
    BUG_ON(host->align_addr & host->align_mask);

    host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
    if (host->sg_count < 0)
        goto unmap_align;

    desc = host->adma_table;
    align = host->align_buffer;

    align_addr = host->align_addr;

    for_each_sg(data->sg, sg, host->sg_count, i) {
        addr = sg_dma_address(sg);
        len = sg_dma_len(sg);

        /*
         * The SDHCI specification states that ADMA
         * addresses must be 32-bit aligned. If they
         * aren't, then we use a bounce buffer for
         * the (up to three) bytes that screw up the
         * alignment.
         */
        offset = (host->align_sz - (addr & host->align_mask)) &
                 host->align_mask;
        if (offset) {
            if (data->flags & MMC_DATA_WRITE) {
                buffer = sdhci_kmap_atomic(sg, &flags);
                memcpy(align, buffer, offset);
                sdhci_kunmap_atomic(buffer, &flags);
            }

            /* tran, valid */
            sdhci_adma_write_desc(host, desc, align_addr, offset,
                                  ADMA2_TRAN_VALID);

            BUG_ON(offset > 65536);

            align += host->align_sz;
            align_addr += host->align_sz;

            desc += host->desc_sz;

            addr += offset;
            len -= offset;
        }

        BUG_ON(len > 65536);

        /* tran, valid */
        sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
        desc += host->desc_sz;

        /*
         * If this triggers then we have a calculation bug
         * somewhere. :/
         */
        WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
    }

    if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
        /*
         * Mark the last descriptor as the terminating descriptor
         */
        if (desc != host->adma_table) {
            desc -= host->desc_sz;
            sdhci_adma_mark_end(desc);
        }
    } else {
        /*
         * Add a terminating entry.
         */

        /* nop, end, valid */
        sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
    }

    /*
     * Resync align buffer as we might have changed it.
     */
    if (data->flags & MMC_DATA_WRITE) {
        dma_sync_single_for_device(mmc_dev(host->mmc),
            host->align_addr, host->align_buffer_sz, direction);
    }

    return 0;

unmap_align:
    dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
        host->align_buffer_sz, direction);
fail:
    return -EINVAL;
}
static void sdhci_adma_table_post(struct sdhci_host *host,
    struct mmc_data *data)
{
    int direction;

    struct scatterlist *sg;
    int i, size;
    void *align;
    char *buffer;
    unsigned long flags;
    bool has_unaligned;

    if (data->flags & MMC_DATA_READ)
        direction = DMA_FROM_DEVICE;
    else
        direction = DMA_TO_DEVICE;

    dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
        host->align_buffer_sz, direction);

    /* Do a quick scan of the SG list for any unaligned mappings */
    has_unaligned = false;
    for_each_sg(data->sg, sg, host->sg_count, i)
        if (sg_dma_address(sg) & host->align_mask) {
            has_unaligned = true;
            break;
        }

    if (has_unaligned && data->flags & MMC_DATA_READ) {
        dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
            data->sg_len, direction);

        align = host->align_buffer;

        for_each_sg(data->sg, sg, host->sg_count, i) {
            if (sg_dma_address(sg) & host->align_mask) {
                size = host->align_sz -
                       (sg_dma_address(sg) & host->align_mask);

                buffer = sdhci_kmap_atomic(sg, &flags);
                memcpy(buffer, align, size);
                sdhci_kunmap_atomic(buffer, &flags);

                align += host->align_sz;
            }
        }
    }

    if (!data->host_cookie)
        dma_unmap_sg(mmc_dev(host->mmc), data->sg,
            data->sg_len, direction);
}
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
    u8 count;
    struct mmc_data *data = cmd->data;
    unsigned target_timeout, current_timeout;

    /*
     * If the host controller provides us with an incorrect timeout
     * value, just skip the check and use 0xE. The hardware may take
     * longer to time out, but that's much better than having a too-short
     * timeout value.
     */
    if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
        return 0xE;

    /* Unspecified timeout, assume max */
    if (!data && !cmd->busy_timeout)
        return 0xE;

    /* timeout in us */
    if (!data)
        target_timeout = cmd->busy_timeout * 1000;
    else {
        target_timeout = data->timeout_ns / 1000;
        if (host->clock)
            target_timeout += data->timeout_clks / host->clock;
    }

    /*
     * Figure out needed cycles.
     * We do this in steps in order to fit inside a 32 bit int.
     * The first step is the minimum timeout, which will have a
     * minimum resolution of 6 bits:
     * (1) 2^13*1000 > 2^22,
     * (2) host->timeout_clk < 2^16
     *     =>
     *     (1) / (2) > 2^6
     */
    count = 0;
    current_timeout = (1 << 13) * 1000 / host->timeout_clk;
    while (current_timeout < target_timeout) {
        count++;
        current_timeout <<= 1;
        if (count >= 0xF)
            break;
    }

    if (count >= 0xF) {
        DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
            mmc_hostname(host->mmc), count, cmd->opcode);
        count = 0xE;
    }

    return count;
}
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
    u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
    u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

    if (host->flags & SDHCI_REQ_USE_DMA)
        host->ier = (host->ier & ~pio_irqs) | dma_irqs;
    else
        host->ier = (host->ier & ~dma_irqs) | pio_irqs;

    sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
    sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
    u8 count;

    if (host->ops->set_timeout) {
        host->ops->set_timeout(host, cmd);
    } else {
        count = sdhci_calc_timeout(host, cmd);
        sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
    }
}
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
    u8 ctrl;
    struct mmc_data *data = cmd->data;
    int ret;

    WARN_ON(host->data);

    if (data || (cmd->flags & MMC_RSP_BUSY))
        sdhci_set_timeout(host, cmd);

    if (!data)
        return;

    /* Sanity checks */
    BUG_ON(data->blksz * data->blocks > 524288);
    BUG_ON(data->blksz > host->mmc->max_blk_size);
    BUG_ON(data->blocks > 65535);

    host->data = data;
    host->data_early = 0;
    host->data->bytes_xfered = 0;

    if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
        host->flags |= SDHCI_REQ_USE_DMA;

    /*
     * FIXME: This doesn't account for merging when mapping the
     * scatterlist.
     */
    if (host->flags & SDHCI_REQ_USE_DMA) {
        int broken, i;
        struct scatterlist *sg;

        broken = 0;
        if (host->flags & SDHCI_USE_ADMA) {
            if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
                broken = 1;
        } else {
            if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
                broken = 1;
        }

        if (unlikely(broken)) {
            for_each_sg(data->sg, sg, data->sg_len, i) {
                if (sg->length & 0x3) {
                    DBG("Reverting to PIO because of "
                        "transfer size (%d)\n",
                        sg->length);
                    host->flags &= ~SDHCI_REQ_USE_DMA;
                    break;
                }
            }
        }
    }

    /*
     * The assumption here being that alignment is the same after
     * translation to device address space.
     */
    if (host->flags & SDHCI_REQ_USE_DMA) {
        int broken, i;
        struct scatterlist *sg;

        broken = 0;
        if (host->flags & SDHCI_USE_ADMA) {
            /*
             * As we use 3 byte chunks to work around
             * alignment problems, we need to check this
             * quirk.
             */
            if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
                broken = 1;
        } else {
            if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
                broken = 1;
        }

        if (unlikely(broken)) {
            for_each_sg(data->sg, sg, data->sg_len, i) {
                if (sg->offset & 0x3) {
                    DBG("Reverting to PIO because of "
                        "bad alignment\n");
                    host->flags &= ~SDHCI_REQ_USE_DMA;
                    break;
                }
            }
        }
    }

    if (host->flags & SDHCI_REQ_USE_DMA) {
        if (host->flags & SDHCI_USE_ADMA) {
            ret = sdhci_adma_table_pre(host, data);
            if (ret) {
                /*
                 * This only happens when someone fed
                 * us an invalid request.
                 */
                WARN_ON(1);
                host->flags &= ~SDHCI_REQ_USE_DMA;
            } else {
                sdhci_writel(host, host->adma_addr,
                    SDHCI_ADMA_ADDRESS);
                if (host->flags & SDHCI_USE_64_BIT_DMA)
                    sdhci_writel(host,
                        (u64)host->adma_addr >> 32,
                        SDHCI_ADMA_ADDRESS_HI);
            }
        } else {
            int sg_cnt;

            sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
            if (sg_cnt == 0) {
                /*
                 * This only happens when someone fed
                 * us an invalid request.
                 */
                WARN_ON(1);
                host->flags &= ~SDHCI_REQ_USE_DMA;
            } else {
                WARN_ON(sg_cnt != 1);
                sdhci_writel(host, sg_dma_address(data->sg),
                    SDHCI_DMA_ADDRESS);
            }
        }
    }

    /*
     * Always adjust the DMA selection as some controllers
     * (e.g. JMicron) can't do PIO properly when the selection
     * is ADMA.
     */
    if (host->version >= SDHCI_SPEC_200) {
        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
        ctrl &= ~SDHCI_CTRL_DMA_MASK;
        if ((host->flags & SDHCI_REQ_USE_DMA) &&
            (host->flags & SDHCI_USE_ADMA)) {
            if (host->flags & SDHCI_USE_64_BIT_DMA)
                ctrl |= SDHCI_CTRL_ADMA64;
            else
                ctrl |= SDHCI_CTRL_ADMA32;
        } else {
            ctrl |= SDHCI_CTRL_SDMA;
        }
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
    }

    if (!(host->flags & SDHCI_REQ_USE_DMA)) {
        int flags;

        flags = SG_MITER_ATOMIC;
        if (host->data->flags & MMC_DATA_READ)
            flags |= SG_MITER_TO_SG;
        else
            flags |= SG_MITER_FROM_SG;
        sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
        host->blocks = data->blocks;
    }

    sdhci_set_transfer_irqs(host);

    /* Set the DMA boundary value and block size */
    sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
        data->blksz), SDHCI_BLOCK_SIZE);
    sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
static void sdhci_set_transfer_mode(struct sdhci_host *host,
    struct mmc_command *cmd)
{
    u16 mode = 0;
    struct mmc_data *data = cmd->data;

    if (data == NULL) {
        if (host->quirks2 &
            SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
            sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
        } else {
            /* clear Auto CMD settings for no data CMDs */
            mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
            sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
                SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
        }
        return;
    }

    WARN_ON(!host->data);

    if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
        mode = SDHCI_TRNS_BLK_CNT_EN;

    if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
        mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
        /*
         * If we are sending CMD23, CMD12 never gets sent
         * on successful completion (so no Auto-CMD12).
         */
        if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
            mode |= SDHCI_TRNS_AUTO_CMD12;
        else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
            mode |= SDHCI_TRNS_AUTO_CMD23;
            sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
        }
    }

    if (data->flags & MMC_DATA_READ)
        mode |= SDHCI_TRNS_READ;
    if (host->flags & SDHCI_REQ_USE_DMA)
        mode |= SDHCI_TRNS_DMA;

    sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
static void sdhci_finish_data(struct sdhci_host *host)
{
    struct mmc_data *data;

    BUG_ON(!host->data);

    data = host->data;
    host->data = NULL;

    if (host->flags & SDHCI_REQ_USE_DMA) {
        if (host->flags & SDHCI_USE_ADMA)
            sdhci_adma_table_post(host, data);
        else {
            if (!data->host_cookie)
                dma_unmap_sg(mmc_dev(host->mmc),
                    data->sg, data->sg_len,
                    (data->flags & MMC_DATA_READ) ?
                    DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
    }

    /*
     * The specification states that the block count register must
     * be updated, but it does not specify at what point in the
     * data flow. That makes the register entirely useless to read
     * back so we have to assume that nothing made it to the card
     * in the event of an error.
     */
    if (data->error)
        data->bytes_xfered = 0;
    else
        data->bytes_xfered = data->blksz * data->blocks;

    /*
     * Need to send CMD12 if -
     * a) open-ended multiblock transfer (no CMD23)
     * b) error in multiblock transfer
     */
    if (data->stop &&
        (data->error || !host->mrq->sbc)) {

        /*
         * The controller needs a reset of internal state machines
         * upon error conditions.
         */
        if (data->error) {
            sdhci_do_reset(host, SDHCI_RESET_CMD);
            sdhci_do_reset(host, SDHCI_RESET_DATA);
        }

        sdhci_send_command(host, data->stop);
    } else
        tasklet_schedule(&host->finish_tasklet);
}
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
    int flags;
    u32 mask;
    unsigned long timeout;

    WARN_ON(host->cmd);

    /* Wait max 10 ms */
    timeout = 10;

    mask = SDHCI_CMD_INHIBIT;
    if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
        mask |= SDHCI_DATA_INHIBIT;

    /* We shouldn't wait for data inihibit for stop commands, even
       though they might use busy signaling */
    if (host->mrq->data && (cmd == host->mrq->data->stop))
        mask &= ~SDHCI_DATA_INHIBIT;

    while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
        if (timeout == 0) {
            pr_err("%s: Controller never released "
                "inhibit bit(s).\n", mmc_hostname(host->mmc));
            sdhci_dumpregs(host);
            cmd->error = -EIO;
            tasklet_schedule(&host->finish_tasklet);
            return;
        }
        timeout--;
        mdelay(1);
    }

    timeout = jiffies;
    if (!cmd->data && cmd->busy_timeout > 9000)
        timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
    else
        timeout += 10 * HZ;
    mod_timer(&host->timer, timeout);

    host->cmd = cmd;
    host->busy_handle = 0;

    sdhci_prepare_data(host, cmd);

    sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

    sdhci_set_transfer_mode(host, cmd);

    if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
        pr_err("%s: Unsupported response type!\n",
            mmc_hostname(host->mmc));
        cmd->error = -EINVAL;
        tasklet_schedule(&host->finish_tasklet);
        return;
    }

    if (!(cmd->flags & MMC_RSP_PRESENT))
        flags = SDHCI_CMD_RESP_NONE;
    else if (cmd->flags & MMC_RSP_136)
        flags = SDHCI_CMD_RESP_LONG;
    else if (cmd->flags & MMC_RSP_BUSY)
        flags = SDHCI_CMD_RESP_SHORT_BUSY;
    else
        flags = SDHCI_CMD_RESP_SHORT;

    if (cmd->flags & MMC_RSP_CRC)
        flags |= SDHCI_CMD_CRC;
    if (cmd->flags & MMC_RSP_OPCODE)
        flags |= SDHCI_CMD_INDEX;

    /* CMD19 is special in that the Data Present Select should be set */
    if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
        cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
        flags |= SDHCI_CMD_DATA;

    sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
static void sdhci_finish_command(struct sdhci_host *host)
{
    int i;

    BUG_ON(host->cmd == NULL);

    if (host->cmd->flags & MMC_RSP_PRESENT) {
        if (host->cmd->flags & MMC_RSP_136) {
            /* CRC is stripped so we need to do some shifting. */
            for (i = 0; i < 4; i++) {
                host->cmd->resp[i] = sdhci_readl(host,
                    SDHCI_RESPONSE + (3 - i) * 4) << 8;
                if (i != 3)
                    host->cmd->resp[i] |=
                        sdhci_readb(host,
                        SDHCI_RESPONSE + (3 - i) * 4 - 1);
            }
        } else {
            host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
        }
    }

    host->cmd->error = 0;

    /* Finished CMD23, now send actual command. */
    if (host->cmd == host->mrq->sbc) {
        host->cmd = NULL;
        sdhci_send_command(host, host->mrq->cmd);
    } else {

        /* Processed actual command. */
        if (host->data && host->data_early)
            sdhci_finish_data(host);

        if (!host->cmd->data)
            tasklet_schedule(&host->finish_tasklet);

        host->cmd = NULL;
    }
}
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
    u16 preset = 0;

    switch (host->timing) {
    case MMC_TIMING_UHS_SDR12:
        preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
        break;
    case MMC_TIMING_UHS_SDR25:
        preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
        break;
    case MMC_TIMING_UHS_SDR50:
        preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
        break;
    case MMC_TIMING_UHS_SDR104:
    case MMC_TIMING_MMC_HS200:
        preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
        break;
    case MMC_TIMING_UHS_DDR50:
        preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
        break;
    case MMC_TIMING_MMC_HS400:
        preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
        break;
    default:
        pr_warn("%s: Invalid UHS-I mode selected\n",
            mmc_hostname(host->mmc));
        preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
        break;
    }
    return preset;
}
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
    int div = 0; /* Initialized for compiler warning */
    int real_div = div, clk_mul = 1;
    u16 clk = 0;
    unsigned long timeout;

    host->mmc->actual_clock = 0;

    sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

    if (clock == 0)
        return;

    if (host->version >= SDHCI_SPEC_300) {
        if (host->preset_enabled) {
            u16 pre_val;

            clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
            pre_val = sdhci_get_preset_value(host);
            div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
                >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
            if (host->clk_mul &&
                (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
                clk = SDHCI_PROG_CLOCK_MODE;
                real_div = div + 1;
                clk_mul = host->clk_mul;
            } else {
                real_div = max_t(int, 1, div << 1);
            }
            goto clock_set;
        }

        /*
         * Check if the Host Controller supports Programmable Clock
         * Mode.
         */
        if (host->clk_mul) {
            for (div = 1; div <= 1024; div++) {
                if ((host->max_clk * host->clk_mul / div)
                    <= clock)
                    break;
            }
            /*
             * Set Programmable Clock Mode in the Clock
             * Control register.
             */
            clk = SDHCI_PROG_CLOCK_MODE;
            real_div = div;
            clk_mul = host->clk_mul;
            div--;
        } else {
            /* Version 3.00 divisors must be a multiple of 2. */
            if (host->max_clk <= clock)
                div = 1;
            else {
                for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
                     div += 2) {
                    if ((host->max_clk / div) <= clock)
                        break;
                }
            }
            real_div = div;
            div >>= 1;
            if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
                && !div && host->max_clk <= 25000000)
                div = 1;
        }
    } else {
        /* Version 2.00 divisors must be a power of 2. */
        for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
            if ((host->max_clk / div) <= clock)
                break;
        }
        real_div = div;
        div >>= 1;
    }

clock_set:
    if (real_div)
        host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
    clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
    clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
        << SDHCI_DIVIDER_HI_SHIFT;
    clk |= SDHCI_CLOCK_INT_EN;
    sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

    /* Wait max 20 ms */
    timeout = 20;
    while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
        & SDHCI_CLOCK_INT_STABLE)) {
        if (timeout == 0) {
            pr_err("%s: Internal clock never "
                "stabilised.\n", mmc_hostname(host->mmc));
            sdhci_dumpregs(host);
            return;
        }
        timeout--;
        mdelay(1);
    }

    clk |= SDHCI_CLOCK_CARD_EN;
    sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
                            unsigned short vdd)
{
    struct mmc_host *mmc = host->mmc;
    u8 pwr = 0;

    if (!IS_ERR(mmc->supply.vmmc)) {
        spin_unlock_irq(&host->lock);
        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
        spin_lock_irq(&host->lock);

        if (mode != MMC_POWER_OFF)
            sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
        else
            sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

        return;
    }

    if (mode != MMC_POWER_OFF) {
        switch (1 << vdd) {
        case MMC_VDD_165_195:
            pwr = SDHCI_POWER_180;
            break;
        case MMC_VDD_29_30:
        case MMC_VDD_30_31:
            pwr = SDHCI_POWER_300;
            break;
        case MMC_VDD_32_33:
        case MMC_VDD_33_34:
            pwr = SDHCI_POWER_330;
            break;
        default:
            BUG();
        }
    }

    if (host->pwr == pwr)
        return;

    host->pwr = pwr;

    if (pwr == 0) {
        sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
        if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
            sdhci_runtime_pm_bus_off(host);
        vdd = 0;
    } else {
        /*
         * Spec says that we should clear the power reg before setting
         * a new value. Some controllers don't seem to like this though.
         */
        if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
            sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

        /*
         * At least the Marvell CaFe chip gets confused if we set the
         * voltage and set turn on power at the same time, so set the
         * voltage first.
         */
        if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
            sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

        pwr |= SDHCI_POWER_ON;

        sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

        if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
            sdhci_runtime_pm_bus_on(host);

        /*
         * Some controllers need an extra 10ms delay of 10ms before
         * they can apply clock after applying power
         */
        if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
            mdelay(10);
    }
}
/*****************************************************************************\
* *
* MMC callbacks *
* *
\*****************************************************************************/
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
    struct sdhci_host *host;
    int present;
    unsigned long flags;
    u32 tuning_opcode;

    host = mmc_priv(mmc);

    sdhci_runtime_pm_get(host);

    present = mmc_gpio_get_cd(host->mmc);

    spin_lock_irqsave(&host->lock, flags);

    WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
    sdhci_activate_led(host);
#endif

    /*
     * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
     * requests if Auto-CMD12 is enabled.
     */
    if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
        if (mrq->stop) {
            mrq->data->stop = NULL;
            mrq->stop = NULL;
        }
    }

    host->mrq = mrq;

    /*
     * Firstly check card presence from cd-gpio. The return could
     * be one of the following possibilities:
     *     negative: cd-gpio is not available
     *     zero: cd-gpio is used, and card is removed
     *     one: cd-gpio is used, and card is present
     */
    if (present < 0) {
        /* If polling, assume that the card is always present. */
        if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
            present = 1;
        else
            present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
                      SDHCI_DAT3_PRESENT;
    }

    if (!present || host->flags & SDHCI_DEVICE_DEAD) {
        host->mrq->cmd->error = -ENOMEDIUM;
        tasklet_schedule(&host->finish_tasklet);
    } else {
        u32 present_state;

        present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
        /*
         * Check if the re-tuning timer has already expired and there
         * is no on-going data transfer and DAT0 is not busy. If so,
         * we need to execute tuning procedure before sending command.
         */
        if ((host->flags & SDHCI_NEEDS_RETUNING) &&
            !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) &&
            (present_state & SDHCI_DATA_0_LVL_MASK)) {
            if (mmc->card) {
                /* eMMC uses cmd21 but sd and sdio use cmd19 */
                tuning_opcode =
                    mmc->card->type == MMC_TYPE_MMC ?
                    MMC_SEND_TUNING_BLOCK_HS200 :
                    MMC_SEND_TUNING_BLOCK;

                /* Here we need to set the host->mrq to NULL,
                 * in case the pending finish_tasklet
                 * finishes it incorrectly.
                 */
                host->mrq = NULL;

                spin_unlock_irqrestore(&host->lock, flags);
                sdhci_execute_tuning(mmc, tuning_opcode);
                spin_lock_irqsave(&host->lock, flags);

                /* Restore original mmc_request structure */
                host->mrq = mrq;
            }
        }

        if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
            sdhci_send_command(host, mrq->sbc);
        else
            sdhci_send_command(host, mrq->cmd);
    }

    mmiowb();
    spin_unlock_irqrestore(&host->lock, flags);
}
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
    u8 ctrl;

    ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
    if (width == MMC_BUS_WIDTH_8) {
        ctrl &= ~SDHCI_CTRL_4BITBUS;
        if (host->version >= SDHCI_SPEC_300)
            ctrl |= SDHCI_CTRL_8BITBUS;
    } else {
        if (host->version >= SDHCI_SPEC_300)
            ctrl &= ~SDHCI_CTRL_8BITBUS;
        if (width == MMC_BUS_WIDTH_4)
            ctrl |= SDHCI_CTRL_4BITBUS;
        else
            ctrl &= ~SDHCI_CTRL_4BITBUS;
    }
    sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
    u16 ctrl_2;

    ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
    /* Select Bus Speed Mode for host */
    ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
    if ((timing == MMC_TIMING_MMC_HS200) ||
        (timing == MMC_TIMING_UHS_SDR104))
        ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
    else if (timing == MMC_TIMING_UHS_SDR12)
        ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
    else if (timing == MMC_TIMING_UHS_SDR25)
        ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
    else if (timing == MMC_TIMING_UHS_SDR50)
        ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
    else if ((timing == MMC_TIMING_UHS_DDR50) ||
             (timing == MMC_TIMING_MMC_DDR52))
        ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
    else if (timing == MMC_TIMING_MMC_HS400)
        ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
    sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
    unsigned long flags;
    u8 ctrl;
    struct mmc_host *mmc = host->mmc;

    spin_lock_irqsave(&host->lock, flags);

    if (host->flags & SDHCI_DEVICE_DEAD) {
        spin_unlock_irqrestore(&host->lock, flags);
        if (!IS_ERR(mmc->supply.vmmc) &&
            ios->power_mode == MMC_POWER_OFF)
            mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
        return;
    }

    /*
     * Reset the chip on each power off.
     * Should clear out any weird states.
     */
    if (ios->power_mode == MMC_POWER_OFF) {
        sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
        sdhci_reinit(host);
    }

    if (host->version >= SDHCI_SPEC_300 &&
        (ios->power_mode == MMC_POWER_UP) &&
        !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
        sdhci_enable_preset_value(host, false);

    if (!ios->clock || ios->clock != host->clock) {
        host->ops->set_clock(host, ios->clock);
        host->clock = ios->clock;

        if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
            host->clock) {
            host->timeout_clk = host->mmc->actual_clock ?
                        host->mmc->actual_clock / 1000 :
                        host->clock / 1000;
            host->mmc->max_busy_timeout =
                host->ops->get_max_timeout_count ?
                host->ops->get_max_timeout_count(host) :
                1 << 27;
            host->mmc->max_busy_timeout /= host->timeout_clk;
        }
    }

    sdhci_set_power(host, ios->power_mode, ios->vdd);

    if (host->ops->platform_send_init_74_clocks)
        host->ops->platform_send_init_74_clocks(host, ios->power_mode);

    host->ops->set_bus_width(host, ios->bus_width);

    ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

    if ((ios->timing == MMC_TIMING_SD_HS ||
         ios->timing == MMC_TIMING_MMC_HS)
        && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
        ctrl |= SDHCI_CTRL_HISPD;
    else
        ctrl &= ~SDHCI_CTRL_HISPD;

    if (host->version >= SDHCI_SPEC_300) {
        u16 clk, ctrl_2;

        /* In case of UHS-I modes, set High Speed Enable */
        if ((ios->timing == MMC_TIMING_MMC_HS400) ||
            (ios->timing == MMC_TIMING_MMC_HS200) ||
            (ios->timing == MMC_TIMING_MMC_DDR52) ||
            (ios->timing == MMC_TIMING_UHS_SDR50) ||
            (ios->timing == MMC_TIMING_UHS_SDR104) ||
            (ios->timing == MMC_TIMING_UHS_DDR50) ||
            (ios->timing == MMC_TIMING_UHS_SDR25))
            ctrl |= SDHCI_CTRL_HISPD;

        if (!host->preset_enabled) {
            sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
            /*
             * We only need to set Driver Strength if the
             * preset value enable is not set.
             */
            ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
            ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
            if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
                ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
            else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
                ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;

            sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
        } else {
            /*
             * According to SDHC Spec v3.00, if the Preset Value
             * Enable in the Host Control 2 register is set, we
             * need to reset SD Clock Enable before changing High
             * Speed Enable to avoid generating clock gliches.
             */

            /* Reset SD Clock Enable */
            clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
            clk &= ~SDHCI_CLOCK_CARD_EN;
            sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

            sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

            /* Re-enable SD Clock */
            host->ops->set_clock(host, host->clock);
        }

        /* Reset SD Clock Enable */
        clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
        clk &= ~SDHCI_CLOCK_CARD_EN;
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

        host->ops->set_uhs_signaling(host, ios->timing);
        host->timing = ios->timing;

        if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
            ((ios->timing == MMC_TIMING_UHS_SDR12) ||
             (ios->timing == MMC_TIMING_UHS_SDR25) ||
             (ios->timing == MMC_TIMING_UHS_SDR50) ||
             (ios->timing == MMC_TIMING_UHS_SDR104) ||
             (ios->timing == MMC_TIMING_UHS_DDR50))) {
            u16 preset;

            sdhci_enable_preset_value(host, true);
            preset = sdhci_get_preset_value(host);
            ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
                >> SDHCI_PRESET_DRV_SHIFT;
        }

        /* Re-enable SD Clock */
        host->ops->set_clock(host, host->clock);
    } else
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

    /*
     * Some (ENE) controllers go apeshit on some ios operation,
     * signalling timeout and CRC errors even on CMD0. Resetting
     * it on each ios seems to solve the problem.
     */
    if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
        sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

    mmiowb();
    spin_unlock_irqrestore(&host->lock, flags);
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
    struct sdhci_host *host = mmc_priv(mmc);

    sdhci_runtime_pm_get(host);
    sdhci_do_set_ios(host, ios);
    sdhci_runtime_pm_put(host);
}
static int sdhci_do_get_cd(struct sdhci_host *host)
{
    int gpio_cd = mmc_gpio_get_cd(host->mmc);

    if (host->flags & SDHCI_DEVICE_DEAD)
        return 0;

    /* If polling/nonremovable, assume that the card is always present. */
    if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
        (host->mmc->caps & MMC_CAP_NONREMOVABLE))
        return 1;

    /* Try slot gpio detect */
    if (!IS_ERR_VALUE(gpio_cd))
        return !!gpio_cd;

    /* Host native card detect */
    return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_DAT3_PRESENT);
}
static int sdhci_get_cd(struct mmc_host *mmc)
{
    struct sdhci_host *host = mmc_priv(mmc);
    int ret;

    sdhci_runtime_pm_get(host);
    ret = sdhci_do_get_cd(host);
    sdhci_runtime_pm_put(host);
    return ret;
}
static int sdhci_check_ro(struct sdhci_host *host)
{
    unsigned long flags;
    int is_readonly;

    spin_lock_irqsave(&host->lock, flags);

    if (host->flags & SDHCI_DEVICE_DEAD)
        is_readonly = 0;
    else if (host->ops->get_ro)
        is_readonly = host->ops->get_ro(host);
    else
        is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
                        & SDHCI_WRITE_PROTECT);

    spin_unlock_irqrestore(&host->lock, flags);

    /* This quirk needs to be replaced by a callback-function later */
    return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
        !is_readonly : is_readonly;
}
#define SAMPLE_COUNT 5
static int sdhci_do_get_ro(struct sdhci_host *host)
{
    int i, ro_count;

    if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
        return sdhci_check_ro(host);

    ro_count = 0;
    for (i = 0; i < SAMPLE_COUNT; i++) {
        if (sdhci_check_ro(host)) {
            if (++ro_count > SAMPLE_COUNT / 2)
                return 1;
        }
        msleep(30);
    }
    return 0;
}
static void sdhci_hw_reset(struct mmc_host *mmc)
{
    struct sdhci_host *host = mmc_priv(mmc);

    if (host->ops && host->ops->hw_reset)
        host->ops->hw_reset(host);
}
static int sdhci_get_ro(struct mmc_host *mmc)
{
    struct sdhci_host *host = mmc_priv(mmc);
    int ret;

    sdhci_runtime_pm_get(host);
    ret = sdhci_do_get_ro(host);
    sdhci_runtime_pm_put(host);
    return ret;
}
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
    if (!(host->flags & SDHCI_DEVICE_DEAD)) {
        if (enable)
            host->ier |= SDHCI_INT_CARD_INT;
        else
            host->ier &= ~SDHCI_INT_CARD_INT;

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
        mmiowb();
    }
}
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
    struct sdhci_host *host = mmc_priv(mmc);
    unsigned long flags;

    sdhci_runtime_pm_get(host);

    spin_lock_irqsave(&host->lock, flags);
    if (enable)
        host->flags |= SDHCI_SDIO_IRQ_ENABLED;
    else
        host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

    sdhci_enable_sdio_irq_nolock(host, enable);
    spin_unlock_irqrestore(&host->lock, flags);

    sdhci_runtime_pm_put(host);
}
static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
                                                struct mmc_ios *ios)
{
    struct mmc_host *mmc = host->mmc;
    u16 ctrl;
    int ret;

    /*
     * Signal Voltage Switching is only applicable for Host Controllers
     * v3.00 and above.
     */
    if (host->version < SDHCI_SPEC_300)
        return 0;

    ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

    switch (ios->signal_voltage) {
    case MMC_SIGNAL_VOLTAGE_330:
        /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
        ctrl &= ~SDHCI_CTRL_VDD_180;
        sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

        if (!IS_ERR(mmc->supply.vqmmc)) {
            ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
                                        3600000);
            if (ret) {
                pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
                    mmc_hostname(mmc));
                return -EIO;
            }
        }
        /* Wait for 5ms */
        usleep_range(5000, 5500);

        /* 3.3V regulator output should be stable within 5 ms */
        ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        if (!(ctrl & SDHCI_CTRL_VDD_180))
            return 0;

        pr_warn("%s: 3.3V regulator output did not became stable\n",
            mmc_hostname(mmc));

        return -EAGAIN;
    case MMC_SIGNAL_VOLTAGE_180:
        if (!IS_ERR(mmc->supply.vqmmc)) {
            ret = regulator_set_voltage(mmc->supply.vqmmc,
                    1700000, 1950000);
            if (ret) {
                pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
                    mmc_hostname(mmc));
                return -EIO;
            }
        }

        /*
         * Enable 1.8V Signal Enable in the Host Control2
         * register
         */
        ctrl |= SDHCI_CTRL_VDD_180;
        sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

        /* Some controller need to do more when switching */
        if (host->ops->voltage_switch)
            host->ops->voltage_switch(host);

        /* 1.8V regulator output should be stable within 5 ms */
        ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        if (ctrl & SDHCI_CTRL_VDD_180)
            return 0;

        pr_warn("%s: 1.8V regulator output did not became stable\n",
            mmc_hostname(mmc));

        return -EAGAIN;
    case MMC_SIGNAL_VOLTAGE_120:
        if (!IS_ERR(mmc->supply.vqmmc)) {
            ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
                                        1300000);
            if (ret) {
                pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
                    mmc_hostname(mmc));
                return -EIO;
            }
        }
        return 0;
    default:
        /* No signal voltage switch required */
        return 0;
    }
}
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
    struct mmc_ios *ios)
{
    struct sdhci_host *host = mmc_priv(mmc);
    int err;

    if (host->version < SDHCI_SPEC_300)
        return 0;
    sdhci_runtime_pm_get(host);
    err = sdhci_do_start_signal_voltage_switch(host, ios);
    sdhci_runtime_pm_put(host);
    return err;
}
static int sdhci_card_busy(struct mmc_host *mmc)
{
    struct sdhci_host *host = mmc_priv(mmc);
    u32 present_state;

    sdhci_runtime_pm_get(host);
    /* Check whether DAT[3:0] is 0000 */
    present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
    sdhci_runtime_pm_put(host);

    return !(present_state & SDHCI_DATA_LVL_MASK);
}
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
    struct sdhci_host *host = mmc_priv(mmc);
    unsigned long flags;

    spin_lock_irqsave(&host->lock, flags);
    host->flags |= SDHCI_HS400_TUNING;
    spin_unlock_irqrestore(&host->lock, flags);

    return 0;
}
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
    struct sdhci_host *host = mmc_priv(mmc);
    u16 ctrl;
    int tuning_loop_counter = MAX_TUNING_LOOP;
    int err = 0;
    unsigned long flags;
    unsigned int tuning_count = 0;
    bool hs400_tuning;

    sdhci_runtime_pm_get(host);
    spin_lock_irqsave(&host->lock, flags);

    hs400_tuning = host->flags & SDHCI_HS400_TUNING;
    host->flags &= ~SDHCI_HS400_TUNING;

    if (host->tuning_mode == SDHCI_TUNING_MODE_1)
        tuning_count = host->tuning_count;

    /*
     * The Host Controller needs tuning only in case of SDR104 mode
     * and for SDR50 mode when Use Tuning for SDR50 is set in the
     * Capabilities register.
     * If the Host Controller supports the HS200 mode then the
     * tuning function has to be executed.
     */
    switch (host->timing) {
    /* HS400 tuning is done in HS200 mode */
    case MMC_TIMING_MMC_HS400:
        err = -EINVAL;
        goto out_unlock;

    case MMC_TIMING_MMC_HS200:
        /*
         * Periodic re-tuning for HS400 is not expected to be needed, so
         * disable it here.
         */
        if (hs400_tuning)
            tuning_count = 0;
        break;

    case MMC_TIMING_UHS_SDR104:
        break;

    case MMC_TIMING_UHS_SDR50:
        if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
            host->flags & SDHCI_SDR104_NEEDS_TUNING)
            break;
        /* FALLTHROUGH */

    default:
        goto out_unlock;
    }

    if (host->ops->platform_execute_tuning) {
        spin_unlock_irqrestore(&host->lock, flags);
        err = host->ops->platform_execute_tuning(host, opcode);
        sdhci_runtime_pm_put(host);
        return err;
    }

    ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
    ctrl |= SDHCI_CTRL_EXEC_TUNING;
    if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
        ctrl |= SDHCI_CTRL_TUNED_CLK;
    sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

    /*
     * As per the Host Controller spec v3.00, tuning command
     * generates Buffer Read Ready interrupt, so enable that.
     *
     * Note: The spec clearly says that when tuning sequence
     * is being performed, the controller does not generate
     * interrupts other than Buffer Read Ready interrupt. But
     * to make sure we don't hit a controller bug, we _only_
     * enable Buffer Read Ready interrupt here.
     */
    sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
    sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

    /*
     * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
     * of loops reaches 40 times or a timeout of 150ms occurs.
     */
    do {
        struct mmc_command cmd = {0};
        struct mmc_request mrq = {NULL};

        cmd.opcode = opcode;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
        cmd.retries = 0;
        cmd.data = NULL;
        cmd.error = 0;

        if (tuning_loop_counter-- == 0)
            break;

        mrq.cmd = &cmd;
        host->mrq = &mrq;

        /*
         * In response to CMD19, the card sends 64 bytes of tuning
         * block to the Host Controller. So we set the block size
         * to 64 here.
         */
        if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
            if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
                sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
                             SDHCI_BLOCK_SIZE);
            else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
                sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
                             SDHCI_BLOCK_SIZE);
        } else {
            sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
                         SDHCI_BLOCK_SIZE);
        }

        /*
         * The tuning block is sent by the card to the host controller.
         * So we set the TRNS_READ bit in the Transfer Mode register.
         * This also takes care of setting DMA Enable and Multi Block
         * Select in the same register to 0.
         */
        sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

        sdhci_send_command(host, &cmd);

        host->cmd = NULL;
        host->mrq = NULL;

        spin_unlock_irqrestore(&host->lock, flags);
        /* Wait for Buffer Read Ready interrupt */
        wait_event_interruptible_timeout(host->buf_ready_int,
                    (host->tuning_done == 1),
                    msecs_to_jiffies(50));
        spin_lock_irqsave(&host->lock, flags);

        if (!host->tuning_done) {
            pr_info(DRIVER_NAME ": Timeout waiting for "
                "Buffer Read Ready interrupt during tuning "
                "procedure, falling back to fixed sampling "
                "clock\n");
            ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
            ctrl &= ~SDHCI_CTRL_TUNED_CLK;
            ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
            sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

            err = -EIO;
            goto out;
        }

        host->tuning_done = 0;

        ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

        /* eMMC spec does not require a delay between tuning cycles */
        if (opcode == MMC_SEND_TUNING_BLOCK)
            mdelay(1);
    } while (ctrl & SDHCI_CTRL_EXEC_TUNING);

    /*
     * The Host Driver has exhausted the maximum number of loops allowed,
     * so use fixed sampling frequency.
     */
    if (tuning_loop_counter < 0) {
        ctrl &= ~SDHCI_CTRL_TUNED_CLK;
        sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
    }
    if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
        pr_info(DRIVER_NAME ": Tuning procedure"
            " failed, falling back to fixed sampling"
            " clock\n");
        err = -EIO;
    }

out:
    host->flags &= ~SDHCI_NEEDS_RETUNING;

    if (tuning_count) {
        host->flags |= SDHCI_USING_RETUNING_TIMER;
        mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
    }

    /*
     * In case tuning fails, host controllers which support re-tuning can
     * try tuning again at a later time, when the re-tuning timer expires.
     * So for these controllers, we return 0. Since there might be other
     * controllers who do not have this capability, we return error for
     * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
     * a retuning timer to do the retuning for the card.
     */
    if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
        err = 0;

    sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
    sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
    spin_unlock_irqrestore(&host->lock, flags);
    sdhci_runtime_pm_put(host);

    return err;
}
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
    /* Host Controller v3.00 defines preset value registers */
    if (host->version < SDHCI_SPEC_300)
        return;

    /*
     * We only enable or disable Preset Value if they are not already
     * enabled or disabled respectively. Otherwise, we bail out.
     */
    if (host->preset_enabled != enable) {
        u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

        if (enable)
            ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
        else
            ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

        sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

        if (enable)
            host->flags |= SDHCI_PV_ENABLED;
        else
            host->flags &= ~SDHCI_PV_ENABLED;

        host->preset_enabled = enable;
    }
}
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
                           int err)
{
    struct sdhci_host *host = mmc_priv(mmc);
    struct mmc_data *data = mrq->data;

    if (host->flags & SDHCI_REQ_USE_DMA) {
        if (data->host_cookie)
            dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                         data->flags & MMC_DATA_WRITE ?
                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
        mrq->data->host_cookie = 0;
    }
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				       struct mmc_data *data,
				       struct sdhci_host_next *next)
{
	int sg_count;

	if (!next && data->host_cookie &&
	    data->host_cookie != host->next_data.cookie) {
		pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n",
			__func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	/* Check if next job is already prepared */
	if (next ||
	    (!next && data->host_cookie != host->next_data.cookie)) {
		sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len,
				     data->flags & MMC_DATA_WRITE ?
				     DMA_TO_DEVICE : DMA_FROM_DEVICE);
	} else {
		sg_count = host->next_data.sg_count;
		host->next_data.sg_count = 0;
	}

	if (sg_count == 0)
		return -EINVAL;

	if (next) {
		next->sg_count = sg_count;
		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
	} else
		host->sg_count = sg_count;

	return sg_count;
}
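/*
 * Editorial note (not in the original source): the cookie handshake above is
 * what lets the MMC core overlap DMA mapping with an ongoing transfer.
 * ->pre_req() calls sdhci_pre_dma_transfer() with &host->next_data so the
 * scatterlist of the *next* request is dma_map_sg()'d early and tagged via
 * data->host_cookie; when that request is actually issued, a matching cookie
 * means the existing mapping is reused instead of being redone, and
 * ->post_req() finally dma_unmap_sg()'s it. A mismatched cookie simply falls
 * back to mapping synchronously.
 */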
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (mrq->data->host_cookie) {
		mrq->data->host_cookie = 0;
		return;
	}

	if (host->flags & SDHCI_REQ_USE_DMA)
		if (sdhci_pre_dma_transfer(host, mrq->data,
					   &host->next_data) < 0)
			mrq->data->host_cookie = 0;
}
static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = sdhci_do_get_cd(host);

	spin_lock_irqsave(&host->lock, flags);

	/* Check host->mrq first in case we are runtime suspended */
	if (host->mrq && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};
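/*
 * Editorial note (not in the original source): sdhci_card_event() above first
 * defers to host->ops->card_event, so platform glue built on this core can
 * hook card-detect handling through struct sdhci_ops (declared in sdhci.h
 * below). A minimal sketch, assuming a hypothetical my_card_event() helper:
 *
 *	static const struct sdhci_ops my_sdhci_ops = {
 *		.set_clock	= sdhci_set_clock,
 *		.set_bus_width	= sdhci_set_bus_width,
 *		.reset		= sdhci_reset,
 *		.card_event	= my_card_event,
 *	};
 */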
/*****************************************************************************\
* *
* Tasklets *
* *
\*****************************************************************************/
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host *)param;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If this tasklet gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!host->mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
	    ((mrq->cmd && mrq->cmd->error) ||
	     (mrq->sbc && mrq->sbc->error) ||
	     (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
			    (mrq->data->stop && mrq->data->stop->error))) ||
	     (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
	sdhci_runtime_pm_put(host);
}
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_err("%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
static void sdhci_tuning_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	host->flags |= SDHCI_NEEDS_RETUNING;

	spin_unlock_irqrestore(&host->lock, flags);
}
/*****************************************************************************\
* *
* Interrupt handling *
* *
\*****************************************************************************/
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		pr_err("%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error) {
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also "
				"doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
				&& !host->busy_handle) {
			/* Mark that command complete before busy is ended */
			host->busy_handle = 1;
			return;
		}

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	} else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
		   host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
		*mask &= ~SDHCI_INT_DATA_END;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;
	BUG_ON(intmask == 0);

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->cmd->error = -ETIMEDOUT;
				tasklet_schedule(&host->finish_tasklet);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->busy_handle)
					sdhci_finish_command(host);
				else
					host->busy_handle = 1;
				return;
			}
		}

		pr_err("%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;
			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
				" next 0x%08x\n",
				mmc_hostname(host->mmc), dmastart,
				host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("*** %s got interrupt: 0x%08x\n",
			mmc_hostname(host->mmc), intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_DAT3_PRESENT;

			/*
			 * There is an observation on i.mx esdhc. INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted. We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system. And the REMOVE gets the
			 * same situation.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
				      &intmask);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_CARD_INT) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
			   mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
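/*
 * Editorial note (not in the original source): in this tree the hard IRQ
 * handler above derives card presence from SDHCI_PRESENT_STATE &
 * SDHCI_DAT3_PRESENT (the DAT3 line level) rather than the dedicated
 * card-detect pin, and then re-arms only the opposite event (REMOVE while a
 * card is present, INSERT while the slot is empty) to avoid an interrupt
 * storm; the actual rescan is deferred to sdhci_thread_irq() below.
 */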
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_card_event(host->mmc);
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}
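/*
 * Editorial note (not in the original source): sdhci_irq() and
 * sdhci_thread_irq() are registered as a pair, e.g.
 * request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, IRQF_SHARED,
 * mmc_hostname(mmc), host), as done later in sdhci_resume_host() and
 * sdhci_add_host(). The hard handler latches events into host->thread_isr
 * and returns IRQ_WAKE_THREAD; the threaded half then runs the card-detect
 * rescan and the SDIO IRQ handlers, which may sleep.
 */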
/*****************************************************************************\
* *
* Suspend/resume *
* *
\*****************************************************************************/
#ifdef CONFIG_PM
void
sdhci_enable_irq_wakeups
(
struct
sdhci_host
*
host
)
{
u8
val
;
u8
mask
=
SDHCI_WAKE_ON_INSERT
|
SDHCI_WAKE_ON_REMOVE
|
SDHCI_WAKE_ON_INT
;
val
=
sdhci_readb
(
host
,
SDHCI_WAKE_UP_CONTROL
);
val
|=
mask
;
/* Avoid fake wake up */
if
(
host
->
quirks
&
SDHCI_QUIRK_BROKEN_CARD_DETECTION
)
val
&=
~
(
SDHCI_WAKE_ON_INSERT
|
SDHCI_WAKE_ON_REMOVE
);
sdhci_writeb
(
host
,
val
,
SDHCI_WAKE_UP_CONTROL
);
}
EXPORT_SYMBOL_GPL
(
sdhci_enable_irq_wakeups
);
static
void
sdhci_disable_irq_wakeups
(
struct
sdhci_host
*
host
)
{
u8
val
;
u8
mask
=
SDHCI_WAKE_ON_INSERT
|
SDHCI_WAKE_ON_REMOVE
|
SDHCI_WAKE_ON_INT
;
val
=
sdhci_readb
(
host
,
SDHCI_WAKE_UP_CONTROL
);
val
&=
~
mask
;
sdhci_writeb
(
host
,
val
,
SDHCI_WAKE_UP_CONTROL
);
}
int
sdhci_suspend_host
(
struct
sdhci_host
*
host
)
{
sdhci_disable_card_detection
(
host
);
/* Disable tuning since we are suspending */
if
(
host
->
flags
&
SDHCI_USING_RETUNING_TIMER
)
{
del_timer_sync
(
&
host
->
tuning_timer
);
host
->
flags
&=
~
SDHCI_NEEDS_RETUNING
;
}
if
(
!
device_may_wakeup
(
mmc_dev
(
host
->
mmc
)))
{
host
->
ier
=
0
;
sdhci_writel
(
host
,
0
,
SDHCI_INT_ENABLE
);
sdhci_writel
(
host
,
0
,
SDHCI_SIGNAL_ENABLE
);
free_irq
(
host
->
irq
,
host
);
}
else
{
sdhci_enable_irq_wakeups
(
host
);
enable_irq_wake
(
host
->
irq
);
}
return
0
;
}
EXPORT_SYMBOL_GPL
(
sdhci_suspend_host
);
int
sdhci_resume_host
(
struct
sdhci_host
*
host
)
{
int
ret
=
0
;
if
(
host
->
flags
&
(
SDHCI_USE_SDMA
|
SDHCI_USE_ADMA
))
{
if
(
host
->
ops
->
enable_dma
)
host
->
ops
->
enable_dma
(
host
);
}
if
(
!
device_may_wakeup
(
mmc_dev
(
host
->
mmc
)))
{
ret
=
request_threaded_irq
(
host
->
irq
,
sdhci_irq
,
sdhci_thread_irq
,
IRQF_SHARED
,
mmc_hostname
(
host
->
mmc
),
host
);
if
(
ret
)
return
ret
;
}
else
{
sdhci_disable_irq_wakeups
(
host
);
disable_irq_wake
(
host
->
irq
);
}
if
((
host
->
mmc
->
pm_flags
&
MMC_PM_KEEP_POWER
)
&&
(
host
->
quirks2
&
SDHCI_QUIRK2_HOST_OFF_CARD_ON
))
{
/* Card keeps power but host controller does not */
sdhci_init
(
host
,
0
);
host
->
pwr
=
0
;
host
->
clock
=
0
;
sdhci_do_set_ios
(
host
,
&
host
->
mmc
->
ios
);
}
else
{
sdhci_init
(
host
,
(
host
->
mmc
->
pm_flags
&
MMC_PM_KEEP_POWER
));
mmiowb
();
}
sdhci_enable_card_detection
(
host
);
/* Set the re-tuning expiration flag */
if
(
host
->
flags
&
SDHCI_USING_RETUNING_TIMER
)
host
->
flags
|=
SDHCI_NEEDS_RETUNING
;
return
ret
;
}
EXPORT_SYMBOL_GPL
(
sdhci_resume_host
);
static
int
sdhci_runtime_pm_get
(
struct
sdhci_host
*
host
)
{
return
pm_runtime_get_sync
(
host
->
mmc
->
parent
);
}
static
int
sdhci_runtime_pm_put
(
struct
sdhci_host
*
host
)
{
pm_runtime_mark_last_busy
(
host
->
mmc
->
parent
);
return
pm_runtime_put_autosuspend
(
host
->
mmc
->
parent
);
}
static
void
sdhci_runtime_pm_bus_on
(
struct
sdhci_host
*
host
)
{
if
(
host
->
runtime_suspended
||
host
->
bus_on
)
return
;
host
->
bus_on
=
true
;
pm_runtime_get_noresume
(
host
->
mmc
->
parent
);
}
static
void
sdhci_runtime_pm_bus_off
(
struct
sdhci_host
*
host
)
{
if
(
host
->
runtime_suspended
||
!
host
->
bus_on
)
return
;
host
->
bus_on
=
false
;
pm_runtime_put_noidle
(
host
->
mmc
->
parent
);
}
int
sdhci_runtime_suspend_host
(
struct
sdhci_host
*
host
)
{
unsigned
long
flags
;
/* Disable tuning since we are suspending */
if
(
host
->
flags
&
SDHCI_USING_RETUNING_TIMER
)
{
del_timer_sync
(
&
host
->
tuning_timer
);
host
->
flags
&=
~
SDHCI_NEEDS_RETUNING
;
}
spin_lock_irqsave
(
&
host
->
lock
,
flags
);
host
->
ier
&=
SDHCI_INT_CARD_INT
;
sdhci_writel
(
host
,
host
->
ier
,
SDHCI_INT_ENABLE
);
sdhci_writel
(
host
,
host
->
ier
,
SDHCI_SIGNAL_ENABLE
);
spin_unlock_irqrestore
(
&
host
->
lock
,
flags
);
synchronize_hardirq
(
host
->
irq
);
spin_lock_irqsave
(
&
host
->
lock
,
flags
);
host
->
runtime_suspended
=
true
;
spin_unlock_irqrestore
(
&
host
->
lock
,
flags
);
return
0
;
}
EXPORT_SYMBOL_GPL
(
sdhci_runtime_suspend_host
);
int
sdhci_runtime_resume_host
(
struct
sdhci_host
*
host
)
{
unsigned
long
flags
;
int
host_flags
=
host
->
flags
;
if
(
host_flags
&
(
SDHCI_USE_SDMA
|
SDHCI_USE_ADMA
))
{
if
(
host
->
ops
->
enable_dma
)
host
->
ops
->
enable_dma
(
host
);
}
sdhci_init
(
host
,
0
);
/* Force clock and power re-program */
host
->
pwr
=
0
;
host
->
clock
=
0
;
sdhci_do_start_signal_voltage_switch
(
host
,
&
host
->
mmc
->
ios
);
sdhci_do_set_ios
(
host
,
&
host
->
mmc
->
ios
);
if
((
host_flags
&
SDHCI_PV_ENABLED
)
&&
!
(
host
->
quirks2
&
SDHCI_QUIRK2_PRESET_VALUE_BROKEN
))
{
spin_lock_irqsave
(
&
host
->
lock
,
flags
);
sdhci_enable_preset_value
(
host
,
true
);
spin_unlock_irqrestore
(
&
host
->
lock
,
flags
);
}
/* Set the re-tuning expiration flag */
if
(
host
->
flags
&
SDHCI_USING_RETUNING_TIMER
)
host
->
flags
|=
SDHCI_NEEDS_RETUNING
;
spin_lock_irqsave
(
&
host
->
lock
,
flags
);
host
->
runtime_suspended
=
false
;
/* Enable SDIO IRQ */
if
(
host
->
flags
&
SDHCI_SDIO_IRQ_ENABLED
)
sdhci_enable_sdio_irq_nolock
(
host
,
true
);
/* Enable Card Detection */
sdhci_enable_card_detection
(
host
);
spin_unlock_irqrestore
(
&
host
->
lock
,
flags
);
return
0
;
}
EXPORT_SYMBOL_GPL
(
sdhci_runtime_resume_host
);
#endif
/* CONFIG_PM */
/*****************************************************************************\
* *
* Device allocation/registration *
* *
\*****************************************************************************/
struct
sdhci_host
*
sdhci_alloc_host
(
struct
device
*
dev
,
size_t
priv_size
)
{
struct
mmc_host
*
mmc
;
struct
sdhci_host
*
host
;
WARN_ON
(
dev
==
NULL
);
mmc
=
mmc_alloc_host
(
sizeof
(
struct
sdhci_host
)
+
priv_size
,
dev
);
if
(
!
mmc
)
return
ERR_PTR
(
-
ENOMEM
);
host
=
mmc_priv
(
mmc
);
host
->
mmc
=
mmc
;
return
host
;
}
EXPORT_SYMBOL_GPL
(
sdhci_alloc_host
);
int
sdhci_add_host
(
struct
sdhci_host
*
host
)
{
struct
mmc_host
*
mmc
;
u32
caps
[
2
]
=
{
0
,
0
};
u32
max_current_caps
;
unsigned
int
ocr_avail
;
unsigned
int
override_timeout_clk
;
int
ret
;
WARN_ON
(
host
==
NULL
);
if
(
host
==
NULL
)
return
-
EINVAL
;
mmc
=
host
->
mmc
;
if
(
debug_quirks
)
host
->
quirks
=
debug_quirks
;
if
(
debug_quirks2
)
host
->
quirks2
=
debug_quirks2
;
override_timeout_clk
=
host
->
timeout_clk
;
sdhci_do_reset
(
host
,
SDHCI_RESET_ALL
);
host
->
version
=
sdhci_readw
(
host
,
SDHCI_HOST_VERSION
);
host
->
version
=
(
host
->
version
&
SDHCI_SPEC_VER_MASK
)
>>
SDHCI_SPEC_VER_SHIFT
;
if
(
host
->
version
>
SDHCI_SPEC_300
)
{
pr_err
(
"%s: Unknown controller version (%d). "
"You may experience problems.
\n
"
,
mmc_hostname
(
mmc
),
host
->
version
);
}
caps
[
0
]
=
(
host
->
quirks
&
SDHCI_QUIRK_MISSING_CAPS
)
?
host
->
caps
:
sdhci_readl
(
host
,
SDHCI_CAPABILITIES
);
if
(
host
->
version
>=
SDHCI_SPEC_300
)
caps
[
1
]
=
(
host
->
quirks
&
SDHCI_QUIRK_MISSING_CAPS
)
?
host
->
caps1
:
sdhci_readl
(
host
,
SDHCI_CAPABILITIES_1
);
if
(
host
->
quirks
&
SDHCI_QUIRK_FORCE_DMA
)
host
->
flags
|=
SDHCI_USE_SDMA
;
else
if
(
!
(
caps
[
0
]
&
SDHCI_CAN_DO_SDMA
))
DBG
(
"Controller doesn't have SDMA capability
\n
"
);
else
host
->
flags
|=
SDHCI_USE_SDMA
;
if
((
host
->
quirks
&
SDHCI_QUIRK_BROKEN_DMA
)
&&
(
host
->
flags
&
SDHCI_USE_SDMA
))
{
DBG
(
"Disabling DMA as it is marked broken
\n
"
);
host
->
flags
&=
~
SDHCI_USE_SDMA
;
}
if
((
host
->
version
>=
SDHCI_SPEC_200
)
&&
(
caps
[
0
]
&
SDHCI_CAN_DO_ADMA2
))
host
->
flags
|=
SDHCI_USE_ADMA
;
if
((
host
->
quirks
&
SDHCI_QUIRK_BROKEN_ADMA
)
&&
(
host
->
flags
&
SDHCI_USE_ADMA
))
{
DBG
(
"Disabling ADMA as it is marked broken
\n
"
);
host
->
flags
&=
~
SDHCI_USE_ADMA
;
}
/*
* It is assumed that a 64-bit capable device has set a 64-bit DMA mask
* and *must* do 64-bit DMA. A driver has the opportunity to change
* that during the first call to ->enable_dma(). Similarly
* SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
* implement.
*/
if
(
sdhci_readl
(
host
,
SDHCI_CAPABILITIES
)
&
SDHCI_CAN_64BIT
)
host
->
flags
|=
SDHCI_USE_64_BIT_DMA
;
if
(
host
->
flags
&
(
SDHCI_USE_SDMA
|
SDHCI_USE_ADMA
))
{
if
(
host
->
ops
->
enable_dma
)
{
if
(
host
->
ops
->
enable_dma
(
host
))
{
pr_warn
(
"%s: No suitable DMA available - falling back to PIO
\n
"
,
mmc_hostname
(
mmc
));
host
->
flags
&=
~
(
SDHCI_USE_SDMA
|
SDHCI_USE_ADMA
);
}
}
}
/* SDMA does not support 64-bit DMA */
if
(
host
->
flags
&
SDHCI_USE_64_BIT_DMA
)
host
->
flags
&=
~
SDHCI_USE_SDMA
;
if
(
host
->
flags
&
SDHCI_USE_ADMA
)
{
/*
* The DMA descriptor table size is calculated as the maximum
* number of segments times 2, to allow for an alignment
* descriptor for each segment, plus 1 for a nop end descriptor,
* all multipled by the descriptor size.
*/
if
(
host
->
flags
&
SDHCI_USE_64_BIT_DMA
)
{
host
->
adma_table_sz
=
(
SDHCI_MAX_SEGS
*
2
+
1
)
*
SDHCI_ADMA2_64_DESC_SZ
;
host
->
align_buffer_sz
=
SDHCI_MAX_SEGS
*
SDHCI_ADMA2_64_ALIGN
;
host
->
desc_sz
=
SDHCI_ADMA2_64_DESC_SZ
;
host
->
align_sz
=
SDHCI_ADMA2_64_ALIGN
;
host
->
align_mask
=
SDHCI_ADMA2_64_ALIGN
-
1
;
}
else
{
host
->
adma_table_sz
=
(
SDHCI_MAX_SEGS
*
2
+
1
)
*
SDHCI_ADMA2_32_DESC_SZ
;
host
->
align_buffer_sz
=
SDHCI_MAX_SEGS
*
SDHCI_ADMA2_32_ALIGN
;
host
->
desc_sz
=
SDHCI_ADMA2_32_DESC_SZ
;
host
->
align_sz
=
SDHCI_ADMA2_32_ALIGN
;
host
->
align_mask
=
SDHCI_ADMA2_32_ALIGN
-
1
;
}
host
->
adma_table
=
dma_alloc_coherent
(
mmc_dev
(
mmc
),
host
->
adma_table_sz
,
&
host
->
adma_addr
,
GFP_KERNEL
);
host
->
align_buffer
=
kmalloc
(
host
->
align_buffer_sz
,
GFP_KERNEL
);
if
(
!
host
->
adma_table
||
!
host
->
align_buffer
)
{
dma_free_coherent
(
mmc_dev
(
mmc
),
host
->
adma_table_sz
,
host
->
adma_table
,
host
->
adma_addr
);
kfree
(
host
->
align_buffer
);
pr_warn
(
"%s: Unable to allocate ADMA buffers - falling back to standard DMA
\n
"
,
mmc_hostname
(
mmc
));
host
->
flags
&=
~
SDHCI_USE_ADMA
;
host
->
adma_table
=
NULL
;
host
->
align_buffer
=
NULL
;
}
else
if
(
host
->
adma_addr
&
host
->
align_mask
)
{
pr_warn
(
"%s: unable to allocate aligned ADMA descriptor
\n
"
,
mmc_hostname
(
mmc
));
host
->
flags
&=
~
SDHCI_USE_ADMA
;
dma_free_coherent
(
mmc_dev
(
mmc
),
host
->
adma_table_sz
,
host
->
adma_table
,
host
->
adma_addr
);
kfree
(
host
->
align_buffer
);
host
->
adma_table
=
NULL
;
host
->
align_buffer
=
NULL
;
}
}
/*
* If we use DMA, then it's up to the caller to set the DMA
* mask, but PIO does not need the hw shim so we set a new
* mask here in that case.
*/
if
(
!
(
host
->
flags
&
(
SDHCI_USE_SDMA
|
SDHCI_USE_ADMA
)))
{
host
->
dma_mask
=
DMA_BIT_MASK
(
64
);
mmc_dev
(
mmc
)
->
dma_mask
=
&
host
->
dma_mask
;
}
if
(
host
->
version
>=
SDHCI_SPEC_300
)
host
->
max_clk
=
(
caps
[
0
]
&
SDHCI_CLOCK_V3_BASE_MASK
)
>>
SDHCI_CLOCK_BASE_SHIFT
;
else
host
->
max_clk
=
(
caps
[
0
]
&
SDHCI_CLOCK_BASE_MASK
)
>>
SDHCI_CLOCK_BASE_SHIFT
;
host
->
max_clk
*=
1000000
;
if
(
host
->
max_clk
==
0
||
host
->
quirks
&
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN
)
{
if
(
!
host
->
ops
->
get_max_clock
)
{
pr_err
(
"%s: Hardware doesn't specify base clock "
"frequency.
\n
"
,
mmc_hostname
(
mmc
));
return
-
ENODEV
;
}
host
->
max_clk
=
host
->
ops
->
get_max_clock
(
host
);
}
host
->
next_data
.
cookie
=
1
;
/*
* In case of Host Controller v3.00, find out whether clock
* multiplier is supported.
*/
host
->
clk_mul
=
(
caps
[
1
]
&
SDHCI_CLOCK_MUL_MASK
)
>>
SDHCI_CLOCK_MUL_SHIFT
;
/*
* In case the value in Clock Multiplier is 0, then programmable
* clock mode is not supported, otherwise the actual clock
* multiplier is one more than the value of Clock Multiplier
* in the Capabilities Register.
*/
if
(
host
->
clk_mul
)
host
->
clk_mul
+=
1
;
/*
* Set host parameters.
*/
mmc
->
ops
=
&
sdhci_ops
;
mmc
->
f_max
=
host
->
max_clk
;
if
(
host
->
ops
->
get_min_clock
)
mmc
->
f_min
=
host
->
ops
->
get_min_clock
(
host
);
else
if
(
host
->
version
>=
SDHCI_SPEC_300
)
{
if
(
host
->
clk_mul
)
{
mmc
->
f_min
=
(
host
->
max_clk
*
host
->
clk_mul
)
/
1024
;
mmc
->
f_max
=
host
->
max_clk
*
host
->
clk_mul
;
}
else
mmc
->
f_min
=
host
->
max_clk
/
SDHCI_MAX_DIV_SPEC_300
;
}
else
mmc
->
f_min
=
host
->
max_clk
/
SDHCI_MAX_DIV_SPEC_200
;
if
(
!
(
host
->
quirks
&
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
))
{
host
->
timeout_clk
=
(
caps
[
0
]
&
SDHCI_TIMEOUT_CLK_MASK
)
>>
SDHCI_TIMEOUT_CLK_SHIFT
;
if
(
host
->
timeout_clk
==
0
)
{
if
(
host
->
ops
->
get_timeout_clock
)
{
host
->
timeout_clk
=
host
->
ops
->
get_timeout_clock
(
host
);
}
else
{
pr_err
(
"%s: Hardware doesn't specify timeout clock frequency.
\n
"
,
mmc_hostname
(
mmc
));
return
-
ENODEV
;
}
}
if
(
caps
[
0
]
&
SDHCI_TIMEOUT_CLK_UNIT
)
host
->
timeout_clk
*=
1000
;
mmc
->
max_busy_timeout
=
host
->
ops
->
get_max_timeout_count
?
host
->
ops
->
get_max_timeout_count
(
host
)
:
1
<<
27
;
mmc
->
max_busy_timeout
/=
host
->
timeout_clk
;
}
if
(
override_timeout_clk
)
host
->
timeout_clk
=
override_timeout_clk
;
mmc
->
caps
|=
MMC_CAP_SDIO_IRQ
|
MMC_CAP_ERASE
|
MMC_CAP_CMD23
;
mmc
->
caps2
|=
MMC_CAP2_SDIO_IRQ_NOTHREAD
;
if
(
host
->
quirks
&
SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12
)
host
->
flags
|=
SDHCI_AUTO_CMD12
;
/* Auto-CMD23 stuff only works in ADMA or PIO. */
if
((
host
->
version
>=
SDHCI_SPEC_300
)
&&
((
host
->
flags
&
SDHCI_USE_ADMA
)
||
!
(
host
->
flags
&
SDHCI_USE_SDMA
)))
{
host
->
flags
|=
SDHCI_AUTO_CMD23
;
DBG
(
"%s: Auto-CMD23 available
\n
"
,
mmc_hostname
(
mmc
));
}
else
{
DBG
(
"%s: Auto-CMD23 unavailable
\n
"
,
mmc_hostname
(
mmc
));
}
/*
* A controller may support 8-bit width, but the board itself
* might not have the pins brought out. Boards that support
* 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
* their platform code before calling sdhci_add_host(), and we
* won't assume 8-bit width for hosts without that CAP.
*/
if
(
!
(
host
->
quirks
&
SDHCI_QUIRK_FORCE_1_BIT_DATA
))
mmc
->
caps
|=
MMC_CAP_4_BIT_DATA
;
if
(
host
->
quirks2
&
SDHCI_QUIRK2_HOST_NO_CMD23
)
mmc
->
caps
&=
~
MMC_CAP_CMD23
;
if
(
caps
[
0
]
&
SDHCI_CAN_DO_HISPD
)
mmc
->
caps
|=
MMC_CAP_SD_HIGHSPEED
|
MMC_CAP_MMC_HIGHSPEED
;
if
((
host
->
quirks
&
SDHCI_QUIRK_BROKEN_CARD_DETECTION
)
&&
!
(
mmc
->
caps
&
MMC_CAP_NONREMOVABLE
))
mmc
->
caps
|=
MMC_CAP_NEEDS_POLL
;
/* If there are external regulators, get them */
if
(
mmc_regulator_get_supply
(
mmc
)
==
-
EPROBE_DEFER
)
return
-
EPROBE_DEFER
;
/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
if
(
!
IS_ERR
(
mmc
->
supply
.
vqmmc
))
{
ret
=
regulator_enable
(
mmc
->
supply
.
vqmmc
);
if
(
!
regulator_is_supported_voltage
(
mmc
->
supply
.
vqmmc
,
1700000
,
1950000
))
caps
[
1
]
&=
~
(
SDHCI_SUPPORT_SDR104
|
SDHCI_SUPPORT_SDR50
|
SDHCI_SUPPORT_DDR50
);
if
(
ret
)
{
pr_warn
(
"%s: Failed to enable vqmmc regulator: %d
\n
"
,
mmc_hostname
(
mmc
),
ret
);
mmc
->
supply
.
vqmmc
=
ERR_PTR
(
-
EINVAL
);
}
}
if
(
host
->
quirks2
&
SDHCI_QUIRK2_NO_1_8_V
)
caps
[
1
]
&=
~
(
SDHCI_SUPPORT_SDR104
|
SDHCI_SUPPORT_SDR50
|
SDHCI_SUPPORT_DDR50
);
/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
if
(
caps
[
1
]
&
(
SDHCI_SUPPORT_SDR104
|
SDHCI_SUPPORT_SDR50
|
SDHCI_SUPPORT_DDR50
))
mmc
->
caps
|=
MMC_CAP_UHS_SDR12
|
MMC_CAP_UHS_SDR25
;
/* SDR104 supports also implies SDR50 support */
if
(
caps
[
1
]
&
SDHCI_SUPPORT_SDR104
)
{
mmc
->
caps
|=
MMC_CAP_UHS_SDR104
|
MMC_CAP_UHS_SDR50
;
/* SD3.0: SDR104 is supported so (for eMMC) the caps2
* field can be promoted to support HS200.
*/
if
(
!
(
host
->
quirks2
&
SDHCI_QUIRK2_BROKEN_HS200
))
mmc
->
caps2
|=
MMC_CAP2_HS200
;
}
else
if
(
caps
[
1
]
&
SDHCI_SUPPORT_SDR50
)
mmc
->
caps
|=
MMC_CAP_UHS_SDR50
;
if
(
host
->
quirks2
&
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400
&&
(
caps
[
1
]
&
SDHCI_SUPPORT_HS400
))
mmc
->
caps2
|=
MMC_CAP2_HS400
;
if
((
mmc
->
caps2
&
MMC_CAP2_HSX00_1_2V
)
&&
(
IS_ERR
(
mmc
->
supply
.
vqmmc
)
||
!
regulator_is_supported_voltage
(
mmc
->
supply
.
vqmmc
,
1100000
,
1300000
)))
mmc
->
caps2
&=
~
MMC_CAP2_HSX00_1_2V
;
if
((
caps
[
1
]
&
SDHCI_SUPPORT_DDR50
)
&&
!
(
host
->
quirks2
&
SDHCI_QUIRK2_BROKEN_DDR50
))
mmc
->
caps
|=
MMC_CAP_UHS_DDR50
;
/* Does the host need tuning for SDR50? */
if
(
caps
[
1
]
&
SDHCI_USE_SDR50_TUNING
)
host
->
flags
|=
SDHCI_SDR50_NEEDS_TUNING
;
/* Does the host need tuning for SDR104 / HS200? */
if
(
mmc
->
caps2
&
MMC_CAP2_HS200
)
host
->
flags
|=
SDHCI_SDR104_NEEDS_TUNING
;
/* Driver Type(s) (A, C, D) supported by the host */
if
(
caps
[
1
]
&
SDHCI_DRIVER_TYPE_A
)
mmc
->
caps
|=
MMC_CAP_DRIVER_TYPE_A
;
if
(
caps
[
1
]
&
SDHCI_DRIVER_TYPE_C
)
mmc
->
caps
|=
MMC_CAP_DRIVER_TYPE_C
;
if
(
caps
[
1
]
&
SDHCI_DRIVER_TYPE_D
)
mmc
->
caps
|=
MMC_CAP_DRIVER_TYPE_D
;
/* Initial value for re-tuning timer count */
host
->
tuning_count
=
(
caps
[
1
]
&
SDHCI_RETUNING_TIMER_COUNT_MASK
)
>>
SDHCI_RETUNING_TIMER_COUNT_SHIFT
;
/*
* In case Re-tuning Timer is not disabled, the actual value of
* re-tuning timer will be 2 ^ (n - 1).
*/
if
(
host
->
tuning_count
)
host
->
tuning_count
=
1
<<
(
host
->
tuning_count
-
1
);
/* Re-tuning mode supported by the Host Controller */
host
->
tuning_mode
=
(
caps
[
1
]
&
SDHCI_RETUNING_MODE_MASK
)
>>
SDHCI_RETUNING_MODE_SHIFT
;
ocr_avail
=
0
;
/*
* According to SD Host Controller spec v3.00, if the Host System
* can afford more than 150mA, Host Driver should set XPC to 1. Also
* the value is meaningful only if Voltage Support in the Capabilities
* register is set. The actual current value is 4 times the register
* value.
*/
max_current_caps
=
sdhci_readl
(
host
,
SDHCI_MAX_CURRENT
);
if
(
!
max_current_caps
&&
!
IS_ERR
(
mmc
->
supply
.
vmmc
))
{
int
curr
=
regulator_get_current_limit
(
mmc
->
supply
.
vmmc
);
if
(
curr
>
0
)
{
/* convert to SDHCI_MAX_CURRENT format */
curr
=
curr
/
1000
;
/* convert to mA */
curr
=
curr
/
SDHCI_MAX_CURRENT_MULTIPLIER
;
curr
=
min_t
(
u32
,
curr
,
SDHCI_MAX_CURRENT_LIMIT
);
max_current_caps
=
(
curr
<<
SDHCI_MAX_CURRENT_330_SHIFT
)
|
(
curr
<<
SDHCI_MAX_CURRENT_300_SHIFT
)
|
(
curr
<<
SDHCI_MAX_CURRENT_180_SHIFT
);
}
}
if
(
caps
[
0
]
&
SDHCI_CAN_VDD_330
)
{
ocr_avail
|=
MMC_VDD_32_33
|
MMC_VDD_33_34
;
mmc
->
max_current_330
=
((
max_current_caps
&
SDHCI_MAX_CURRENT_330_MASK
)
>>
SDHCI_MAX_CURRENT_330_SHIFT
)
*
SDHCI_MAX_CURRENT_MULTIPLIER
;
}
if
(
caps
[
0
]
&
SDHCI_CAN_VDD_300
)
{
ocr_avail
|=
MMC_VDD_29_30
|
MMC_VDD_30_31
;
mmc
->
max_current_300
=
((
max_current_caps
&
SDHCI_MAX_CURRENT_300_MASK
)
>>
SDHCI_MAX_CURRENT_300_SHIFT
)
*
SDHCI_MAX_CURRENT_MULTIPLIER
;
}
if
(
caps
[
0
]
&
SDHCI_CAN_VDD_180
)
{
ocr_avail
|=
MMC_VDD_165_195
;
mmc
->
max_current_180
=
((
max_current_caps
&
SDHCI_MAX_CURRENT_180_MASK
)
>>
SDHCI_MAX_CURRENT_180_SHIFT
)
*
SDHCI_MAX_CURRENT_MULTIPLIER
;
}
/* If OCR set by external regulators, use it instead */
if
(
mmc
->
ocr_avail
)
ocr_avail
=
mmc
->
ocr_avail
;
if
(
host
->
ocr_mask
)
ocr_avail
&=
host
->
ocr_mask
;
mmc
->
ocr_avail
=
ocr_avail
;
mmc
->
ocr_avail_sdio
=
ocr_avail
;
if
(
host
->
ocr_avail_sdio
)
mmc
->
ocr_avail_sdio
&=
host
->
ocr_avail_sdio
;
mmc
->
ocr_avail_sd
=
ocr_avail
;
if
(
host
->
ocr_avail_sd
)
mmc
->
ocr_avail_sd
&=
host
->
ocr_avail_sd
;
else
/* normal SD controllers don't support 1.8V */
mmc
->
ocr_avail_sd
&=
~
MMC_VDD_165_195
;
mmc
->
ocr_avail_mmc
=
ocr_avail
;
if
(
host
->
ocr_avail_mmc
)
mmc
->
ocr_avail_mmc
&=
host
->
ocr_avail_mmc
;
if
(
mmc
->
ocr_avail
==
0
)
{
pr_err
(
"%s: Hardware doesn't report any "
"support voltages.
\n
"
,
mmc_hostname
(
mmc
));
return
-
ENODEV
;
}
spin_lock_init
(
&
host
->
lock
);
/*
* Maximum number of segments. Depends on if the hardware
* can do scatter/gather or not.
*/
if
(
host
->
flags
&
SDHCI_USE_ADMA
)
mmc
->
max_segs
=
SDHCI_MAX_SEGS
;
else
if
(
host
->
flags
&
SDHCI_USE_SDMA
)
mmc
->
max_segs
=
1
;
else
/* PIO */
mmc
->
max_segs
=
SDHCI_MAX_SEGS
;
/*
* Maximum number of sectors in one transfer. Limited by SDMA boundary
* size (512KiB). Note some tuning modes impose a 4MiB limit, but this
* is less anyway.
*/
mmc
->
max_req_size
=
524288
;
/*
* Maximum segment size. Could be one segment with the maximum number
* of bytes. When doing hardware scatter/gather, each entry cannot
* be larger than 64 KiB though.
*/
if
(
host
->
flags
&
SDHCI_USE_ADMA
)
{
if
(
host
->
quirks
&
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC
)
mmc
->
max_seg_size
=
65535
;
else
mmc
->
max_seg_size
=
65536
;
}
else
{
mmc
->
max_seg_size
=
mmc
->
max_req_size
;
}
/*
* Maximum block size. This varies from controller to controller and
* is specified in the capabilities register.
*/
if
(
host
->
quirks
&
SDHCI_QUIRK_FORCE_BLK_SZ_2048
)
{
mmc
->
max_blk_size
=
2
;
}
else
{
mmc
->
max_blk_size
=
(
caps
[
0
]
&
SDHCI_MAX_BLOCK_MASK
)
>>
SDHCI_MAX_BLOCK_SHIFT
;
if
(
mmc
->
max_blk_size
>=
3
)
{
pr_warn
(
"%s: Invalid maximum block size, assuming 512 bytes
\n
"
,
mmc_hostname
(
mmc
));
mmc
->
max_blk_size
=
0
;
}
}
mmc
->
max_blk_size
=
512
<<
mmc
->
max_blk_size
;
/*
* Maximum block count.
*/
mmc
->
max_blk_count
=
(
host
->
quirks
&
SDHCI_QUIRK_NO_MULTIBLOCK
)
?
1
:
65535
;
/*
* Init tasklets.
*/
tasklet_init
(
&
host
->
finish_tasklet
,
sdhci_tasklet_finish
,
(
unsigned
long
)
host
);
setup_timer
(
&
host
->
timer
,
sdhci_timeout_timer
,
(
unsigned
long
)
host
);
init_waitqueue_head
(
&
host
->
buf_ready_int
);
if
(
host
->
version
>=
SDHCI_SPEC_300
)
{
/* Initialize re-tuning timer */
init_timer
(
&
host
->
tuning_timer
);
host
->
tuning_timer
.
data
=
(
unsigned
long
)
host
;
host
->
tuning_timer
.
function
=
sdhci_tuning_timer
;
}
sdhci_init
(
host
,
0
);
ret
=
request_threaded_irq
(
host
->
irq
,
sdhci_irq
,
sdhci_thread_irq
,
IRQF_SHARED
,
mmc_hostname
(
mmc
),
host
);
if
(
ret
)
{
pr_err
(
"%s: Failed to request IRQ %d: %d
\n
"
,
mmc_hostname
(
mmc
),
host
->
irq
,
ret
);
goto
untasklet
;
}
#ifdef CONFIG_MMC_DEBUG
sdhci_dumpregs
(
host
);
#endif
#ifdef SDHCI_USE_LEDS_CLASS
snprintf
(
host
->
led_name
,
sizeof
(
host
->
led_name
),
"%s::"
,
mmc_hostname
(
mmc
));
host
->
led
.
name
=
host
->
led_name
;
host
->
led
.
brightness
=
LED_OFF
;
host
->
led
.
default_trigger
=
mmc_hostname
(
mmc
);
host
->
led
.
brightness_set
=
sdhci_led_control
;
ret
=
led_classdev_register
(
mmc_dev
(
mmc
),
&
host
->
led
);
if
(
ret
)
{
pr_err
(
"%s: Failed to register LED device: %d
\n
"
,
mmc_hostname
(
mmc
),
ret
);
goto
reset
;
}
#endif
mmiowb
();
mmc_add_host
(
mmc
);
pr_info
(
"%s: SDHCI controller on %s [%s] using %s
\n
"
,
mmc_hostname
(
mmc
),
host
->
hw_name
,
dev_name
(
mmc_dev
(
mmc
)),
(
host
->
flags
&
SDHCI_USE_ADMA
)
?
(
host
->
flags
&
SDHCI_USE_64_BIT_DMA
)
?
"ADMA 64-bit"
:
"ADMA"
:
(
host
->
flags
&
SDHCI_USE_SDMA
)
?
"DMA"
:
"PIO"
);
sdhci_enable_card_detection
(
host
);
return
0
;
#ifdef SDHCI_USE_LEDS_CLASS
reset:
sdhci_do_reset
(
host
,
SDHCI_RESET_ALL
);
sdhci_writel
(
host
,
0
,
SDHCI_INT_ENABLE
);
sdhci_writel
(
host
,
0
,
SDHCI_SIGNAL_ENABLE
);
free_irq
(
host
->
irq
,
host
);
#endif
untasklet:
tasklet_kill
(
&
host
->
finish_tasklet
);
return
ret
;
}
EXPORT_SYMBOL_GPL
(
sdhci_add_host
);
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			pr_err("%s: Controller removed during "
				" transfer!\n", mmc_hostname(mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->adma_table)
		dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
				  host->adma_table, host->adma_addr);
	kfree(host->align_buffer);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
* *
* Driver init/exit *
* *
\*****************************************************************************/
static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
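/*
 * Editorial note (not in the original source): debug_quirks and debug_quirks2
 * are read-only module parameters (mode 0444) consumed in sdhci_add_host(),
 * where a non-zero value overrides host->quirks / host->quirks2. A minimal
 * sketch of forcing a quirk mask at load time, assuming sdhci is built as a
 * module:
 *
 *	modprobe sdhci debug_quirks=<mask>
 */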
src/drivers/mmc/host/sdhci.h
0 → 100644
View file @
2106955d
/*
* linux/drivers/mmc/host/sdhci.h - Secure Digital Host Controller Interface driver
*
* Header file for Host Controller registers and I/O accessors.
*
* Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#ifndef __SDHCI_HW_H
#define __SDHCI_HW_H
#include <linux/scatterlist.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mmc/sdhci.h>
/*
* Controller registers
*/
#define SDHCI_DMA_ADDRESS 0x00
#define SDHCI_ARGUMENT2 SDHCI_DMA_ADDRESS
#define SDHCI_BLOCK_SIZE 0x04
#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
#define SDHCI_BLOCK_COUNT 0x06
#define SDHCI_ARGUMENT 0x08
#define SDHCI_TRANSFER_MODE 0x0C
#define SDHCI_TRNS_DMA 0x01
#define SDHCI_TRNS_BLK_CNT_EN 0x02
#define SDHCI_TRNS_AUTO_CMD12 0x04
#define SDHCI_TRNS_AUTO_CMD23 0x08
#define SDHCI_TRNS_READ 0x10
#define SDHCI_TRNS_MULTI 0x20
#define SDHCI_COMMAND 0x0E
#define SDHCI_CMD_RESP_MASK 0x03
#define SDHCI_CMD_CRC 0x08
#define SDHCI_CMD_INDEX 0x10
#define SDHCI_CMD_DATA 0x20
#define SDHCI_CMD_ABORTCMD 0xC0
#define SDHCI_CMD_RESP_NONE 0x00
#define SDHCI_CMD_RESP_LONG 0x01
#define SDHCI_CMD_RESP_SHORT 0x02
#define SDHCI_CMD_RESP_SHORT_BUSY 0x03
#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f)
#define SDHCI_RESPONSE 0x10
#define SDHCI_BUFFER 0x20
#define SDHCI_PRESENT_STATE 0x24
#define SDHCI_CMD_INHIBIT 0x00000001
#define SDHCI_DATA_INHIBIT 0x00000002
#define SDHCI_DOING_WRITE 0x00000100
#define SDHCI_DOING_READ 0x00000200
#define SDHCI_SPACE_AVAILABLE 0x00000400
#define SDHCI_DATA_AVAILABLE 0x00000800
#define SDHCI_CARD_PRESENT 0x00010000
#define SDHCI_DAT3_PRESENT 0x00800000
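/* Editorial note (not in the original source): 0x00800000 is bit 23 of
 * PRESENT_STATE, i.e. the DAT[3] signal level (cf. SDHCI_DATA_LVL_MASK /
 * SDHCI_DATA_LVL_SHIFT below); sdhci_irq() in this tree samples it in place
 * of SDHCI_CARD_PRESENT to decide whether a card is inserted. */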
#define SDHCI_WRITE_PROTECT 0x00080000
#define SDHCI_DATA_LVL_MASK 0x00F00000
#define SDHCI_DATA_LVL_SHIFT 20
#define SDHCI_DATA_0_LVL_MASK 0x00100000
#define SDHCI_HOST_CONTROL 0x28
#define SDHCI_CTRL_LED 0x01
#define SDHCI_CTRL_4BITBUS 0x02
#define SDHCI_CTRL_HISPD 0x04
#define SDHCI_CTRL_DMA_MASK 0x18
#define SDHCI_CTRL_SDMA 0x00
#define SDHCI_CTRL_ADMA1 0x08
#define SDHCI_CTRL_ADMA32 0x10
#define SDHCI_CTRL_ADMA64 0x18
#define SDHCI_CTRL_8BITBUS 0x20
#define SDHCI_POWER_CONTROL 0x29
#define SDHCI_POWER_ON 0x01
#define SDHCI_POWER_180 0x0A
#define SDHCI_POWER_300 0x0C
#define SDHCI_POWER_330 0x0E
#define SDHCI_BLOCK_GAP_CONTROL 0x2A
#define SDHCI_WAKE_UP_CONTROL 0x2B
#define SDHCI_WAKE_ON_INT 0x01
#define SDHCI_WAKE_ON_INSERT 0x02
#define SDHCI_WAKE_ON_REMOVE 0x04
#define SDHCI_CLOCK_CONTROL 0x2C
#define SDHCI_DIVIDER_SHIFT 8
#define SDHCI_DIVIDER_HI_SHIFT 6
#define SDHCI_DIV_MASK 0xFF
#define SDHCI_DIV_MASK_LEN 8
#define SDHCI_DIV_HI_MASK 0x300
#define SDHCI_PROG_CLOCK_MODE 0x0020
#define SDHCI_CLOCK_CARD_EN 0x0004
#define SDHCI_CLOCK_INT_STABLE 0x0002
#define SDHCI_CLOCK_INT_EN 0x0001
#define SDHCI_TIMEOUT_CONTROL 0x2E
#define SDHCI_SOFTWARE_RESET 0x2F
#define SDHCI_RESET_ALL 0x01
#define SDHCI_RESET_CMD 0x02
#define SDHCI_RESET_DATA 0x04
#define SDHCI_INT_STATUS 0x30
#define SDHCI_INT_ENABLE 0x34
#define SDHCI_SIGNAL_ENABLE 0x38
#define SDHCI_INT_RESPONSE 0x00000001
#define SDHCI_INT_DATA_END 0x00000002
#define SDHCI_INT_BLK_GAP 0x00000004
#define SDHCI_INT_DMA_END 0x00000008
#define SDHCI_INT_SPACE_AVAIL 0x00000010
#define SDHCI_INT_DATA_AVAIL 0x00000020
#define SDHCI_INT_CARD_INSERT 0x00000040
#define SDHCI_INT_CARD_REMOVE 0x00000080
#define SDHCI_INT_CARD_INT 0x00000100
#define SDHCI_INT_ERROR 0x00008000
#define SDHCI_INT_TIMEOUT 0x00010000
#define SDHCI_INT_CRC 0x00020000
#define SDHCI_INT_END_BIT 0x00040000
#define SDHCI_INT_INDEX 0x00080000
#define SDHCI_INT_DATA_TIMEOUT 0x00100000
#define SDHCI_INT_DATA_CRC 0x00200000
#define SDHCI_INT_DATA_END_BIT 0x00400000
#define SDHCI_INT_BUS_POWER 0x00800000
#define SDHCI_INT_ACMD12ERR 0x01000000
#define SDHCI_INT_ADMA_ERROR 0x02000000
#define SDHCI_INT_NORMAL_MASK 0x00007FFF
#define SDHCI_INT_ERROR_MASK 0xFFFF8000
#define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
SDHCI_INT_BLK_GAP)
#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
#define SDHCI_ACMD12_ERR 0x3C
#define SDHCI_HOST_CONTROL2 0x3E
#define SDHCI_CTRL_UHS_MASK 0x0007
#define SDHCI_CTRL_UHS_SDR12 0x0000
#define SDHCI_CTRL_UHS_SDR25 0x0001
#define SDHCI_CTRL_UHS_SDR50 0x0002
#define SDHCI_CTRL_UHS_SDR104 0x0003
#define SDHCI_CTRL_UHS_DDR50 0x0004
#define SDHCI_CTRL_HS400 0x0005
/* Non-standard */
#define SDHCI_CTRL_VDD_180 0x0008
#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030
#define SDHCI_CTRL_DRV_TYPE_B 0x0000
#define SDHCI_CTRL_DRV_TYPE_A 0x0010
#define SDHCI_CTRL_DRV_TYPE_C 0x0020
#define SDHCI_CTRL_DRV_TYPE_D 0x0030
#define SDHCI_CTRL_EXEC_TUNING 0x0040
#define SDHCI_CTRL_TUNED_CLK 0x0080
#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
#define SDHCI_CAPABILITIES 0x40
#define SDHCI_TIMEOUT_CLK_MASK 0x0000003F
#define SDHCI_TIMEOUT_CLK_SHIFT 0
#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080
#define SDHCI_CLOCK_BASE_MASK 0x00003F00
#define SDHCI_CLOCK_V3_BASE_MASK 0x0000FF00
#define SDHCI_CLOCK_BASE_SHIFT 8
#define SDHCI_MAX_BLOCK_MASK 0x00030000
#define SDHCI_MAX_BLOCK_SHIFT 16
#define SDHCI_CAN_DO_8BIT 0x00040000
#define SDHCI_CAN_DO_ADMA2 0x00080000
#define SDHCI_CAN_DO_ADMA1 0x00100000
#define SDHCI_CAN_DO_HISPD 0x00200000
#define SDHCI_CAN_DO_SDMA 0x00400000
#define SDHCI_CAN_VDD_330 0x01000000
#define SDHCI_CAN_VDD_300 0x02000000
#define SDHCI_CAN_VDD_180 0x04000000
#define SDHCI_CAN_64BIT 0x10000000
#define SDHCI_SUPPORT_SDR50 0x00000001
#define SDHCI_SUPPORT_SDR104 0x00000002
#define SDHCI_SUPPORT_DDR50 0x00000004
#define SDHCI_DRIVER_TYPE_A 0x00000010
#define SDHCI_DRIVER_TYPE_C 0x00000020
#define SDHCI_DRIVER_TYPE_D 0x00000040
#define SDHCI_RETUNING_TIMER_COUNT_MASK 0x00000F00
#define SDHCI_RETUNING_TIMER_COUNT_SHIFT 8
#define SDHCI_USE_SDR50_TUNING 0x00002000
#define SDHCI_RETUNING_MODE_MASK 0x0000C000
#define SDHCI_RETUNING_MODE_SHIFT 14
#define SDHCI_CLOCK_MUL_MASK 0x00FF0000
#define SDHCI_CLOCK_MUL_SHIFT 16
#define SDHCI_SUPPORT_HS400 0x80000000
/* Non-standard */
#define SDHCI_CAPABILITIES_1 0x44
#define SDHCI_MAX_CURRENT 0x48
#define SDHCI_MAX_CURRENT_LIMIT 0xFF
#define SDHCI_MAX_CURRENT_330_MASK 0x0000FF
#define SDHCI_MAX_CURRENT_330_SHIFT 0
#define SDHCI_MAX_CURRENT_300_MASK 0x00FF00
#define SDHCI_MAX_CURRENT_300_SHIFT 8
#define SDHCI_MAX_CURRENT_180_MASK 0xFF0000
#define SDHCI_MAX_CURRENT_180_SHIFT 16
#define SDHCI_MAX_CURRENT_MULTIPLIER 4
/* 4C-4F reserved for more max current */
#define SDHCI_SET_ACMD12_ERROR 0x50
#define SDHCI_SET_INT_ERROR 0x52
#define SDHCI_ADMA_ERROR 0x54
/* 55-57 reserved */
#define SDHCI_ADMA_ADDRESS 0x58
#define SDHCI_ADMA_ADDRESS_HI 0x5C
/* 60-FB reserved */
#define SDHCI_PRESET_FOR_SDR12 0x66
#define SDHCI_PRESET_FOR_SDR25 0x68
#define SDHCI_PRESET_FOR_SDR50 0x6A
#define SDHCI_PRESET_FOR_SDR104 0x6C
#define SDHCI_PRESET_FOR_DDR50 0x6E
#define SDHCI_PRESET_FOR_HS400 0x74
/* Non-standard */
#define SDHCI_PRESET_DRV_MASK 0xC000
#define SDHCI_PRESET_DRV_SHIFT 14
#define SDHCI_PRESET_CLKGEN_SEL_MASK 0x400
#define SDHCI_PRESET_CLKGEN_SEL_SHIFT 10
#define SDHCI_PRESET_SDCLK_FREQ_MASK 0x3FF
#define SDHCI_PRESET_SDCLK_FREQ_SHIFT 0
#define SDHCI_SLOT_INT_STATUS 0xFC
#define SDHCI_HOST_VERSION 0xFE
#define SDHCI_VENDOR_VER_MASK 0xFF00
#define SDHCI_VENDOR_VER_SHIFT 8
#define SDHCI_SPEC_VER_MASK 0x00FF
#define SDHCI_SPEC_VER_SHIFT 0
#define SDHCI_SPEC_100 0
#define SDHCI_SPEC_200 1
#define SDHCI_SPEC_300 2
/*
* End of controller registers.
*/
#define SDHCI_MAX_DIV_SPEC_200 256
#define SDHCI_MAX_DIV_SPEC_300 2046
/*
* Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2.
*/
#define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024)
#define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12)
/* ADMA2 32-bit DMA descriptor size */
#define SDHCI_ADMA2_32_DESC_SZ 8
/* ADMA2 32-bit DMA alignment */
#define SDHCI_ADMA2_32_ALIGN 4
/* ADMA2 32-bit descriptor */
struct
sdhci_adma2_32_desc
{
__le16
cmd
;
__le16
len
;
__le32
addr
;
}
__packed
__aligned
(
SDHCI_ADMA2_32_ALIGN
);
/* ADMA2 64-bit DMA descriptor size */
#define SDHCI_ADMA2_64_DESC_SZ 12
/* ADMA2 64-bit DMA alignment */
#define SDHCI_ADMA2_64_ALIGN 8
/*
* ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
* aligned.
*/
struct
sdhci_adma2_64_desc
{
__le16
cmd
;
__le16
len
;
__le32
addr_lo
;
__le32
addr_hi
;
}
__packed
__aligned
(
4
);
#define ADMA2_TRAN_VALID 0x21
#define ADMA2_NOP_END_VALID 0x3
#define ADMA2_END 0x2
/*
* Maximum segments assuming a 512KiB maximum requisition size and a minimum
* 4KiB page size.
*/
#define SDHCI_MAX_SEGS 128
struct
sdhci_ops
{
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
u32
(
*
read_l
)(
struct
sdhci_host
*
host
,
int
reg
);
u16
(
*
read_w
)(
struct
sdhci_host
*
host
,
int
reg
);
u8
(
*
read_b
)(
struct
sdhci_host
*
host
,
int
reg
);
void
(
*
write_l
)(
struct
sdhci_host
*
host
,
u32
val
,
int
reg
);
void
(
*
write_w
)(
struct
sdhci_host
*
host
,
u16
val
,
int
reg
);
void
(
*
write_b
)(
struct
sdhci_host
*
host
,
u8
val
,
int
reg
);
#endif
void
(
*
set_clock
)(
struct
sdhci_host
*
host
,
unsigned
int
clock
);
int
(
*
enable_dma
)(
struct
sdhci_host
*
host
);
unsigned
int
(
*
get_max_clock
)(
struct
sdhci_host
*
host
);
unsigned
int
(
*
get_min_clock
)(
struct
sdhci_host
*
host
);
unsigned
int
(
*
get_timeout_clock
)(
struct
sdhci_host
*
host
);
unsigned
int
(
*
get_max_timeout_count
)(
struct
sdhci_host
*
host
);
void
(
*
set_timeout
)(
struct
sdhci_host
*
host
,
struct
mmc_command
*
cmd
);
void
(
*
set_bus_width
)(
struct
sdhci_host
*
host
,
int
width
);
void
(
*
platform_send_init_74_clocks
)(
struct
sdhci_host
*
host
,
u8
power_mode
);
unsigned
int
(
*
get_ro
)(
struct
sdhci_host
*
host
);
void
(
*
reset
)(
struct
sdhci_host
*
host
,
u8
mask
);
int
(
*
platform_execute_tuning
)(
struct
sdhci_host
*
host
,
u32
opcode
);
void
(
*
set_uhs_signaling
)(
struct
sdhci_host
*
host
,
unsigned
int
uhs
);
void
(
*
hw_reset
)(
struct
sdhci_host
*
host
);
void
(
*
adma_workaround
)(
struct
sdhci_host
*
host
,
u32
intmask
);
void
(
*
platform_init
)(
struct
sdhci_host
*
host
);
void
(
*
card_event
)(
struct
sdhci_host
*
host
);
void
(
*
voltage_switch
)(
struct
sdhci_host
*
host
);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
static
inline
void
sdhci_writel
(
struct
sdhci_host
*
host
,
u32
val
,
int
reg
)
{
if
(
unlikely
(
host
->
ops
->
write_l
))
host
->
ops
->
write_l
(
host
,
val
,
reg
);
else
writel
(
val
,
host
->
ioaddr
+
reg
);
}
static
inline
void
sdhci_writew
(
struct
sdhci_host
*
host
,
u16
val
,
int
reg
)
{
if
(
unlikely
(
host
->
ops
->
write_w
))
host
->
ops
->
write_w
(
host
,
val
,
reg
);
else
writew
(
val
,
host
->
ioaddr
+
reg
);
}
static
inline
void
sdhci_writeb
(
struct
sdhci_host
*
host
,
u8
val
,
int
reg
)
{
if
(
unlikely
(
host
->
ops
->
write_b
))
host
->
ops
->
write_b
(
host
,
val
,
reg
);
else
writeb
(
val
,
host
->
ioaddr
+
reg
);
}
static
inline
u32
sdhci_readl
(
struct
sdhci_host
*
host
,
int
reg
)
{
if
(
unlikely
(
host
->
ops
->
read_l
))
return
host
->
ops
->
read_l
(
host
,
reg
);
else
return
readl
(
host
->
ioaddr
+
reg
);
}
static
inline
u16
sdhci_readw
(
struct
sdhci_host
*
host
,
int
reg
)
{
if
(
unlikely
(
host
->
ops
->
read_w
))
return
host
->
ops
->
read_w
(
host
,
reg
);
else
return
readw
(
host
->
ioaddr
+
reg
);
}
static
inline
u8
sdhci_readb
(
struct
sdhci_host
*
host
,
int
reg
)
{
if
(
unlikely
(
host
->
ops
->
read_b
))
return
host
->
ops
->
read_b
(
host
,
reg
);
else
return
readb
(
host
->
ioaddr
+
reg
);
}
#else
static
inline
void
sdhci_writel
(
struct
sdhci_host
*
host
,
u32
val
,
int
reg
)
{
writel
(
val
,
host
->
ioaddr
+
reg
);
}
static
inline
void
sdhci_writew
(
struct
sdhci_host
*
host
,
u16
val
,
int
reg
)
{
writew
(
val
,
host
->
ioaddr
+
reg
);
}
static
inline
void
sdhci_writeb
(
struct
sdhci_host
*
host
,
u8
val
,
int
reg
)
{
writeb
(
val
,
host
->
ioaddr
+
reg
);
}
static
inline
u32
sdhci_readl
(
struct
sdhci_host
*
host
,
int
reg
)
{
return
readl
(
host
->
ioaddr
+
reg
);
}
static
inline
u16
sdhci_readw
(
struct
sdhci_host
*
host
,
int
reg
)
{
return
readw
(
host
->
ioaddr
+
reg
);
}
static
inline
u8
sdhci_readb
(
struct
sdhci_host
*
host
,
int
reg
)
{
return
readb
(
host
->
ioaddr
+
reg
);
}
#endif
/* CONFIG_MMC_SDHCI_IO_ACCESSORS */
extern
struct
sdhci_host
*
sdhci_alloc_host
(
struct
device
*
dev
,
size_t
priv_size
);
extern
void
sdhci_free_host
(
struct
sdhci_host
*
host
);
static
inline
void
*
sdhci_priv
(
struct
sdhci_host
*
host
)
{
return
(
void
*
)
host
->
private
;
}
extern
void
sdhci_card_detect
(
struct
sdhci_host
*
host
);
extern
int
sdhci_add_host
(
struct
sdhci_host
*
host
);
extern
void
sdhci_remove_host
(
struct
sdhci_host
*
host
,
int
dead
);
extern
void
sdhci_send_command
(
struct
sdhci_host
*
host
,
struct
mmc_command
*
cmd
);
static
inline
bool
sdhci_sdio_irq_enabled
(
struct
sdhci_host
*
host
)
{
return
!!
(
host
->
flags
&
SDHCI_SDIO_IRQ_ENABLED
);
}
void
sdhci_set_clock
(
struct
sdhci_host
*
host
,
unsigned
int
clock
);
void
sdhci_set_bus_width
(
struct
sdhci_host
*
host
,
int
width
);
void
sdhci_reset
(
struct
sdhci_host
*
host
,
u8
mask
);
void
sdhci_set_uhs_signaling
(
struct
sdhci_host
*
host
,
unsigned
timing
);
#ifdef CONFIG_PM
extern
int
sdhci_suspend_host
(
struct
sdhci_host
*
host
);
extern
int
sdhci_resume_host
(
struct
sdhci_host
*
host
);
extern
void
sdhci_enable_irq_wakeups
(
struct
sdhci_host
*
host
);
extern
int
sdhci_runtime_suspend_host
(
struct
sdhci_host
*
host
);
extern
int
sdhci_runtime_resume_host
(
struct
sdhci_host
*
host
);
#endif
#endif
/* __SDHCI_HW_H */