@@ -1566,6 +1566,86 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
15661566 return & udev -> se_dev ;
15671567}
15681568
1569+ static void tcmu_dev_call_rcu (struct rcu_head * p )
1570+ {
1571+ struct se_device * dev = container_of (p , struct se_device , rcu_head );
1572+ struct tcmu_dev * udev = TCMU_DEV (dev );
1573+
1574+ kfree (udev -> uio_info .name );
1575+ kfree (udev -> name );
1576+ kfree (udev );
1577+ }
1578+
1579+ static int tcmu_check_and_free_pending_cmd (struct tcmu_cmd * cmd )
1580+ {
1581+ if (test_bit (TCMU_CMD_BIT_EXPIRED , & cmd -> flags )) {
1582+ kmem_cache_free (tcmu_cmd_cache , cmd );
1583+ return 0 ;
1584+ }
1585+ return - EINVAL ;
1586+ }
1587+
1588+ static void tcmu_blocks_release (struct radix_tree_root * blocks ,
1589+ int start , int end )
1590+ {
1591+ int i ;
1592+ struct page * page ;
1593+
1594+ for (i = start ; i < end ; i ++ ) {
1595+ page = radix_tree_delete (blocks , i );
1596+ if (page ) {
1597+ __free_page (page );
1598+ atomic_dec (& global_db_count );
1599+ }
1600+ }
1601+ }
1602+
1603+ static void tcmu_remove_all_queued_tmr (struct tcmu_dev * udev )
1604+ {
1605+ struct tcmu_tmr * tmr , * tmp ;
1606+
1607+ list_for_each_entry_safe (tmr , tmp , & udev -> tmr_queue , queue_entry ) {
1608+ list_del_init (& tmr -> queue_entry );
1609+ kfree (tmr );
1610+ }
1611+ }
1612+
/*
 * Final kref release for a tcmu device: tear down all per-device
 * resources. The upper layer is expected to have drained all requests
 * before the last reference is dropped. The struct itself (and its name
 * strings) are freed later via call_rcu().
 */
static void tcmu_dev_kref_release(struct kref *kref)
{
	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
	struct se_device *dev = &udev->se_dev;
	struct tcmu_cmd *cmd;
	bool all_expired = true;
	int i;

	vfree(udev->mb_addr);
	udev->mb_addr = NULL;

	/* Unlink from the global timed-out list, if we were queued there. */
	spin_lock_bh(&timed_out_udevs_lock);
	if (!list_empty(&udev->timedout_entry))
		list_del(&udev->timedout_entry);
	spin_unlock_bh(&timed_out_udevs_lock);

	/* Upper layer should drain all requests before calling this */
	mutex_lock(&udev->cmdr_lock);
	idr_for_each_entry(&udev->commands, cmd, i) {
		/* Only expired commands may legitimately remain; free those. */
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
	}
	/* There can be left over TMR cmds. Remove them. */
	tcmu_remove_all_queued_tmr(udev);
	if (!list_empty(&udev->qfull_queue))
		all_expired = false;
	idr_destroy(&udev->commands);
	/* A non-expired command here means the drain contract was violated. */
	WARN_ON(!all_expired);

	/* Free all data pages and the allocation bitmap under cmdr_lock. */
	tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
	bitmap_free(udev->data_bitmap);
	mutex_unlock(&udev->cmdr_lock);

	/* Defer freeing udev itself until after an RCU grace period. */
	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
1648+
15691649static void run_qfull_queue (struct tcmu_dev * udev , bool fail )
15701650{
15711651 struct tcmu_cmd * tcmu_cmd , * tmp_cmd ;
@@ -1751,86 +1831,6 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
17511831 return 0 ;
17521832}
17531833
1754- static void tcmu_dev_call_rcu (struct rcu_head * p )
1755- {
1756- struct se_device * dev = container_of (p , struct se_device , rcu_head );
1757- struct tcmu_dev * udev = TCMU_DEV (dev );
1758-
1759- kfree (udev -> uio_info .name );
1760- kfree (udev -> name );
1761- kfree (udev );
1762- }
1763-
1764- static int tcmu_check_and_free_pending_cmd (struct tcmu_cmd * cmd )
1765- {
1766- if (test_bit (TCMU_CMD_BIT_EXPIRED , & cmd -> flags )) {
1767- kmem_cache_free (tcmu_cmd_cache , cmd );
1768- return 0 ;
1769- }
1770- return - EINVAL ;
1771- }
1772-
1773- static void tcmu_blocks_release (struct radix_tree_root * blocks ,
1774- int start , int end )
1775- {
1776- int i ;
1777- struct page * page ;
1778-
1779- for (i = start ; i < end ; i ++ ) {
1780- page = radix_tree_delete (blocks , i );
1781- if (page ) {
1782- __free_page (page );
1783- atomic_dec (& global_db_count );
1784- }
1785- }
1786- }
1787-
1788- static void tcmu_remove_all_queued_tmr (struct tcmu_dev * udev )
1789- {
1790- struct tcmu_tmr * tmr , * tmp ;
1791-
1792- list_for_each_entry_safe (tmr , tmp , & udev -> tmr_queue , queue_entry ) {
1793- list_del_init (& tmr -> queue_entry );
1794- kfree (tmr );
1795- }
1796- }
1797-
1798- static void tcmu_dev_kref_release (struct kref * kref )
1799- {
1800- struct tcmu_dev * udev = container_of (kref , struct tcmu_dev , kref );
1801- struct se_device * dev = & udev -> se_dev ;
1802- struct tcmu_cmd * cmd ;
1803- bool all_expired = true;
1804- int i ;
1805-
1806- vfree (udev -> mb_addr );
1807- udev -> mb_addr = NULL ;
1808-
1809- spin_lock_bh (& timed_out_udevs_lock );
1810- if (!list_empty (& udev -> timedout_entry ))
1811- list_del (& udev -> timedout_entry );
1812- spin_unlock_bh (& timed_out_udevs_lock );
1813-
1814- /* Upper layer should drain all requests before calling this */
1815- mutex_lock (& udev -> cmdr_lock );
1816- idr_for_each_entry (& udev -> commands , cmd , i ) {
1817- if (tcmu_check_and_free_pending_cmd (cmd ) != 0 )
1818- all_expired = false;
1819- }
1820- /* There can be left over TMR cmds. Remove them. */
1821- tcmu_remove_all_queued_tmr (udev );
1822- if (!list_empty (& udev -> qfull_queue ))
1823- all_expired = false;
1824- idr_destroy (& udev -> commands );
1825- WARN_ON (!all_expired );
1826-
1827- tcmu_blocks_release (& udev -> data_blocks , 0 , udev -> dbi_max + 1 );
1828- bitmap_free (udev -> data_bitmap );
1829- mutex_unlock (& udev -> cmdr_lock );
1830-
1831- call_rcu (& dev -> rcu_head , tcmu_dev_call_rcu );
1832- }
1833-
18341834static int tcmu_release (struct uio_info * info , struct inode * inode )
18351835{
18361836 struct tcmu_dev * udev = container_of (info , struct tcmu_dev , uio_info );
0 commit comments