@@ -190,6 +190,10 @@ struct peer {
190190 uint8_t endpoint_type ;
191191 uint8_t medium_spec ;
192192 } recovery ;
193+
194+ // Bridge downstream EID pool: number of pooled EIDs and the first EID of the range
195+ uint8_t pool_size ;
196+ uint8_t pool_start ;
193197};
194198
195199struct ctx {
@@ -1364,7 +1368,7 @@ static int endpoint_query_phys(struct ctx *ctx, const dest_phys *dest,
13641368}
13651369
13661370/* returns -ECONNREFUSED if the endpoint returns failure. */
1367- static int endpoint_send_set_endpoint_id (const struct peer * peer ,
1371+ static int endpoint_send_set_endpoint_id (struct peer * peer ,
13681372 mctp_eid_t * new_eidp )
13691373{
13701374 struct sockaddr_mctp_ext addr ;
@@ -1430,9 +1434,11 @@ static int endpoint_send_set_endpoint_id(const struct peer *peer,
14301434
14311435 alloc = resp -> status & 0x3 ;
14321436 if (alloc != 0 ) {
1433- // TODO for bridges
1434- warnx ("%s requested allocation pool, unimplemented" ,
1435- dest_phys_tostr (dest ));
1437+ peer -> pool_size = resp -> eid_pool_size ;
1438+ if (peer -> ctx -> verbose ) {
1439+ warnx ("%s requested allocation of pool size = %d" ,
1440+ dest_phys_tostr (dest ), peer -> pool_size );
1441+ }
14361442 }
14371443
14381444 rc = 0 ;
@@ -1660,15 +1666,37 @@ static int peer_set_mtu(struct ctx *ctx, struct peer *peer, uint32_t mtu)
16601666 return rc ;
16611667}
16621668
1669+ // checks if EIDs from bridge + 1 has contiguous max_pool_size available eids
1670+ // returns next candidate eid for pool start
1671+ static int get_next_pool_start (mctp_eid_t bridge_eid , struct net * n ,
1672+ int max_pool_size )
1673+ {
1674+ if (bridge_eid + 1 + max_pool_size > eid_alloc_max ) {
1675+ return - EADDRNOTAVAIL ;
1676+ }
1677+ for (mctp_eid_t e = bridge_eid + 1 ; e <= bridge_eid + max_pool_size ;
1678+ e ++ ) {
1679+ // found a bridge in between, need to skip its pool range
1680+ if (n -> peers [e ] != NULL ) {
1681+ e += n -> peers [e ]-> pool_size ;
1682+ return e ;
1683+ }
1684+ }
1685+ /* possible to have contiguous eids within
1686+ bridge_eid to bridge_eid + max_pool_size */
1687+ return bridge_eid + 1 ;
1688+ }
1689+
16631690static int endpoint_assign_eid (struct ctx * ctx , sd_bus_error * berr ,
16641691 const dest_phys * dest , struct peer * * ret_peer ,
1665- mctp_eid_t static_eid )
1692+ mctp_eid_t static_eid , bool assign_bridge )
16661693{
16671694 mctp_eid_t e , new_eid ;
16681695 struct net * n = NULL ;
16691696 struct peer * peer = NULL ;
16701697 uint32_t net ;
16711698 int rc ;
1699+ bool is_pool_possible = false;
16721700
16731701 net = mctp_nl_net_byindex (ctx -> nl , dest -> ifindex );
16741702 if (!net ) {
@@ -1691,11 +1719,50 @@ static int endpoint_assign_eid(struct ctx *ctx, sd_bus_error *berr,
16911719 } else {
16921720 /* Find an unused EID */
16931721 for (e = eid_alloc_min ; e <= eid_alloc_max ; e ++ ) {
1694- if (n -> peers [e ])
1722+ if (n -> peers [e ]) {
1723+ // used peer may be a bridge, skip its eid range
1724+ e += n -> peers [e ]-> pool_size ;
16951725 continue ;
1726+ }
1727+
1728+ // check for max sized pool from e + 1
1729+ if (assign_bridge ) {
1730+ int next_pool_start = get_next_pool_start (
1731+ e , n , ctx -> max_pool_size );
1732+ if (next_pool_start < 0 ) {
1733+ warnx ("Ran out of EIDs from net %d while"
1734+ "allocating bridge downstream endpoint at %s " ,
1735+ net , dest_phys_tostr (dest ));
1736+ is_pool_possible = false;
1737+ /* Ran out of pool EIDs: assign only the bridge's own EID, by
1738+ finding the first available EID that is not part of any pool */
1739+ for (e = eid_alloc_min ;
1740+ e <= eid_alloc_max ; e ++ ) {
1741+ if (n -> peers [e ]) {
1742+ // used peer may be a bridge, skip its eid range
1743+ e += n -> peers [e ]
1744+ -> pool_size ;
1745+ continue ;
1746+ }
1747+ break ;
1748+ }
1749+ } else if (next_pool_start != e + 1 ) {
1750+ // e doesn't have any contiguous max pool size eids available
1751+ e += next_pool_start ;
1752+ continue ;
1753+ } else {
1754+ // found contiguous eids of max_pool_size from bridge_eid
1755+ is_pool_possible = true;
1756+ }
1757+ }
1758+
16961759 rc = add_peer (ctx , dest , e , net , & peer );
16971760 if (rc < 0 )
16981761 return rc ;
1762+ if (assign_bridge && is_pool_possible ) {
1763+ peer -> pool_size = ctx -> max_pool_size ;
1764+ peer -> pool_start = e + 1 ;
1765+ }
16991766 break ;
17001767 }
17011768 if (e > eid_alloc_max ) {
@@ -1718,6 +1785,10 @@ static int endpoint_assign_eid(struct ctx *ctx, sd_bus_error *berr,
17181785 }
17191786
17201787 if (new_eid != peer -> eid ) {
1788+ // avoid allocation for any different EID in response
1789+ warnx ("Mismatch of requested from received EID, resetting the pool" );
1790+ peer -> pool_size = 0 ;
1791+ peer -> pool_start = 0 ;
17211792 rc = change_peer_eid (peer , new_eid );
17221793 if (rc == - EEXIST ) {
17231794 sd_bus_error_setf (
@@ -2117,7 +2188,7 @@ static int method_setup_endpoint(sd_bus_message *call, void *data,
21172188 }
21182189
21192190 /* Set Endpoint ID */
2120- rc = endpoint_assign_eid (ctx , berr , dest , & peer , 0 );
2191+ rc = endpoint_assign_eid (ctx , berr , dest , & peer , 0 , false );
21212192 if (rc < 0 )
21222193 goto err ;
21232194
@@ -2170,21 +2241,41 @@ static int method_assign_endpoint(sd_bus_message *call, void *data,
21702241 peer -> net , peer_path , 0 );
21712242 }
21722243
2173- rc = endpoint_assign_eid (ctx , berr , dest , & peer , 0 );
2244+ rc = endpoint_assign_eid (ctx , berr , dest , & peer , 0 , true );
21742245 if (rc < 0 )
21752246 goto err ;
21762247
21772248 peer_path = path_from_peer (peer );
21782249 if (!peer_path )
21792250 goto err ;
21802251
2252+ if (peer -> pool_size > 0 ) {
2253+ // Call for Allocate EndpointID
2254+ }
2255+
21812256 return sd_bus_reply_method_return (call , "yisb" , peer -> eid , peer -> net ,
21822257 peer_path , 1 );
21832258err :
21842259 set_berr (ctx , rc , berr );
21852260 return rc ;
21862261}
21872262
2263+ // Checks if given EID belongs to any bridge's pool range
2264+ static bool is_eid_in_bridge_pool (struct net * n , mctp_eid_t eid )
2265+ {
2266+ for (int i = eid_alloc_min ; i <= eid ; i ++ ) {
2267+ struct peer * peer = n -> peers [i ];
2268+ if (peer && peer -> pool_size > 0 ) {
2269+ if (eid >= peer -> pool_start &&
2270+ eid < peer -> pool_start + peer -> pool_size ) {
2271+ return true;
2272+ }
2273+ i += peer -> pool_size ;
2274+ }
2275+ }
2276+ return false;
2277+ }
2278+
21882279static int method_assign_endpoint_static (sd_bus_message * call , void * data ,
21892280 sd_bus_error * berr )
21902281{
@@ -2239,10 +2330,22 @@ static int method_assign_endpoint_static(sd_bus_message *call, void *data,
22392330 return sd_bus_error_setf (berr ,
22402331 SD_BUS_ERROR_INVALID_ARGS ,
22412332 "Address in use" );
2333+ } else {
2334+ // is requested EID part of any bridge pool range
2335+ struct net * n = lookup_net (ctx , netid );
2336+ if (!n ) {
2337+ bug_warn ("%s: Bad old net %d" , __func__ , netid );
2338+ return - EPROTO ;
2339+ }
2340+ if (is_eid_in_bridge_pool (n , eid )) {
2341+ return sd_bus_error_setf (
2342+ berr , SD_BUS_ERROR_INVALID_ARGS ,
2343+ "EID belongs to another MCTP bridge pool" );
2344+ }
22422345 }
22432346 }
22442347
2245- rc = endpoint_assign_eid (ctx , berr , dest , & peer , eid );
2348+ rc = endpoint_assign_eid (ctx , berr , dest , & peer , eid , false );
22462349 if (rc < 0 ) {
22472350 goto err ;
22482351 }
@@ -2652,7 +2755,8 @@ static int peer_endpoint_recover(sd_event_source *s, uint64_t usec,
26522755 * after which we immediately return as there's no old peer state left to
26532756 * maintain.
26542757 */
2655- return endpoint_assign_eid (ctx , NULL , & phys , & peer , 0 );
2758+ return endpoint_assign_eid (ctx , NULL , & phys , & peer , 0 ,
2759+ false);
26562760 }
26572761
26582762 /* Confirmation of the same device, apply its already allocated EID */
0 commit comments