Portable Hardware Locality (hwloc) Documentation: v1.5.2

helper.h
/*
 * Copyright © 2009 CNRS
 * Copyright © 2009-2012 inria. All rights reserved.
 * Copyright © 2009-2012 Université Bordeaux 1
 * Copyright © 2009-2010 Cisco Systems, Inc. All rights reserved.
 * See COPYING in top-level directory.
 */

#ifndef HWLOC_HELPER_H
#define HWLOC_HELPER_H

#ifndef HWLOC_H
#error Please include the main hwloc.h instead
#endif

#include <stdlib.h>
#include <errno.h>
#include <string.h> /* memset() is used by the allocation helpers below */
#include <stdio.h>  /* sscanf() is used by hwloc_get_pcidev_by_busidstring() */
#include <assert.h> /* assert() is used by hwloc_get_hostbridge_by_pcibus() */


#ifdef __cplusplus
extern "C" {
#endif

static inline int
hwloc_get_type_or_below_depth (hwloc_topology_t topology, hwloc_obj_type_t type);
static inline int
hwloc_get_type_or_below_depth (hwloc_topology_t topology, hwloc_obj_type_t type)
{
  int depth = hwloc_get_type_depth(topology, type);

  if (depth != HWLOC_TYPE_DEPTH_UNKNOWN)
    return depth;

  /* find the highest existing level with type order >= */
  for(depth = hwloc_get_type_depth(topology, HWLOC_OBJ_PU); ; depth--)
    if (hwloc_compare_types(hwloc_get_depth_type(topology, depth), type) < 0)
      return depth+1;

  /* Shouldn't ever happen, as there is always a SYSTEM level with lower order and known depth. */
  /* abort(); */
}

static inline int
hwloc_get_type_or_above_depth (hwloc_topology_t topology, hwloc_obj_type_t type);
static inline int
hwloc_get_type_or_above_depth (hwloc_topology_t topology, hwloc_obj_type_t type)
{
  int depth = hwloc_get_type_depth(topology, type);

  if (depth != HWLOC_TYPE_DEPTH_UNKNOWN)
    return depth;

  /* find the lowest existing level with type order <= */
  for(depth = 0; ; depth++)
    if (hwloc_compare_types(hwloc_get_depth_type(topology, depth), type) > 0)
      return depth-1;

  /* Shouldn't ever happen, as there is always a PU level with higher order and known depth. */
  /* abort(); */
}

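/* Usage sketch (illustrative; not part of the hwloc API): count the objects at
 * the Core level, falling back to the closest level below Core (at worst the
 * PU level) when the topology has no Core objects.
 * The function name "hwloc_example_count_cores_or_below" is hypothetical. */
static inline unsigned
hwloc_example_count_cores_or_below(hwloc_topology_t topology)
{
  /* always a usable depth: hwloc_get_type_or_below_depth() falls back to PU */
  int depth = hwloc_get_type_or_below_depth(topology, HWLOC_OBJ_CORE);
  return hwloc_get_nbobjs_by_depth(topology, (unsigned) depth);
}
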
static inline hwloc_obj_t
hwloc_get_root_obj (hwloc_topology_t topology);
static inline hwloc_obj_t
hwloc_get_root_obj (hwloc_topology_t topology)
{
  return hwloc_get_obj_by_depth (topology, 0, 0);
}

static inline hwloc_obj_t
hwloc_get_ancestor_obj_by_depth (hwloc_topology_t topology, unsigned depth, hwloc_obj_t obj);
static inline hwloc_obj_t
hwloc_get_ancestor_obj_by_depth (hwloc_topology_t topology, unsigned depth, hwloc_obj_t obj)
{
  hwloc_obj_t ancestor = obj;
  if (obj->depth < depth)
    return NULL;
  while (ancestor && ancestor->depth > depth)
    ancestor = ancestor->parent;
  return ancestor;
}

static inline hwloc_obj_t
hwloc_get_ancestor_obj_by_type (hwloc_topology_t topology, hwloc_obj_type_t type, hwloc_obj_t obj);
static inline hwloc_obj_t
hwloc_get_ancestor_obj_by_type (hwloc_topology_t topology, hwloc_obj_type_t type, hwloc_obj_t obj)
{
  hwloc_obj_t ancestor = obj->parent;
  while (ancestor && ancestor->type != type)
    ancestor = ancestor->parent;
  return ancestor;
}

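/* Usage sketch (illustrative; not part of the hwloc API): return the NUMA node
 * object containing a given PU, or NULL on a machine without NUMA nodes.
 * The function name is hypothetical. */
static inline hwloc_obj_t
hwloc_example_numa_node_of_pu(hwloc_topology_t topology, hwloc_obj_t pu)
{
  return hwloc_get_ancestor_obj_by_type(topology, HWLOC_OBJ_NODE, pu);
}
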
static inline hwloc_obj_t
hwloc_get_next_obj_by_depth (hwloc_topology_t topology, unsigned depth, hwloc_obj_t prev)
{
  if (!prev)
    return hwloc_get_obj_by_depth (topology, depth, 0);
  if (prev->depth != depth)
    return NULL;
  return prev->next_cousin;
}

static inline hwloc_obj_t
hwloc_get_next_obj_by_type (hwloc_topology_t topology, hwloc_obj_type_t type,
                            hwloc_obj_t prev)
{
  int depth = hwloc_get_type_depth(topology, type);
  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
    return NULL;
  return hwloc_get_next_obj_by_depth (topology, depth, prev);
}

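/* Usage sketch (illustrative; not part of the hwloc API): iterate over all
 * NUMA node objects and sum their local memory.
 * The function name is hypothetical. */
static inline unsigned long long
hwloc_example_total_numa_memory(hwloc_topology_t topology)
{
  hwloc_obj_t node = NULL;
  unsigned long long total = 0;
  while ((node = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_NODE, node)) != NULL)
    total += node->memory.local_memory; /* bytes of memory local to this node */
  return total;
}
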
static inline hwloc_obj_t
hwloc_get_pu_obj_by_os_index(hwloc_topology_t topology, unsigned os_index);
static inline hwloc_obj_t
hwloc_get_pu_obj_by_os_index(hwloc_topology_t topology, unsigned os_index)
{
  hwloc_obj_t obj = NULL;
  while ((obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PU, obj)) != NULL)
    if (obj->os_index == os_index)
      return obj;
  return NULL;
}

static inline hwloc_obj_t
hwloc_get_next_child (hwloc_topology_t topology, hwloc_obj_t parent, hwloc_obj_t prev)
{
  if (!prev)
    return parent->first_child;
  if (prev->parent != parent)
    return NULL;
  return prev->next_sibling;
}

static inline hwloc_obj_t
hwloc_get_common_ancestor_obj (hwloc_topology_t topology, hwloc_obj_t obj1, hwloc_obj_t obj2);
static inline hwloc_obj_t
hwloc_get_common_ancestor_obj (hwloc_topology_t topology, hwloc_obj_t obj1, hwloc_obj_t obj2)
{
  /* the loop isn't so easy since intermediate ancestors may have
   * different depth, causing us to alternate between using obj1->parent
   * and obj2->parent. Also, even if at some point we find ancestors of
   * the same depth, their ancestors may have different depth again.
   */
  while (obj1 != obj2) {
    while (obj1->depth > obj2->depth)
      obj1 = obj1->parent;
    while (obj2->depth > obj1->depth)
      obj2 = obj2->parent;
    if (obj1 != obj2 && obj1->depth == obj2->depth) {
      obj1 = obj1->parent;
      obj2 = obj2->parent;
    }
  }
  return obj1;
}

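/* Usage sketch (illustrative; not part of the hwloc API): check whether two
 * PUs are contained in the same socket by walking up from their common
 * ancestor. The function name is hypothetical. */
static inline int
hwloc_example_share_a_socket(hwloc_topology_t topology, hwloc_obj_t pu1, hwloc_obj_t pu2)
{
  hwloc_obj_t ancestor = hwloc_get_common_ancestor_obj(topology, pu1, pu2);
  /* if the common ancestor is inside a socket, a socket is found above it;
   * if it is above all sockets, none is found and 0 is returned */
  while (ancestor && ancestor->type != HWLOC_OBJ_SOCKET)
    ancestor = ancestor->parent;
  return ancestor != NULL;
}
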
static inline int
hwloc_obj_is_in_subtree (hwloc_topology_t topology, hwloc_obj_t obj, hwloc_obj_t subtree_root);
static inline int
hwloc_obj_is_in_subtree (hwloc_topology_t topology, hwloc_obj_t obj, hwloc_obj_t subtree_root)
{
  return hwloc_bitmap_isincluded(obj->cpuset, subtree_root->cpuset);
}

static inline hwloc_obj_t
hwloc_get_first_largest_obj_inside_cpuset(hwloc_topology_t topology, hwloc_const_cpuset_t set)
{
  hwloc_obj_t obj = hwloc_get_root_obj(topology);
  if (!obj->cpuset || !hwloc_bitmap_intersects(obj->cpuset, set))
    return NULL;
  while (!hwloc_bitmap_isincluded(obj->cpuset, set)) {
    /* while the object intersects without being included, look at its children */
    hwloc_obj_t child = NULL;
    while ((child = hwloc_get_next_child(topology, obj, child)) != NULL) {
      if (child->cpuset && hwloc_bitmap_intersects(child->cpuset, set))
        break;
    }
    if (!child)
      /* no child intersects, return their father */
      return obj;
    /* found one intersecting child, look at its children */
    obj = child;
  }
  /* obj is included, return it */
  return obj;
}

int hwloc_get_largest_objs_inside_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                          hwloc_obj_t * restrict objs, int max);

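/* Usage sketch (illustrative; not part of the hwloc API): count the largest
 * objects that exactly cover a given cpuset by repeatedly taking the first
 * largest object and removing its cpuset from the remaining set, the same
 * walk that hwloc_get_largest_objs_inside_cpuset() performs. The set is
 * assumed to be included in the topology cpuset; the function name is
 * hypothetical. */
static inline unsigned
hwloc_example_count_largest_objs(hwloc_topology_t topology, hwloc_const_cpuset_t set)
{
  hwloc_bitmap_t remaining = hwloc_bitmap_dup(set);
  unsigned n = 0;
  if (!remaining)
    return 0;
  while (!hwloc_bitmap_iszero(remaining)) {
    hwloc_obj_t obj = hwloc_get_first_largest_obj_inside_cpuset(topology, remaining);
    if (!obj)
      break;
    hwloc_bitmap_andnot(remaining, remaining, obj->cpuset);
    n++;
  }
  hwloc_bitmap_free(remaining);
  return n;
}
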
static inline hwloc_obj_t
hwloc_get_next_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                           unsigned depth, hwloc_obj_t prev)
{
  hwloc_obj_t next = hwloc_get_next_obj_by_depth(topology, depth, prev);
  if (!next || !next->cpuset)
    return NULL;
  while (next && !hwloc_bitmap_isincluded(next->cpuset, set))
    next = next->next_cousin;
  return next;
}

static inline hwloc_obj_t
hwloc_get_next_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                          hwloc_obj_type_t type, hwloc_obj_t prev)
{
  int depth = hwloc_get_type_depth(topology, type);
  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
    return NULL;
  return hwloc_get_next_obj_inside_cpuset_by_depth(topology, set, depth, prev);
}

static inline hwloc_obj_t
hwloc_get_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                      unsigned depth, unsigned idx);
static inline hwloc_obj_t
hwloc_get_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                      unsigned depth, unsigned idx)
{
  hwloc_obj_t obj = hwloc_get_obj_by_depth (topology, depth, 0);
  unsigned count = 0;
  if (!obj || !obj->cpuset)
    return NULL;
  while (obj) {
    if (hwloc_bitmap_isincluded(obj->cpuset, set)) {
      if (count == idx)
        return obj;
      count++;
    }
    obj = obj->next_cousin;
  }
  return NULL;
}

static inline hwloc_obj_t
hwloc_get_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                     hwloc_obj_type_t type, unsigned idx);
static inline hwloc_obj_t
hwloc_get_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                     hwloc_obj_type_t type, unsigned idx)
{
  int depth = hwloc_get_type_depth(topology, type);
  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
    return NULL;
  return hwloc_get_obj_inside_cpuset_by_depth(topology, set, depth, idx);
}

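/* Usage sketch (illustrative; not part of the hwloc API): bind the current
 * thread to the idx-th PU contained in the given cpuset. Returns -1 with errno
 * set if no such PU exists or if binding fails. The function name is
 * hypothetical. */
static inline int
hwloc_example_bind_to_pu_inside(hwloc_topology_t topology, hwloc_const_cpuset_t set, unsigned idx)
{
  hwloc_obj_t pu = hwloc_get_obj_inside_cpuset_by_type(topology, set, HWLOC_OBJ_PU, idx);
  if (!pu) {
    errno = ENOENT;
    return -1;
  }
  return hwloc_set_cpubind(topology, pu->cpuset, HWLOC_CPUBIND_THREAD);
}
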
static inline unsigned
hwloc_get_nbobjs_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                         unsigned depth);
static inline unsigned
hwloc_get_nbobjs_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                         unsigned depth)
{
  hwloc_obj_t obj = hwloc_get_obj_by_depth (topology, depth, 0);
  unsigned count = 0;
  if (!obj || !obj->cpuset)
    return 0;
  while (obj) {
    if (hwloc_bitmap_isincluded(obj->cpuset, set))
      count++;
    obj = obj->next_cousin;
  }
  return count;
}

static inline int
hwloc_get_nbobjs_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                        hwloc_obj_type_t type);
static inline int
hwloc_get_nbobjs_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                        hwloc_obj_type_t type)
{
  int depth = hwloc_get_type_depth(topology, type);
  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN)
    return 0;
  if (depth == HWLOC_TYPE_DEPTH_MULTIPLE)
    return -1; /* FIXME: aggregate nbobjs from different levels? */
  return hwloc_get_nbobjs_inside_cpuset_by_depth(topology, set, depth);
}

static inline int
hwloc_get_obj_index_inside_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                   hwloc_obj_t obj);
static inline int
hwloc_get_obj_index_inside_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                   hwloc_obj_t obj)
{
  int idx = 0;
  if (!hwloc_bitmap_isincluded(obj->cpuset, set))
    return -1;
  /* count how many objects are inside the cpuset on the way from us to the beginning of the level */
  while ((obj = obj->prev_cousin) != NULL)
    if (hwloc_bitmap_isincluded(obj->cpuset, set))
      idx++;
  return idx;
}

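/* Usage sketch (illustrative; not part of the hwloc API): return the rank of a
 * PU among the PUs of its NUMA node, or -1 if the PU has no NUMA node
 * ancestor. The function name is hypothetical. */
static inline int
hwloc_example_pu_rank_in_node(hwloc_topology_t topology, hwloc_obj_t pu)
{
  hwloc_obj_t node = hwloc_get_ancestor_obj_by_type(topology, HWLOC_OBJ_NODE, pu);
  if (!node)
    return -1;
  return hwloc_get_obj_index_inside_cpuset(topology, node->cpuset, pu);
}
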
static inline hwloc_obj_t
hwloc_get_child_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                 hwloc_obj_t parent);
static inline hwloc_obj_t
hwloc_get_child_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                 hwloc_obj_t parent)
{
  hwloc_obj_t child;
  if (!parent->cpuset || hwloc_bitmap_iszero(set))
    return NULL;
  child = parent->first_child;
  while (child) {
    if (child->cpuset && hwloc_bitmap_isincluded(set, child->cpuset))
      return child;
    child = child->next_sibling;
  }
  return NULL;
}

static inline hwloc_obj_t
hwloc_get_obj_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set);
static inline hwloc_obj_t
hwloc_get_obj_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set)
{
  struct hwloc_obj *current = hwloc_get_root_obj(topology);
  if (hwloc_bitmap_iszero(set) || !current->cpuset || !hwloc_bitmap_isincluded(set, current->cpuset))
    return NULL;
  while (1) {
    hwloc_obj_t child = hwloc_get_child_covering_cpuset(topology, set, current);
    if (!child)
      return current;
    current = child;
  }
}

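/* Usage sketch (illustrative; not part of the hwloc API): return the smallest
 * object covering the current process binding, or NULL if the binding cannot
 * be retrieved. The function name is hypothetical. */
static inline hwloc_obj_t
hwloc_example_obj_covering_binding(hwloc_topology_t topology)
{
  hwloc_cpuset_t set = hwloc_bitmap_alloc();
  hwloc_obj_t obj = NULL;
  if (!set)
    return NULL;
  if (!hwloc_get_cpubind(topology, set, HWLOC_CPUBIND_PROCESS))
    obj = hwloc_get_obj_covering_cpuset(topology, set);
  hwloc_bitmap_free(set);
  return obj;
}
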
static inline hwloc_obj_t
hwloc_get_next_obj_covering_cpuset_by_depth(hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                            unsigned depth, hwloc_obj_t prev)
{
  hwloc_obj_t next = hwloc_get_next_obj_by_depth(topology, depth, prev);
  if (!next || !next->cpuset)
    return NULL;
  while (next && !hwloc_bitmap_intersects(set, next->cpuset))
    next = next->next_cousin;
  return next;
}

static inline hwloc_obj_t
hwloc_get_next_obj_covering_cpuset_by_type(hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                           hwloc_obj_type_t type, hwloc_obj_t prev)
{
  int depth = hwloc_get_type_depth(topology, type);
  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
    return NULL;
  return hwloc_get_next_obj_covering_cpuset_by_depth(topology, set, depth, prev);
}

static inline int
hwloc_get_cache_type_depth (hwloc_topology_t topology,
                            unsigned cachelevel, hwloc_obj_cache_type_t cachetype)
{
  int depth;
  int found = HWLOC_TYPE_DEPTH_UNKNOWN;
  for (depth=0; ; depth++) {
    hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, depth, 0);
    if (!obj)
      break;
    if (obj->type != HWLOC_OBJ_CACHE || obj->attr->cache.depth != cachelevel)
      /* doesn't match, try next depth */
      continue;
    if (cachetype == (hwloc_obj_cache_type_t) -1) {
      if (found != HWLOC_TYPE_DEPTH_UNKNOWN) {
        /* second match, return MULTIPLE */
        return HWLOC_TYPE_DEPTH_MULTIPLE;
      }
      /* first match, mark it as found */
      found = depth;
      continue;
    }
    if (obj->attr->cache.type == cachetype || obj->attr->cache.type == HWLOC_OBJ_CACHE_UNIFIED)
      /* exact match (either unified is alone, or we match instruction or data), return immediately */
      return depth;
  }
  /* went to the bottom, return what we found */
  return found;
}

static inline hwloc_obj_t
hwloc_get_cache_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set);
static inline hwloc_obj_t
hwloc_get_cache_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set)
{
  hwloc_obj_t current = hwloc_get_obj_covering_cpuset(topology, set);
  while (current) {
    if (current->type == HWLOC_OBJ_CACHE)
      return current;
    current = current->parent;
  }
  return NULL;
}

static inline hwloc_obj_t
hwloc_get_shared_cache_covering_obj (hwloc_topology_t topology, hwloc_obj_t obj);
static inline hwloc_obj_t
hwloc_get_shared_cache_covering_obj (hwloc_topology_t topology, hwloc_obj_t obj)
{
  hwloc_obj_t current = obj->parent;
  if (!obj->cpuset)
    return NULL;
  while (current && current->cpuset) {
    if (!hwloc_bitmap_isequal(current->cpuset, obj->cpuset)
        && current->type == HWLOC_OBJ_CACHE)
      return current;
    current = current->parent;
  }
  return NULL;
}

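/* Usage sketch (illustrative; not part of the hwloc API): return the size in
 * bytes of the first cache above a PU that is shared with other PUs, or 0 if
 * there is none. The function name is hypothetical. */
static inline unsigned long long
hwloc_example_shared_cache_size_above(hwloc_topology_t topology, hwloc_obj_t pu)
{
  hwloc_obj_t cache = hwloc_get_shared_cache_covering_obj(topology, pu);
  return cache ? (unsigned long long) cache->attr->cache.size : 0;
}
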
/* TODO: rather provide an iterator? Provide a way to know how much should be allocated? By returning the total number of objects instead? */
unsigned hwloc_get_closest_objs (hwloc_topology_t topology, hwloc_obj_t src, hwloc_obj_t * restrict objs, unsigned max);

static inline hwloc_obj_t
hwloc_get_obj_below_by_type (hwloc_topology_t topology,
                             hwloc_obj_type_t type1, unsigned idx1,
                             hwloc_obj_type_t type2, unsigned idx2);
static inline hwloc_obj_t
hwloc_get_obj_below_by_type (hwloc_topology_t topology,
                             hwloc_obj_type_t type1, unsigned idx1,
                             hwloc_obj_type_t type2, unsigned idx2)
{
  hwloc_obj_t obj;
  obj = hwloc_get_obj_by_type (topology, type1, idx1);
  if (!obj || !obj->cpuset)
    return NULL;
  return hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, type2, idx2);
}

static inline hwloc_obj_t
hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_type_t *typev, unsigned *idxv);
static inline hwloc_obj_t
hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_type_t *typev, unsigned *idxv)
{
  hwloc_obj_t obj = hwloc_get_root_obj(topology);
  int i;
  for(i=0; i<nr; i++) {
    if (!obj || !obj->cpuset)
      return NULL;
    obj = hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, typev[i], idxv[i]);
  }
  return obj;
}

static inline void
hwloc_distributev(hwloc_topology_t topology, hwloc_obj_t *root, unsigned n_roots, hwloc_cpuset_t *cpuset, unsigned n, unsigned until);
static inline void
hwloc_distribute(hwloc_topology_t topology, hwloc_obj_t root, hwloc_cpuset_t *cpuset, unsigned n, unsigned until)
{
  unsigned i;
  if (!root->arity || n == 1 || root->depth >= until) {
    /* Got to the bottom, we can't split any more, put everything there. */
    for (i=0; i<n; i++)
      cpuset[i] = hwloc_bitmap_dup(root->cpuset);
    return;
  }
  hwloc_distributev(topology, root->children, root->arity, cpuset, n, until);
}

static inline void
hwloc_distributev(hwloc_topology_t topology, hwloc_obj_t *roots, unsigned n_roots, hwloc_cpuset_t *cpuset, unsigned n, unsigned until)
{
  unsigned i;
  unsigned tot_weight;
  hwloc_cpuset_t *cpusetp = cpuset;

  tot_weight = 0;
  for (i = 0; i < n_roots; i++)
    if (roots[i]->cpuset)
      tot_weight += hwloc_bitmap_weight(roots[i]->cpuset);

  for (i = 0; i < n_roots && tot_weight; i++) {
    /* Give to roots[i] a portion proportional to its weight */
    unsigned weight = roots[i]->cpuset ? hwloc_bitmap_weight(roots[i]->cpuset) : 0;
    unsigned chunk = (n * weight + tot_weight-1) / tot_weight;
    hwloc_distribute(topology, roots[i], cpusetp, chunk, until);
    cpusetp += chunk;
    tot_weight -= weight;
    n -= chunk;
  }
}

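/* Usage sketch (illustrative; not part of the hwloc API): split the whole
 * machine into n roughly balanced parts, one cpuset per part, recursing as
 * deep as the topology allows. Each returned cpuset must be freed with
 * hwloc_bitmap_free() by the caller. The function name is hypothetical. */
static inline void
hwloc_example_distribute_over_machine(hwloc_topology_t topology, hwloc_cpuset_t *sets, unsigned n)
{
  /* (unsigned) -1 as the "until" depth means "never stop early" */
  hwloc_distribute(topology, hwloc_get_root_obj(topology), sets, n, (unsigned) -1);
}
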
static inline void *
hwloc_alloc_membind_policy_nodeset(hwloc_topology_t topology, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags)
{
  void *p = hwloc_alloc_membind_nodeset(topology, len, nodeset, policy, flags);
  if (p)
    return p;
  hwloc_set_membind_nodeset(topology, nodeset, policy, flags);
  p = hwloc_alloc(topology, len);
  if (p && policy != HWLOC_MEMBIND_FIRSTTOUCH)
    /* Enforce the binding by touching the data */
    memset(p, 0, len);
  return p;
}

static inline void *
hwloc_alloc_membind_policy(hwloc_topology_t topology, size_t len, hwloc_const_cpuset_t cpuset, hwloc_membind_policy_t policy, int flags)
{
  void *p = hwloc_alloc_membind(topology, len, cpuset, policy, flags);
  if (p)
    return p;
  hwloc_set_membind(topology, cpuset, policy, flags);
  p = hwloc_alloc(topology, len);
  if (p && policy != HWLOC_MEMBIND_FIRSTTOUCH)
    /* Enforce the binding by touching the data */
    memset(p, 0, len);
  return p;
}

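/* Usage sketch (illustrative; not part of the hwloc API): allocate a buffer
 * bound to the memory near a given object, which is assumed to have a cpuset
 * (i.e. not an I/O object), enforcing the binding policy if needed.
 * The function name is hypothetical. */
static inline void *
hwloc_example_alloc_near_obj(hwloc_topology_t topology, hwloc_obj_t obj, size_t len)
{
  return hwloc_alloc_membind_policy(topology, len, obj->cpuset, HWLOC_MEMBIND_BIND, 0);
}
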
static inline hwloc_const_cpuset_t
hwloc_topology_get_complete_cpuset(hwloc_topology_t topology);
static inline hwloc_const_cpuset_t
hwloc_topology_get_complete_cpuset(hwloc_topology_t topology)
{
  return hwloc_get_root_obj(topology)->complete_cpuset;
}

static inline hwloc_const_cpuset_t
hwloc_topology_get_topology_cpuset(hwloc_topology_t topology);
static inline hwloc_const_cpuset_t
hwloc_topology_get_topology_cpuset(hwloc_topology_t topology)
{
  return hwloc_get_root_obj(topology)->cpuset;
}

static inline hwloc_const_cpuset_t
hwloc_topology_get_online_cpuset(hwloc_topology_t topology);
static inline hwloc_const_cpuset_t
hwloc_topology_get_online_cpuset(hwloc_topology_t topology)
{
  return hwloc_get_root_obj(topology)->online_cpuset;
}

static inline hwloc_const_cpuset_t
hwloc_topology_get_allowed_cpuset(hwloc_topology_t topology);
static inline hwloc_const_cpuset_t
hwloc_topology_get_allowed_cpuset(hwloc_topology_t topology)
{
  return hwloc_get_root_obj(topology)->allowed_cpuset;
}

static inline hwloc_const_nodeset_t
hwloc_topology_get_complete_nodeset(hwloc_topology_t topology);
static inline hwloc_const_nodeset_t
hwloc_topology_get_complete_nodeset(hwloc_topology_t topology)
{
  return hwloc_get_root_obj(topology)->complete_nodeset;
}

static inline hwloc_const_nodeset_t
hwloc_topology_get_topology_nodeset(hwloc_topology_t topology);
static inline hwloc_const_nodeset_t
hwloc_topology_get_topology_nodeset(hwloc_topology_t topology)
{
  return hwloc_get_root_obj(topology)->nodeset;
}

static inline hwloc_const_nodeset_t
hwloc_topology_get_allowed_nodeset(hwloc_topology_t topology);
static inline hwloc_const_nodeset_t
hwloc_topology_get_allowed_nodeset(hwloc_topology_t topology)
{
  return hwloc_get_root_obj(topology)->allowed_nodeset;
}

static inline void
hwloc_cpuset_to_nodeset(hwloc_topology_t topology, hwloc_const_cpuset_t cpuset, hwloc_nodeset_t nodeset)
{
  int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NODE);
  hwloc_obj_t obj;

  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN) {
    if (hwloc_bitmap_iszero(cpuset))
      hwloc_bitmap_zero(nodeset);
    else
      /* Assume the whole system */
      hwloc_bitmap_fill(nodeset);
    return;
  }

  hwloc_bitmap_zero(nodeset);
  obj = NULL;
  while ((obj = hwloc_get_next_obj_covering_cpuset_by_depth(topology, cpuset, depth, obj)) != NULL)
    hwloc_bitmap_set(nodeset, obj->os_index);
}

static inline void
hwloc_cpuset_to_nodeset_strict(hwloc_topology_t topology, hwloc_const_cpuset_t cpuset, hwloc_nodeset_t nodeset)
{
  int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NODE);
  hwloc_obj_t obj;
  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN)
    return;
  hwloc_bitmap_zero(nodeset);
  obj = NULL;
  while ((obj = hwloc_get_next_obj_covering_cpuset_by_depth(topology, cpuset, depth, obj)) != NULL)
    hwloc_bitmap_set(nodeset, obj->os_index);
}

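/* Usage sketch (illustrative; not part of the hwloc API): bind the current
 * process memory to the NUMA nodes whose cpusets intersect the given cpuset.
 * Returns -1 on allocation or binding failure. The function name is
 * hypothetical. */
static inline int
hwloc_example_bind_mem_near_cpuset(hwloc_topology_t topology, hwloc_const_cpuset_t cpuset)
{
  hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
  int err;
  if (!nodeset)
    return -1;
  hwloc_cpuset_to_nodeset(topology, cpuset, nodeset);
  err = hwloc_set_membind_nodeset(topology, nodeset, HWLOC_MEMBIND_BIND, 0);
  hwloc_bitmap_free(nodeset);
  return err;
}
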
static inline void
hwloc_cpuset_from_nodeset(hwloc_topology_t topology, hwloc_cpuset_t cpuset, hwloc_const_nodeset_t nodeset)
{
  int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NODE);
  hwloc_obj_t obj;

  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN) {
    if (hwloc_bitmap_iszero(nodeset))
      hwloc_bitmap_zero(cpuset);
    else
      /* Assume the whole system */
      hwloc_bitmap_fill(cpuset);
    return;
  }

  hwloc_bitmap_zero(cpuset);
  obj = NULL;
  while ((obj = hwloc_get_next_obj_by_depth(topology, depth, obj)) != NULL) {
    if (hwloc_bitmap_isset(nodeset, obj->os_index))
      /* no need to check obj->cpuset because objects in levels always have a cpuset */
      hwloc_bitmap_or(cpuset, cpuset, obj->cpuset);
  }
}

static inline void
hwloc_cpuset_from_nodeset_strict(hwloc_topology_t topology, hwloc_cpuset_t cpuset, hwloc_const_nodeset_t nodeset)
{
  int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NODE);
  hwloc_obj_t obj;
  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN)
    return;
  hwloc_bitmap_zero(cpuset);
  obj = NULL;
  while ((obj = hwloc_get_next_obj_by_depth(topology, depth, obj)) != NULL)
    if (hwloc_bitmap_isset(nodeset, obj->os_index))
      /* no need to check obj->cpuset because objects in levels always have a cpuset */
      hwloc_bitmap_or(cpuset, cpuset, obj->cpuset);
}

static inline const struct hwloc_distances_s *
hwloc_get_whole_distance_matrix_by_depth(hwloc_topology_t topology, unsigned depth)
{
  hwloc_obj_t root = hwloc_get_root_obj(topology);
  unsigned i;
  for(i=0; i<root->distances_count; i++)
    if (root->distances[i]->relative_depth == depth)
      return root->distances[i];
  return NULL;
}

static inline const struct hwloc_distances_s *
hwloc_get_whole_distance_matrix_by_type(hwloc_topology_t topology, hwloc_obj_type_t type)
{
  int depth = hwloc_get_type_depth(topology, type);
  if (depth < 0)
    return NULL;
  return hwloc_get_whole_distance_matrix_by_depth(topology, depth);
}

static inline const struct hwloc_distances_s *
hwloc_get_distance_matrix_covering_obj_by_depth(hwloc_topology_t topology,
                                                hwloc_obj_t obj, unsigned depth,
                                                unsigned *firstp)
{
  while (obj && obj->cpuset) {
    unsigned i;
    for(i=0; i<obj->distances_count; i++)
      if (obj->distances[i]->relative_depth == depth - obj->depth) {
        if (!obj->distances[i]->nbobjs)
          continue;
        *firstp = hwloc_get_next_obj_inside_cpuset_by_depth(topology, obj->cpuset, depth, NULL)->logical_index;
        return obj->distances[i];
      }
    obj = obj->parent;
  }
  return NULL;
}

static inline int
hwloc_get_latency(hwloc_topology_t topology,
                  hwloc_obj_t obj1, hwloc_obj_t obj2,
                  float *latency, float *reverse_latency)
{
  hwloc_obj_t ancestor;
  const struct hwloc_distances_s * distances;
  unsigned first_logical;

  if (obj1->depth != obj2->depth) {
    errno = EINVAL;
    return -1;
  }

  ancestor = hwloc_get_common_ancestor_obj(topology, obj1, obj2);
  distances = hwloc_get_distance_matrix_covering_obj_by_depth(topology, ancestor, obj1->depth, &first_logical);
  if (distances && distances->latency) {
    const float * latency_matrix = distances->latency;
    unsigned nbobjs = distances->nbobjs;
    unsigned l1 = obj1->logical_index - first_logical;
    unsigned l2 = obj2->logical_index - first_logical;
    *latency = latency_matrix[l1*nbobjs+l2];
    *reverse_latency = latency_matrix[l2*nbobjs+l1];
    return 0;
  }

  errno = ENOSYS;
  return -1;
}

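/* Usage sketch (illustrative; not part of the hwloc API): return the relative
 * latency between the i-th and j-th NUMA nodes, or -1 if the nodes do not
 * exist or no distance matrix is available. The function name is
 * hypothetical. */
static inline float
hwloc_example_numa_latency(hwloc_topology_t topology, unsigned i, unsigned j)
{
  hwloc_obj_t n1 = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NODE, i);
  hwloc_obj_t n2 = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NODE, j);
  float lat, rlat;
  if (!n1 || !n2 || hwloc_get_latency(topology, n1, n2, &lat, &rlat) < 0)
    return -1.f;
  return lat;
}
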
static inline hwloc_obj_t
hwloc_get_non_io_ancestor_obj(hwloc_topology_t topology,
                              hwloc_obj_t ioobj)
{
  hwloc_obj_t obj = ioobj;
  while (obj && !obj->cpuset) {
    obj = obj->parent;
  }
  return obj;
}

static inline hwloc_obj_t
hwloc_get_next_pcidev(hwloc_topology_t topology, hwloc_obj_t prev)
{
  return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PCI_DEVICE, prev);
}

static inline hwloc_obj_t
hwloc_get_pcidev_by_busid(hwloc_topology_t topology,
                          unsigned domain, unsigned bus, unsigned dev, unsigned func)
{
  hwloc_obj_t obj = NULL;
  while ((obj = hwloc_get_next_pcidev(topology, obj)) != NULL) {
    if (obj->attr->pcidev.domain == domain
        && obj->attr->pcidev.bus == bus
        && obj->attr->pcidev.dev == dev
        && obj->attr->pcidev.func == func)
      return obj;
  }
  return NULL;
}

static inline hwloc_obj_t
hwloc_get_pcidev_by_busidstring(hwloc_topology_t topology, const char *busid)
{
  unsigned domain = 0; /* default */
  unsigned bus, dev, func;

  if (sscanf(busid, "%x:%x.%x", &bus, &dev, &func) != 3
      && sscanf(busid, "%x:%x:%x.%x", &domain, &bus, &dev, &func) != 4) {
    errno = EINVAL;
    return NULL;
  }

  return hwloc_get_pcidev_by_busid(topology, domain, bus, dev, func);
}

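/* Usage sketch (illustrative; not part of the hwloc API): find the PCI device
 * with the given bus id string (e.g. "0000:02:00.0") and return the closest
 * ancestor that has a cpuset (typically a NUMA node or the machine), which
 * describes its locality. The function name is hypothetical. */
static inline hwloc_obj_t
hwloc_example_locality_of_pcidev(hwloc_topology_t topology, const char *busid)
{
  hwloc_obj_t dev = hwloc_get_pcidev_by_busidstring(topology, busid);
  if (!dev)
    return NULL;
  return hwloc_get_non_io_ancestor_obj(topology, dev);
}
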
static inline hwloc_obj_t
hwloc_get_next_osdev(hwloc_topology_t topology, hwloc_obj_t prev)
{
  return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_OS_DEVICE, prev);
}

static inline hwloc_obj_t
hwloc_get_next_bridge(hwloc_topology_t topology, hwloc_obj_t prev)
{
  return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_BRIDGE, prev);
}

/** \brief Checks whether a given bridge covers a given PCI bus.
 */
static inline int
hwloc_bridge_covers_pcibus(hwloc_obj_t bridge,
                           unsigned domain, unsigned bus)
{
  return bridge->type == HWLOC_OBJ_BRIDGE
    && bridge->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI
    && bridge->attr->bridge.downstream.pci.domain == domain
    && bridge->attr->bridge.downstream.pci.secondary_bus <= bus
    && bridge->attr->bridge.downstream.pci.subordinate_bus >= bus;
}

static inline hwloc_obj_t
hwloc_get_hostbridge_by_pcibus(hwloc_topology_t topology,
                               unsigned domain, unsigned bus)
{
  hwloc_obj_t obj = NULL;
  while ((obj = hwloc_get_next_bridge(topology, obj)) != NULL) {
    if (hwloc_bridge_covers_pcibus(obj, domain, bus)) {
      /* found bridge covering this pcibus, make sure it's a hostbridge */
      assert(obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST);
      assert(obj->parent->type != HWLOC_OBJ_BRIDGE);
      assert(obj->parent->cpuset);
      return obj;
    }
  }
  return NULL;
}

#ifdef __cplusplus
} /* extern "C" */
#endif


#endif /* HWLOC_HELPER_H */