/// Sequentially converts the table into a graph with links from nodes in
/// SrcCol to those in DstCol.
template <class PGraph>
PGraph ToGraph(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) {
  PGraph Graph = PGraph::TObj::New();
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  if (NodeType == atInt) {
    for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
      if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
      TInt SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx];
      TInt DVal = (Table->IntCols)[DstColIdx][CurrRowIdx];
      Graph->AddNodeUnchecked(SVal);
      Graph->AddNodeUnchecked(DVal);
      Graph->AddEdgeUnchecked(SVal, DVal);
    }
  }
  else if (NodeType == atFlt) {
    // float keys are mapped to integer node ids via FltNodeVals
    TFltV FltNodeVals;
    for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
      if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
      TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx];
      TInt SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal);
      TFlt FDVal = (Table->FltCols)[DstColIdx][CurrRowIdx];
      TInt DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal);
      Graph->AddEdge(SVal, DVal);
    }
  }
  else {
    // NodeType == atStr: strings are already mapped to integer ids in StrColMaps
    for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
      if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
      TInt SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
      if (strlen(Table->GetContextKey(SVal)) == 0) { continue; }  // skip empty strings
      TInt DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
      if (strlen(Table->GetContextKey(DVal)) == 0) { continue; }  // skip empty strings
      Graph->AddNodeUnchecked(SVal);
      Graph->AddNodeUnchecked(DVal);
      Graph->AddEdgeUnchecked(SVal, DVal);
    }
  }
  Graph->SortNodeAdjV();
  return Graph;
}
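// Example (editor's sketch; the file name and column names are illustrative,
// and the exact LoadSS signature varies slightly across SNAP versions):
//
//   TTableContext Context;
//   Schema S;
//   S.Add(TPair<TStr, TAttrType>("Src", atInt));
//   S.Add(TPair<TStr, TAttrType>("Dst", atInt));
//   PTable T = TTable::LoadSS(S, "edges.tsv", &Context, '\t');
//   PNGraph G = TSnap::ToGraph<PNGraph>(T, "Src", "Dst", aaFirst);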
/// Converts the table into a network sequentially; edge attributes are taken
/// from the columns listed in EdgeAttrV, node attributes from the columns
/// registered in the table's SrcNodeAttrV/DstNodeAttrV.
template <class PGraph>
PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol,
    TStrV& EdgeAttrV, TAttrAggr AggrPolicy) {
  PGraph Graph = PGraph::TObj::New();
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  // accumulators for node-attribute values (aggregated after the main loop)
  THash<TInt, THash<TStr, TIntV> > NodeIntAttrs;
  THash<TInt, THash<TStr, TFltV> > NodeFltAttrs;
  THash<TInt, THash<TStr, TStrV> > NodeStrAttrs;
  TFltV FltNodeVals;
  for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
    if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
    TInt SVal, DVal;
    if (NodeType == atFlt) {
      TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx];
      SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal);
      TFlt FDVal = (Table->FltCols)[DstColIdx][CurrRowIdx];
      DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal);
    }
    else if (NodeType == atInt || NodeType == atStr) {
      if (NodeType == atInt) {
        SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx];
        DVal = (Table->IntCols)[DstColIdx][CurrRowIdx];
      }
      else {
        SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
        if (strlen(Table->GetContextKey(SVal)) == 0) { continue; }  // skip empty strings
        DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
        if (strlen(Table->GetContextKey(DVal)) == 0) { continue; }  // skip empty strings
      }
      if (!Graph->IsNode(SVal)) { Graph->AddNode(SVal); }
      if (!Graph->IsNode(DVal)) { Graph->AddNode(DVal); }
    }
    // the row index doubles as the edge id
    Graph->AddEdge(SVal, DVal, CurrRowIdx);
    // attach edge attributes from the requested columns
    for (TInt i = 0; i < EdgeAttrV.Len(); i++) {
      TStr ColName = EdgeAttrV[i];
      TAttrType T = Table->GetColType(ColName);
      TInt Index = Table->GetColIdx(ColName);
      switch (T) {
        case atInt:
          Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
          break;
        case atFlt:
          Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName);
          break;
        case atStr:
          Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName);
          break;
      }
    }
    // collect per-node attribute values; they are aggregated after the loop
    if ((Table->SrcNodeAttrV).Len() > 0) {
      Table->AddNodeAttributes(SVal, Table->SrcNodeAttrV, CurrRowIdx, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs);
    }
    if ((Table->DstNodeAttrV).Len() > 0) {
      Table->AddNodeAttributes(DVal, Table->DstNodeAttrV, CurrRowIdx, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs);
    }
  }
  // aggregate the accumulated node-attribute values according to AggrPolicy
  if ((Table->SrcNodeAttrV).Len() > 0 || (Table->DstNodeAttrV).Len() > 0) {
    for (TNEANet::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) {
      TInt NId = NodeI.GetId();
      if (NodeIntAttrs.IsKey(NId)) {
        THash<TStr, TIntV>& Attrs = NodeIntAttrs.GetDat(NId);
        for (THash<TStr, TIntV>::TIter it = Attrs.BegI(); it < Attrs.EndI(); it++) {
          TInt AttrVal = Table->AggregateVector<TInt>(it.GetDat(), AggrPolicy);
          Graph->AddIntAttrDatN(NId, AttrVal, it.GetKey());
        }
      }
      if (NodeFltAttrs.IsKey(NId)) {
        THash<TStr, TFltV>& Attrs = NodeFltAttrs.GetDat(NId);
        for (THash<TStr, TFltV>::TIter it = Attrs.BegI(); it < Attrs.EndI(); it++) {
          TFlt AttrVal = Table->AggregateVector<TFlt>(it.GetDat(), AggrPolicy);
          Graph->AddFltAttrDatN(NId, AttrVal, it.GetKey());
        }
      }
      if (NodeStrAttrs.IsKey(NId)) {
        THash<TStr, TStrV>& Attrs = NodeStrAttrs.GetDat(NId);
        for (THash<TStr, TStrV>::TIter it = Attrs.BegI(); it < Attrs.EndI(); it++) {
          TStr AttrVal = Table->AggregateVector<TStr>(it.GetDat(), AggrPolicy);
          Graph->AddStrAttrDatN(NId, AttrVal, it.GetKey());
        }
      }
    }
  }
  return Graph;
}
template <class PGraph>
PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) {
  TStrV V;
  return ToNetwork<PGraph>(Table, SrcCol, DstCol, V, AggrPolicy);
}
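// Example (editor's sketch, names illustrative): build a PNEANet from an edge
// table T, storing the "Weight" column on every edge. aaFirst keeps the first
// value seen when a node attribute occurs repeatedly; other TAttrAggr policies
// such as aaSum or aaMean can be used instead.
//
//   TStrV EdgeAttrV;
//   EdgeAttrV.Add("Weight");
//   PNEANet Net = TSnap::ToNetwork<PNEANet>(T, "Src", "Dst", EdgeAttrV, aaFirst);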
#ifdef GCC_ATOMIC
/// Performs table to graph conversion in parallel using the sort-first
/// algorithm. This is the recommended method to use.
template <class PGraphMP>
PGraphMP ToGraphMP(PTable Table, const TStr& SrcCol, const TStr& DstCol) {
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  const TInt NumRows = Table->NumValidRows;
  TIntV SrcCol1, DstCol1, SrcCol2, DstCol2;
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { SrcCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
  }
  TIntPrV Partitions;
  Table->GetPartitionRanges(Partitions, omp_get_max_threads());
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  omp_set_num_threads(omp_get_max_threads());
  // copy the endpoint ids into the two column pairs that will be sorted
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        DstCol1[RowId] = RowI.GetIntAttr(DstColIdx);
        SrcCol2[RowId] = RowI.GetIntAttr(SrcColIdx);
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        DstCol1[RowId] = RowI.GetStrMapById(DstColIdx);
        SrcCol2[RowId] = RowI.GetStrMapById(SrcColIdx);
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        RowI++;
      }
    }
  }
  // sort (SrcCol1,DstCol1) by source id and (SrcCol2,DstCol2) by destination id
  omp_set_num_threads(omp_get_max_threads());
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      #pragma omp task untied shared(SrcCol1, DstCol1)
      { TTable::QSortKeyVal(SrcCol1, DstCol1, 0, NumRows-1); }
    }
    #pragma omp single nowait
    {
      #pragma omp task untied shared(SrcCol2, DstCol2)
      { TTable::QSortKeyVal(DstCol2, SrcCol2, 0, NumRows-1); }
    }
    #pragma omp taskwait
  }
  TInt NumThreads = omp_get_max_threads();
  TInt PartSize = (NumRows/NumThreads);
  // pick per-thread partition boundaries that do not split runs of equal ids
  TIntV SrcOffsets, DstOffsets;
  SrcOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
  }
  SrcOffsets.Add(NumRows);
  DstOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
  }
  DstOffsets.Add(NumRows);
  TInt SrcPartCnt = SrcOffsets.Len()-1;
  TInt DstPartCnt = DstOffsets.Len()-1;
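// The two loops above nudge each tentative boundary i*PartSize forward until
// it no longer falls inside a run of equal ids, so all rows of one node stay
// in a single partition. Worked example (editor's illustration): with
// PartSize = 4 and sorted ids {1,1,2,2,2,3,3,4}, the tentative boundary at
// index 4 splits the run of 2s, so it advances to index 5, the first position
// whose id differs from its predecessor.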
  // count the distinct ids in every partition
  TIntV SrcNodeCounts, DstNodeCounts;
  SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
  DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        SrcNodeCounts[i] = 1;
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            SrcNodeCounts[i]++;
            CurrNode = SrcCol1[j];
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        DstNodeCounts[i] = 1;
        TInt CurrNode = DstCol2[DstOffsets[i]];
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            DstNodeCounts[i]++;
            CurrNode = DstCol2[j];
          }
        }
      }
    }
  }
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < SrcPartCnt; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < DstPartCnt; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // record each distinct id and the offset of its first row
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            CurrNode = SrcCol1[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        TInt CurrNode = DstCol2[DstOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            CurrNode = DstCol2[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  // merge the two sorted id lists; each entry records
  // (node id, index into SrcNodeIds or -1, index into DstNodeIds or -1)
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++; j++;
    }
    else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    }
    else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // first pass: reserve each node's in/out adjacency space
  int Delta = (NumNodes+NumThreads-1)/NumThreads;
  omp_set_num_threads(NumThreads);
  #pragma omp parallel for schedule(static,Delta)
  for (int m = 0; m < NumNodes; m++) {
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = DstCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].Reserve(Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = SrcCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].Reserve(Sz);
    }
  }
  // second pass: copy the deduplicated neighbor ids and add the node
  NumThreads = omp_get_max_threads();
  Delta = (NumNodes+NumThreads-1)/(10*NumThreads);
  omp_set_num_threads(NumThreads);
  #pragma omp parallel for schedule(dynamic)
  for (int m = 0; m < NumNodes; m++) {
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = DstCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].CopyUniqueFrom(DstCol1, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = SrcCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].CopyUniqueFrom(SrcCol2, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  return Graph;
}
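// Example (editor's sketch): the parallel sort-first conversion needs no
// aggregation policy because plain graphs carry no attributes. SNAP must be
// built with OpenMP and GCC_ATOMIC support for this path.
//
//   PNGraphMP G = TSnap::ToGraphMP<PNGraphMP>(T, "Src", "Dst");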
/// Performs table to graph conversion in parallel. Uses the hash-first method,
/// which is less optimal than the sort-first method of ToGraphMP.
template <class PGraphMP>
PGraphMP ToGraphMP3(PTable Table, const TStr& SrcCol, const TStr& DstCol) {
  int MaxThreads = omp_get_max_threads();
  int Length, Threads, Delta, Nodes, Last;
  uint64_t NumNodesEst;
  TInt SrcColIdx, DstColIdx;
  // ...
  SrcColIdx = Table->GetColIdx(SrcCol);
  DstColIdx = Table->GetColIdx(DstCol);
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  // estimate the number of distinct nodes by hashing the destination ids
  // into sz buckets (Load is the target rows-per-bucket factor)
  int NumRows = Table->Next.Len();
  int sz = NumRows / Load;
  int *buckets = (int *)malloc(sz * sizeof(int));
  #pragma omp parallel for
  for (int i = 0; i < sz; i++) { buckets[i] = 0; }
  if (NodeType == atInt) {
    #pragma omp parallel for
    for (int i = 0; i < NumRows; i++) {
      int vert = Table->IntCols[DstColIdx][i];
      buckets[vert % sz] = 1;
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for
    for (int i = 0; i < NumRows; i++) {
      int vert = (Table->StrColMaps)[DstColIdx][i];
      buckets[vert % sz] = 1;
    }
  }
  int cnt = 0;
  #pragma omp parallel for reduction(+:cnt)
  for (int i = 0; i < sz; i++) {
    if (buckets[i] == 0) { cnt += 1; }
  }
  // linear-counting estimate: with cnt of sz buckets still empty, the
  // expected number of distinct ids is sz * ln(sz / cnt)
  NumNodesEst = sz * log((double)sz / cnt);
  free(buckets);
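// Worked example of the estimator (editor's illustration): with sz = 1000
// buckets of which cnt = 367 stay empty, the estimate is
// 1000 * ln(1000/367), roughly 1002 distinct ids. Counting occupied buckets
// alone (633 here) would undercount, since distinct ids can collide in one
// bucket.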
  // ... (the graph and the InVec/OutVec per-slot degree counters are
  //      allocated here, sized from NumNodesEst; Nodes and Last are set)
  Length = Graph->Reserved();
  Threads = MaxThreads/2;
  Delta = (Length + Threads - 1) / Threads;
  // first pass: hash every endpoint into the node table and count degrees
  omp_set_num_threads(Threads);
  #pragma omp parallel for schedule(static, Delta)
  for (int CurrRowIdx = 0; CurrRowIdx < Last; CurrRowIdx++) {
    if ((uint64_t) Nodes + 1000 >= NumNodesEst) {
      // node table nearly full: the estimate was too low (handled below)
      continue;
    }
    TInt SVal, DVal;
    if (NodeType == atInt) {
      SVal = Table->IntCols[SrcColIdx][CurrRowIdx];
      DVal = Table->IntCols[DstColIdx][CurrRowIdx];
    }
    else if (NodeType == atStr) {
      SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
      DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
    }
    int SrcIdx = abs((SVal.GetPrimHashCd()) % Length);
    if (!Graph->AddOutEdge1(SrcIdx, SVal, DVal)) {
      #pragma omp critical
      { Nodes++; }
    }
    __sync_fetch_and_add(&OutVec[SrcIdx].Val, 1);
    int DstIdx = abs((DVal.GetPrimHashCd()) % Length);
    if (!Graph->AddInEdge1(DstIdx, SVal, DVal)) {
      #pragma omp critical
      { Nodes++; }
    }
    __sync_fetch_and_add(&InVec[DstIdx].Val, 1);
  }
  if ((uint64_t) Nodes + 1000 >= NumNodesEst) {
    // ... (grow the node table and redo the first pass)
  }
  Graph->SetNodes(Nodes);
  uint64_t Edges = 0;
  for (int i = 0; i < Length; i++) {
    Edges += OutVec[i] + InVec[i];
  }
  // reserve exact in/out adjacency space for every occupied slot
  for (int Idx = 0; Idx < Length; Idx++) {
    if (OutVec[Idx] > 0 || InVec[Idx] > 0) {
      Graph->ReserveNodeDegs(Idx, InVec[Idx], OutVec[Idx]);
    }
  }
  // second pass: write every edge into its reserved adjacency slots
  Length = Graph->Reserved();
  Threads = MaxThreads;
  Delta = (Length + Threads - 1) / Threads;
  omp_set_num_threads(Threads);
  #pragma omp parallel for schedule(static,Delta)
  for (int CurrRowIdx = 0; CurrRowIdx < Last; CurrRowIdx++) {
    TInt SVal, DVal;
    if (NodeType == atInt) {
      SVal = Table->IntCols[SrcColIdx][CurrRowIdx];
      DVal = Table->IntCols[DstColIdx][CurrRowIdx];
    }
    else if (NodeType == atStr) {
      SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
      DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
    }
    Graph->AddOutEdge2(SVal, DVal);
    Graph->AddInEdge2(SVal, DVal);
  }
  // final pass: sort each node's adjacency lists
  Length = Graph->Reserved();
  Threads = MaxThreads*2;
  Delta = (Length + Threads - 1) / Threads;
  omp_set_num_threads(Threads);
  #pragma omp parallel for schedule(dynamic)
  for (int Idx = 0; Idx < Length; Idx++) {
    if (OutVec[Idx] > 0 || InVec[Idx] > 0) {
      Graph->SortEdges(Idx, InVec[Idx], OutVec[Idx]);
    }
  }
  // ... (the final edge count is set from Edges)
  return Graph;
}
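// Example (editor's sketch): the hash-first conversion also takes only the
// table and the endpoint columns. It avoids the global sorts of ToGraphMP but
// pays for atomic and critical-section updates on the shared node table,
// which is why ToGraphMP is the recommended converter.
//
//   PNGraphMP G = TSnap::ToGraphMP3<PNGraphMP>(T, "Src", "Dst");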
/// Does Table to Network conversion in parallel using the sort-first
/// algorithm. This is the recommended method to use.
template <class PGraphMP>
PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol,
    TStrV& EdgeAttrV, TAttrAggr AggrPolicy) {
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TInt NumRows = Table->GetNumValidRows();
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
  // accumulators for node-attribute values (aggregated at the end)
  THash<TInt, THash<TStr, TIntV> > NodeIntAttrs;
  THash<TInt, THash<TStr, TFltV> > NodeFltAttrs;
  THash<TInt, THash<TStr, TStrV> > NodeStrAttrs;
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol2.Reserve(NumRows, NumRows); }
  }
  TIntPrV Partitions;
  Table->GetPartitionRanges(Partitions, omp_get_max_threads());
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  omp_set_num_threads(omp_get_max_threads());
  // copy the endpoint ids, pairing each with its row (edge) id
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  // sort (SrcCol1,EdgeCol1) by source id and (DstCol2,EdgeCol2) by destination id
  omp_set_num_threads(omp_get_max_threads());
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      #pragma omp task untied shared(SrcCol1, EdgeCol1)
      { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); }
    }
    #pragma omp single nowait
    {
      #pragma omp task untied shared(EdgeCol2, DstCol2)
      { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); }
    }
    #pragma omp taskwait
  }
  TInt NumThreads = omp_get_max_threads();
  TInt PartSize = (NumRows/NumThreads);
  // pick per-thread partition boundaries that do not split runs of equal ids
  TIntV SrcOffsets, DstOffsets;
  SrcOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
  }
  SrcOffsets.Add(NumRows);
  DstOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
  }
  DstOffsets.Add(NumRows);
  TInt SrcPartCnt = SrcOffsets.Len()-1;
  TInt DstPartCnt = DstOffsets.Len()-1;
  // count the distinct ids in every partition
  TIntV SrcNodeCounts, DstNodeCounts;
  SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
  DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        SrcNodeCounts[i] = 1;
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            SrcNodeCounts[i]++;
            CurrNode = SrcCol1[j];
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        DstNodeCounts[i] = 1;
        TInt CurrNode = DstCol2[DstOffsets[i]];
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            DstNodeCounts[i]++;
            CurrNode = DstCol2[j];
          }
        }
      }
    }
  }
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < SrcPartCnt; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < DstPartCnt; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // record each distinct id and the offset of its first row
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            CurrNode = SrcCol1[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        TInt CurrNode = DstCol2[DstOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            CurrNode = DstCol2[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  // merge the two sorted id lists; each entry records
  // (node id, index into SrcNodeIds or -1, index into DstNodeIds or -1)
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++; j++;
    }
    else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    }
    else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // add every node together with its incident edge ids
  #pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = EdgeCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  // add edges (edge id = row id) with edge attributes; node-attribute values
  // are collected here and aggregated below
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) {
          TStr ColName = EdgeAttrV[ea_i];
          TAttrType T = Table->GetColType(ColName);
          TInt Index = Table->GetColIdx(ColName);
          switch (T) {
            case atInt:
              Graph->AddIntAttrDatE(RowId, Table->IntCols[Index][RowId], ColName);
              break;
            case atFlt:
              Graph->AddFltAttrDatE(RowId, Table->FltCols[Index][RowId], ColName);
              break;
            case atStr:
              Graph->AddStrAttrDatE(RowId, Table->GetStrVal(Index, RowId), ColName);
              break;
          }
        }
        if ((Table->SrcNodeAttrV).Len() > 0) {
          Table->AddNodeAttributes(SrcId, Table->SrcNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs);
        }
        if ((Table->DstNodeAttrV).Len() > 0) {
          Table->AddNodeAttributes(DstId, Table->DstNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs);
        }
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) {
          TStr ColName = EdgeAttrV[ea_i];
          TAttrType T = Table->GetColType(ColName);
          TInt Index = Table->GetColIdx(ColName);
          switch (T) {
            case atInt:
              Graph->AddIntAttrDatE(RowId, Table->IntCols[Index][RowId], ColName);
              break;
            case atFlt:
              Graph->AddFltAttrDatE(RowId, Table->FltCols[Index][RowId], ColName);
              break;
            case atStr:
              Graph->AddStrAttrDatE(RowId, Table->GetStrVal(Index, RowId), ColName);
              break;
          }
        }
        if ((Table->SrcNodeAttrV).Len() > 0) {
          Table->AddNodeAttributes(SrcId, Table->SrcNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs);
        }
        if ((Table->DstNodeAttrV).Len() > 0) {
          Table->AddNodeAttributes(DstId, Table->DstNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs);
        }
        RowI++;
      }
    }
  }
  // aggregate the accumulated node-attribute values according to AggrPolicy
  if ((Table->SrcNodeAttrV).Len() > 0 || (Table->DstNodeAttrV).Len() > 0) {
    for (typename PGraphMP::TObj::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) {
      TInt NId = NodeI.GetId();
      if (NodeIntAttrs.IsKey(NId)) {
        THash<TStr, TIntV>& Attrs = NodeIntAttrs.GetDat(NId);
        for (THash<TStr, TIntV>::TIter it = Attrs.BegI(); it < Attrs.EndI(); it++) {
          TInt AttrVal = Table->AggregateVector<TInt>(it.GetDat(), AggrPolicy);
          Graph->AddIntAttrDatN(NId, AttrVal, it.GetKey());
        }
      }
      if (NodeFltAttrs.IsKey(NId)) {
        THash<TStr, TFltV>& Attrs = NodeFltAttrs.GetDat(NId);
        for (THash<TStr, TFltV>::TIter it = Attrs.BegI(); it < Attrs.EndI(); it++) {
          TFlt AttrVal = Table->AggregateVector<TFlt>(it.GetDat(), AggrPolicy);
          Graph->AddFltAttrDatN(NId, AttrVal, it.GetKey());
        }
      }
      if (NodeStrAttrs.IsKey(NId)) {
        THash<TStr, TStrV>& Attrs = NodeStrAttrs.GetDat(NId);
        for (THash<TStr, TStrV>::TIter it = Attrs.BegI(); it < Attrs.EndI(); it++) {
          TStr AttrVal = Table->AggregateVector<TStr>(it.GetDat(), AggrPolicy);
          Graph->AddStrAttrDatN(NId, AttrVal, it.GetKey());
        }
      }
    }
  }
  Graph->SetEdges(NumRows);
  return Graph;
}

template <class PGraphMP>
PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) {
  TStrV V;
  return ToNetworkMP<PGraphMP>(Table, SrcCol, DstCol, V, AggrPolicy);
}
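// Example (editor's sketch, names illustrative): parallel conversion of an
// edge table into an attributed network; every row becomes an edge whose edge
// id is the row id.
//
//   TStrV EdgeAttrV;
//   EdgeAttrV.Add("Weight");
//   PNEANetMP Net = TSnap::ToNetworkMP<PNEANetMP>(T, "Src", "Dst", EdgeAttrV, aaFirst);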
/// Implements table to network conversion in parallel. Not the recommended
/// algorithm; use ToNetworkMP instead.
template <class PGraphMP>
PGraphMP ToNetworkMP2(PTable Table, const TStr& SrcCol, const TStr& DstCol,
    TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) {
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TInt NumRows = Table->NumValidRows;
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol2.Reserve(NumRows, NumRows); }
  }
  TIntPrV Partitions;
  const int NThreads = 40;
  Table->GetPartitionRanges(Partitions, NThreads);
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  // copy the endpoint ids, pairing each with its row (edge) id
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  // equal-size index ranges over the rows, one per thread
  int Parts[NThreads+1];
  for (int i = 0; i < NThreads; i++) {
    Parts[i] = NumRows.Val / NThreads * i;
  }
  Parts[NThreads] = NumRows;
  // sort each thread's range and record its first and last ids:
  // rows 0/1 hold the first src/dst ids, rows 2/3 the last
  TInt ExtremePoints[4][NThreads];
  omp_set_num_threads(omp_get_max_threads());
  #pragma omp parallel
  {
    #pragma omp for schedule(static) nowait
    for (int i = 0; i < NThreads; i++) {
      TInt StartPos = Parts[i];
      TInt EndPos = Parts[i+1]-1;
      TTable::QSortKeyVal(SrcCol1, EdgeCol1, StartPos, EndPos);
      ExtremePoints[0][i] = SrcCol1[StartPos];
      ExtremePoints[2][i] = SrcCol1[EndPos];
    }
    #pragma omp for schedule(static) nowait
    for (int i = 0; i < NThreads; i++) {
      TInt StartPos = Parts[i];
      TInt EndPos = Parts[i+1]-1;
      TTable::QSortKeyVal(DstCol2, EdgeCol2, StartPos, EndPos);
      ExtremePoints[1][i] = DstCol2[StartPos];
      ExtremePoints[3][i] = DstCol2[EndPos];
    }
  }
  // find the global minimum and maximum node ids
  TInt MinId(INT_MAX);
  for (int j = 0; j < 2; j++) {
    for (int i = 0; i < NThreads; i++) {
      if (MinId > ExtremePoints[j][i]) { MinId = ExtremePoints[j][i]; }
    }
  }
  TInt MaxId(INT_MIN);
  for (int j = 2; j < 4; j++) {
    for (int i = 0; i < NThreads; i++) {
      if (MaxId < ExtremePoints[j][i]) { MaxId = ExtremePoints[j][i]; }
    }
  }
  // split [MinId, MaxId] into NumCollectors contiguous id ranges
  const int NumCollectors = 20;
  int Range = MaxId.Val - MinId.Val;
  TIntV IdRanges(NumCollectors+1);
  for (int j = 0; j < NumCollectors; j++) {
    IdRanges[j] = MinId + Range/NumCollectors*j;
  }
  IdRanges[NumCollectors] = MaxId+1;
  // per-thread offsets of the first row falling in each collector's id range
  int SrcOffsets[NThreads][NumCollectors+1];
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < NThreads; i++) {
    int CollectorId = 0;
    for (int j = Parts[i]; j < Parts[i+1]; j++) {
      while (SrcCol1[j] >= IdRanges[CollectorId]) {
        SrcOffsets[i][CollectorId++] = j;
      }
    }
    while (CollectorId <= NumCollectors) {
      SrcOffsets[i][CollectorId++] = Parts[i+1];
    }
  }
  int DstOffsets[NThreads][NumCollectors+1];
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < NThreads; i++) {
    int CollectorId = 0;
    for (int j = Parts[i]; j < Parts[i+1]; j++) {
      while (DstCol2[j] >= IdRanges[CollectorId]) {
        DstOffsets[i][CollectorId++] = j;
      }
    }
    while (CollectorId <= NumCollectors) {
      DstOffsets[i][CollectorId++] = Parts[i+1];
    }
  }
  // prefix sums: where each collector's block of rows begins
  TIntV SrcCollectorOffsets(NumCollectors+1);
  SrcCollectorOffsets[0] = 0;
  for (int k = 0; k < NumCollectors; k++) {
    int SumOffset = 0;
    for (int i = 0; i < NThreads; i++) {
      SumOffset += SrcOffsets[i][k+1] - SrcOffsets[i][k];
    }
    SrcCollectorOffsets[k+1] = SrcCollectorOffsets[k] + SumOffset;
  }
  TIntV DstCollectorOffsets(NumCollectors+1);
  DstCollectorOffsets[0] = 0;
  for (int k = 0; k < NumCollectors; k++) {
    int SumOffset = 0;
    for (int i = 0; i < NThreads; i++) {
      SumOffset += DstOffsets[i][k+1] - DstOffsets[i][k];
    }
    DstCollectorOffsets[k+1] = DstCollectorOffsets[k] + SumOffset;
  }
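// Worked example (editor's illustration): with NThreads = 2 and
// NumCollectors = 2, if thread 0 holds 3 rows for collector 0 and 5 for
// collector 1 while thread 1 holds 4 and 2, the prefix sums give
// SrcCollectorOffsets = {0, 7, 14}: collector 0 fills slots [0,7) and
// collector 1 fills [7,14).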
  TIntV SrcCol3, EdgeCol3, EdgeCol4, DstCol4;
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol3.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol3.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol4.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol4.Reserve(NumRows, NumRows); }
  }
  // redistribute rows into per-collector contiguous blocks, sort each block,
  // and count its distinct ids
  TIntV SrcNodeCounts(NumCollectors), DstNodeCounts(NumCollectors);
  #pragma omp parallel for schedule(static)
  for (int k = 0; k < NumCollectors; k++) {
    int ind = SrcCollectorOffsets[k];
    for (int i = 0; i < NThreads; i++) {
      for (int j = SrcOffsets[i][k]; j < SrcOffsets[i][k+1]; j++) {
        SrcCol3[ind] = SrcCol1[j];
        EdgeCol3[ind] = EdgeCol1[j];
        ind++;
      }
    }
    TTable::QSortKeyVal(SrcCol3, EdgeCol3, SrcCollectorOffsets[k], SrcCollectorOffsets[k+1]-1);
    int SrcCount = 0;
    if (SrcCollectorOffsets[k+1] > SrcCollectorOffsets[k]) {
      SrcCount = 1;
      for (int j = SrcCollectorOffsets[k]+1; j < SrcCollectorOffsets[k+1]; j++) {
        if (SrcCol3[j] != SrcCol3[j-1]) { SrcCount++; }
      }
    }
    SrcNodeCounts[k] = SrcCount;
    ind = DstCollectorOffsets[k];
    for (int i = 0; i < NThreads; i++) {
      for (int j = DstOffsets[i][k]; j < DstOffsets[i][k+1]; j++) {
        DstCol4[ind] = DstCol2[j];
        EdgeCol4[ind] = EdgeCol2[j];
        ind++;
      }
    }
    TTable::QSortKeyVal(DstCol4, EdgeCol4, DstCollectorOffsets[k], DstCollectorOffsets[k+1]-1);
    int DstCount = 0;
    if (DstCollectorOffsets[k+1] > DstCollectorOffsets[k]) {
      DstCount = 1;
      for (int j = DstCollectorOffsets[k]+1; j < DstCollectorOffsets[k+1]; j++) {
        if (DstCol4[j] != DstCol4[j-1]) { DstCount++; }
      }
    }
    DstNodeCounts[k] = DstCount;
  }
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < NumCollectors; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < NumCollectors; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // record each distinct id and the offset of its first row
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < 2*NumCollectors; t++) {
    if (t < NumCollectors) {
      TInt i = t;
      if (SrcCollectorOffsets[i] < SrcCollectorOffsets[i+1]) {
        TInt CurrNode = SrcCol3[SrcCollectorOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcCollectorOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcCollectorOffsets[i]+1; j < SrcCollectorOffsets[i+1]; j++) {
          while (j < SrcCollectorOffsets[i+1] && SrcCol3[j] == CurrNode) { j++; }
          if (j < SrcCollectorOffsets[i+1]) {
            CurrNode = SrcCol3[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - NumCollectors;
      if (DstCollectorOffsets[i] < DstCollectorOffsets[i+1]) {
        TInt CurrNode = DstCol4[DstCollectorOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstCollectorOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstCollectorOffsets[i]+1; j < DstCollectorOffsets[i+1]; j++) {
          while (j < DstCollectorOffsets[i+1] && DstCol4[j] == CurrNode) { j++; }
          if (j < DstCollectorOffsets[i+1]) {
            CurrNode = DstCol4[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  // merge the two sorted id lists; each entry records
  // (node id, index into SrcNodeIds or -1, index into DstNodeIds or -1)
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++; j++;
    }
    else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    }
    else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // add every node together with its incident edge ids
  #pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = EdgeCol3.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].CopyUniqueFrom(EdgeCol3, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol4.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].CopyUniqueFrom(EdgeCol4, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  // add every edge (edge id = row id)
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  Graph->SetEdges(NumRows);
  // ...
  return Graph;
}
template <class PGraphMP>
PGraphMP ToNetworkMP2(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) {
  TStrV V;
  return ToNetworkMP2<PGraphMP>(Table, SrcCol, DstCol, V, V, V, AggrPolicy);
}
#endif // GCC_ATOMIC
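// ToNetworkMP2 replaces the two global sorts of ToNetworkMP with NThreads
// partitioned sorts followed by a collector-based redistribution of rows by
// id range. Per the library documentation it is not the recommended
// algorithm; prefer ToNetworkMP. Example (editor's sketch):
//
//   PNEANetMP Net = TSnap::ToNetworkMP2<PNEANetMP>(T, "Src", "Dst", aaFirst);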
/// Converts the Table into a network sequentially; edge attributes are taken
/// from the columns listed in EdgeAttrV.
template <class PGraph>
PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol,
    TStrV& EdgeAttrV, TAttrAggr AggrPolicy) {
  PGraph Graph = PGraph::TObj::New();
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  TFltV FltNodeVals;
  for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
    if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
    TInt SVal, DVal;
    if (NodeType == atFlt) {
      TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx];
      SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal);
      TFlt FDVal = (Table->FltCols)[DstColIdx][CurrRowIdx];
      DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal);
    }
    else if (NodeType == atInt || NodeType == atStr) {
      if (NodeType == atInt) {
        SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx];
        DVal = (Table->IntCols)[DstColIdx][CurrRowIdx];
      }
      else {
        SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
        if (strlen(Table->GetContextKey(SVal)) == 0) { continue; }  // skip empty strings
        DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
        if (strlen(Table->GetContextKey(DVal)) == 0) { continue; }  // skip empty strings
      }
      if (!Graph->IsNode(SVal)) { Graph->AddNode(SVal); }
      if (!Graph->IsNode(DVal)) { Graph->AddNode(DVal); }
    }
    // the row index doubles as the edge id
    Graph->AddEdge(SVal, DVal, CurrRowIdx);
    // attach edge attributes from the requested columns
    for (TInt i = 0; i < EdgeAttrV.Len(); i++) {
      TStr ColName = EdgeAttrV[i];
      TAttrType T = Table->GetColType(ColName);
      TInt Index = Table->GetColIdx(ColName);
      switch (T) {
        case atInt:
          Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
          break;
        case atFlt:
          Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName);
          break;
        case atStr:
          Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName);
          break;
      }
    }
  }
  return Graph;
}
/// Does Table to Network conversion in parallel using the sort-first
/// algorithm; node and edge attribute column names are passed explicitly.
template <class PGraphMP>
PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol,
    TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) {
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TInt NumRows = Table->GetNumValidRows();
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol2.Reserve(NumRows, NumRows); }
  }
  TIntPrV Partitions;
  Table->GetPartitionRanges(Partitions, omp_get_max_threads());
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  omp_set_num_threads(omp_get_max_threads());
  // copy the endpoint ids, pairing each with its row (edge) id
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  // sort (SrcCol1,EdgeCol1) by source id and (DstCol2,EdgeCol2) by destination id
  omp_set_num_threads(omp_get_max_threads());
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      #pragma omp task untied shared(SrcCol1, EdgeCol1)
      { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); }
    }
    #pragma omp single nowait
    {
      #pragma omp task untied shared(EdgeCol2, DstCol2)
      { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); }
    }
    #pragma omp taskwait
  }
  TInt NumThreads = omp_get_max_threads();
  TInt PartSize = (NumRows/NumThreads);
  // pick per-thread partition boundaries that do not split runs of equal ids
  TIntV SrcOffsets, DstOffsets;
  SrcOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
  }
  SrcOffsets.Add(NumRows);
  DstOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
  }
  DstOffsets.Add(NumRows);
  TInt SrcPartCnt = SrcOffsets.Len()-1;
  TInt DstPartCnt = DstOffsets.Len()-1;
  // count the distinct ids in every partition
  TIntV SrcNodeCounts, DstNodeCounts;
  SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
  DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        SrcNodeCounts[i] = 1;
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            SrcNodeCounts[i]++;
            CurrNode = SrcCol1[j];
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        DstNodeCounts[i] = 1;
        TInt CurrNode = DstCol2[DstOffsets[i]];
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            DstNodeCounts[i]++;
            CurrNode = DstCol2[j];
          }
        }
      }
    }
  }
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < SrcPartCnt; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < DstPartCnt; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // record each distinct id and the offset of its first row
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            CurrNode = SrcCol1[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        TInt CurrNode = DstCol2[DstOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            CurrNode = DstCol2[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  // merge the two sorted id lists; each entry records
  // (node id, index into SrcNodeIds or -1, index into DstNodeIds or -1)
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++; j++;
    }
    else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    }
    else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // add every node together with its incident edge ids
  #pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = EdgeCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  // add every edge (edge id = row id)
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  Graph->SetEdges(NumRows);
  Graph->SetMxEId(NumRows);
  // attach edge attributes sequentially; the edge id equals the row index
  for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
    if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
    for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) {
      TStr ColName = EdgeAttrV[ea_i];
      TAttrType T = Table->GetColType(ColName);
      TInt Index = Table->GetColIdx(ColName);
      switch (T) {
        case atInt:
          Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
          break;
        case atFlt:
          Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName);
          break;
        case atStr:
          Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName);
          break;
      }
    }
  }
  return Graph;
}
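// Design note (editor's reading of the code above): the topology is built
// with the parallel sort-first machinery, while edge attributes are attached
// in a single sequential pass, presumably because the attribute hash tables
// are not safe for concurrent writes.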
/// Converts the Table into a network sequentially; edge attributes come from
/// EdgeAttrV and node attributes from the NodeAttrV columns of NodeTable.
template <class PGraph>
PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol,
    TStrV& EdgeAttrV, PTable NodeTable, const TStr& NodeCol, TStrV& NodeAttrV,
    TAttrAggr AggrPolicy) {
  PGraph Graph = PGraph::TObj::New();
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TAttrType NodeTypeN = NodeTable->GetColType(NodeCol);
  const TInt NodeColIdx = NodeTable->GetColIdx(NodeCol);
  TFltV FltNodeVals;
  for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
    if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
    TInt SVal, DVal;
    if (NodeType == atFlt) {
      TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx];
      SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal);
      TFlt FDVal = (Table->FltCols)[DstColIdx][CurrRowIdx];
      DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal);
    }
    else if (NodeType == atInt || NodeType == atStr) {
      if (NodeType == atInt) {
        SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx];
        DVal = (Table->IntCols)[DstColIdx][CurrRowIdx];
      }
      else {
        SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
        if (strlen(Table->GetContextKey(SVal)) == 0) { continue; }  // skip empty strings
        DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
        if (strlen(Table->GetContextKey(DVal)) == 0) { continue; }  // skip empty strings
      }
      if (!Graph->IsNode(SVal)) { Graph->AddNode(SVal); }
      if (!Graph->IsNode(DVal)) { Graph->AddNode(DVal); }
    }
    // the row index doubles as the edge id
    Graph->AddEdge(SVal, DVal, CurrRowIdx);
    // attach edge attributes from the requested columns
    for (TInt i = 0; i < EdgeAttrV.Len(); i++) {
      TStr ColName = EdgeAttrV[i];
      TAttrType T = Table->GetColType(ColName);
      TInt Index = Table->GetColIdx(ColName);
      switch (T) {
        case atInt:
          Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
          break;
        case atFlt:
          Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName);
          break;
        case atStr:
          Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName);
          break;
      }
    }
  }
  // attach node attributes from NodeTable
  if (NodeAttrV.Len() > 0) {
    for (int CurrRowIdx = 0; CurrRowIdx < (NodeTable->Next).Len(); CurrRowIdx++) {
      if ((NodeTable->Next)[CurrRowIdx] == NodeTable->Invalid) { continue; }
      TInt NId;
      if (NodeTypeN == atInt) {
        NId = (NodeTable->IntCols)[NodeColIdx][CurrRowIdx];
      }
      else if (NodeTypeN == atStr) {
        NId = (NodeTable->StrColMaps)[NodeColIdx][CurrRowIdx];
      }
      for (TInt i = 0; i < NodeAttrV.Len(); i++) {
        TStr ColName = NodeAttrV[i];
        TAttrType T = NodeTable->GetColType(ColName);
        TInt Index = NodeTable->GetColIdx(ColName);
        switch (T) {
          case atInt:
            Graph->AddIntAttrDatN(NId, NodeTable->IntCols[Index][CurrRowIdx], ColName);
            break;
          case atFlt:
            Graph->AddFltAttrDatN(NId, NodeTable->FltCols[Index][CurrRowIdx], ColName);
            break;
          case atStr:
            Graph->AddStrAttrDatN(NId, NodeTable->GetStrVal(Index, CurrRowIdx), ColName);
            break;
        }
      }
    }
  }
  return Graph;
}
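// Example (editor's sketch; this overload's parameter order is reconstructed
// from its body, so treat it as an assumption): given an edge table T and a
// node table NT whose "NId" column matches the endpoints of T, copy NT's
// "Age" column onto the nodes:
//
//   TStrV EdgeAttrV, NodeAttrV;
//   NodeAttrV.Add("Age");
//   PNEANet Net = TSnap::ToNetwork<PNEANet>(T, "Src", "Dst", EdgeAttrV,
//       NT, "NId", NodeAttrV, aaFirst);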
#ifdef GCC_ATOMIC
/// Does Table to Network conversion in parallel; node attributes are taken
/// from the NodeAttrV columns of NodeTable.
template <class PGraphMP>
PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol,
    TStrV& EdgeAttrV, PTable NodeTable, const TStr& NodeCol, TStrV& NodeAttrV,
    TAttrAggr AggrPolicy) {
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TInt NumRows = Table->GetNumValidRows();
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
  const TAttrType NodeTypeN = NodeTable->GetColType(NodeCol);
  const TInt NodeColIdx = NodeTable->GetColIdx(NodeCol);
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol2.Reserve(NumRows, NumRows); }
  }
  TIntPrV Partitions;
  Table->GetPartitionRanges(Partitions, omp_get_max_threads());
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  // copy the endpoint ids, pairing each with its row (edge) id
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  // sort (SrcCol1,EdgeCol1) by source id and (DstCol2,EdgeCol2) by destination id
  omp_set_num_threads(omp_get_max_threads());
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      #pragma omp task untied shared(SrcCol1, EdgeCol1)
      { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); }
    }
    #pragma omp single nowait
    {
      #pragma omp task untied shared(EdgeCol2, DstCol2)
      { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); }
    }
    #pragma omp taskwait
  }
  TInt NumThreads = omp_get_max_threads();
  TInt PartSize = (NumRows/NumThreads);
  // pick per-thread partition boundaries that do not split runs of equal ids
  TIntV SrcOffsets, DstOffsets;
  SrcOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
  }
  SrcOffsets.Add(NumRows);
  DstOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
  }
  DstOffsets.Add(NumRows);
  TInt SrcPartCnt = SrcOffsets.Len()-1;
  TInt DstPartCnt = DstOffsets.Len()-1;
  // count the distinct ids in every partition
  TIntV SrcNodeCounts, DstNodeCounts;
  SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
  DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        SrcNodeCounts[i] = 1;
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            SrcNodeCounts[i]++;
            CurrNode = SrcCol1[j];
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        DstNodeCounts[i] = 1;
        TInt CurrNode = DstCol2[DstOffsets[i]];
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            DstNodeCounts[i]++;
            CurrNode = DstCol2[j];
          }
        }
      }
    }
  }
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < SrcPartCnt; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < DstPartCnt; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // record each distinct id and the offset of its first row
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            CurrNode = SrcCol1[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        TInt CurrNode = DstCol2[DstOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            CurrNode = DstCol2[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  // merge the two sorted id lists; each entry records
  // (node id, index into SrcNodeIds or -1, index into DstNodeIds or -1)
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++; j++;
    }
    else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    }
    else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // add every node together with its incident edge ids
  #pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = EdgeCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  // add every edge (edge id = row id)
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  Graph->SetEdges(NumRows);
  Graph->SetMxEId(NumRows);
  // attach edge attributes sequentially; the edge id equals the row index
  for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
    if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
    for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) {
      TStr ColName = EdgeAttrV[ea_i];
      TAttrType T = Table->GetColType(ColName);
      TInt Index = Table->GetColIdx(ColName);
      switch (T) {
        case atInt:
          Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
          break;
        case atFlt:
          Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName);
          break;
        case atStr:
          Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName);
          break;
      }
    }
  }
  // attach node attributes from NodeTable
  if (NodeAttrV.Len() > 0) {
    for (int CurrRowIdx = 0; CurrRowIdx < (NodeTable->Next).Len(); CurrRowIdx++) {
      if ((NodeTable->Next)[CurrRowIdx] == NodeTable->Invalid) { continue; }
      TInt NId;
      if (NodeTypeN == atInt) {
        NId = (NodeTable->IntCols)[NodeColIdx][CurrRowIdx];
      }
      else if (NodeTypeN == atStr) {
        NId = (NodeTable->StrColMaps)[NodeColIdx][CurrRowIdx];
      }
      for (TInt i = 0; i < NodeAttrV.Len(); i++) {
        TStr ColName = NodeAttrV[i];
        TAttrType T = NodeTable->GetColType(ColName);
        TInt Index = NodeTable->GetColIdx(ColName);
        switch (T) {
          case atInt:
            Graph->AddIntAttrDatN(NId, NodeTable->IntCols[Index][CurrRowIdx], ColName);
            break;
          case atFlt:
            Graph->AddFltAttrDatN(NId, NodeTable->FltCols[Index][CurrRowIdx], ColName);
            break;
          case atStr:
            Graph->AddStrAttrDatN(NId, NodeTable->GetStrVal(Index, CurrRowIdx), ColName);
            break;
        }
      }
    }
  }
  return Graph;
}
#endif // GCC_ATOMIC