OpenASIP 2.2
Loading...
Searching...
No Matches
BF2ScheduleFront.cc
Go to the documentation of this file.
1/*
2 Copyright (c) 2002-2014 Tampere University.
3
4 This file is part of TTA-Based Codesign Environment (TCE).
5
6 Permission is hereby granted, free of charge, to any person obtaining a
7 copy of this software and associated documentation files (the "Software"),
8 to deal in the Software without restriction, including without limitation
9 the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 and/or sell copies of the Software, and to permit persons to whom the
11 Software is furnished to do so, subject to the following conditions:
12
13 The above copyright notice and this permission notice shall be included in
14 all copies or substantial portions of the Software.
15
16 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 DEALINGS IN THE SOFTWARE.
23 */
24
25/**
26 * @file BF2ScheduleFront.cc
27 *
28 * Definition of BF2ScheduleFront class
29 *
30 * Tries to schedule a group of moves.
31 *
 32 * @author Heikki Kultala 2014-2020 (heikki.kultala-no.spam-tuni.fi)
33 * @note rating: red
34 */
35
36#include "BF2ScheduleFront.hh"
37#include "BF2Scheduler.hh"
38#include "Move.hh"
39#include "Machine.hh"
40#include "ControlUnit.hh"
43#include "Operation.hh"
44#include "Unit.hh"
45#include "HWOperation.hh"
46#include "BUMoveNodeSelector.hh"
47
48#include "BFScheduleBU.hh"
49#include "BFScheduleTD.hh"
50#include "BFScheduleExact.hh"
51#include "BFDREEarly.hh"
52
53#include "BFSwapOperands.hh"
54#include "BFDropPreShared.hh"
55#include "FUPort.hh"
56#include "Terminal.hh"
57
58//#define DEBUG_BUBBLEFISH_SCHEDULER
59
60bool
62
// BF2ScheduleFront::operator()() (tail): schedules one whole scheduling
// front starting from a selected move, then drops the path-length cache so
// it does not outlive this front.
// NOTE(review): Doxygen text dump -- original line numbers are fused into
// each line, and the signature line plus the initialization of mn2
// (orig. lines 61, 67-68; presumably via getMoveNodeFromFrontBU(), which the
// cross-reference list declares) were lost in extraction. Restore the
// dropped lines from upstream before compiling.
63#ifdef DEBUG_BUBBLEFISH_SCHEDULER
64 std::cerr << std::endl << "Got: " << mn_.toString()
65 << " to schedule(1)" << std::endl;
66#endif
// NOTE(review): orig. lines 67-68 dropped here (mn2 initialization).
69 assert(mn2 != NULL);
70
71 bool ok = scheduleFrontFromMove(*mn2);
72 // Do not waste memory by keeping it in the stack of performed
73 // operations.
74 // Not needed anymore. Can calculate again for next front.
75 // Consider sharing between different fronts if still too slow.
76 pathLengthCache_.clear();
77 return ok;
78}
79
80
// BF2ScheduleFront::scheduleFrontFromMove(MoveNode& mn): schedules the whole
// front reachable from mn. On each tryToScheduleMoveOuter() failure it
// undoes everything and retries with a smaller "latest" cycle until the
// retry cycle would fall below smallestRMCycle - (maximumLatency()+1),
// which is reported as a hard failure (fail.dot is dumped).
// NOTE(review): the signature line (orig. 81) and a few interior lines were
// lost in the Doxygen-dump extraction (marked below) -- not compilable as-is.
82 MoveNode* mn2 = &mn;
83 int latest = lc_;
84#ifdef DEBUG_BUBBLEFISH_SCHEDULER
85 std::cerr << "ScheduleFrontFromMove called: " <<mn.toString()<< std::endl;
86#endif
87 while (mn2 != NULL) {
// Control-flow moves must not land in the delay slots at the end of
// the block, so cap "latest" by the control unit's delay-slot count.
88 if (mn2->move().isControlFlowMove()) {
89 latest = std::min(
90 latest,
91 lc_- targetMachine().controlUnit()->delaySlots());
92 }
93
94 if (!tryToScheduleMoveOuter(*mn2, latest)) {
95#ifdef DEBUG_BUBBLEFISH_SCHEDULER
96 std::cerr << "TryToScheduleMoveOuter " << mn2->toString() <<
97 " failed!, latest now" << latest << std::endl;
98#endif
// Roll back the whole front and retry at the (now smaller) latest cycle.
99 undo();
100 int smallestRMCycle = rm().smallestCycle();
101 if (smallestRMCycle == INT_MAX) {
102 smallestRMCycle = lc_;
103 }
104#ifdef DEBUG_BUBBLEFISH_SCHEDULER
105 std::cerr << "Latest after outer fail: " << latest << std::endl;
106 std::cerr << "smallest rm cycle: " << smallestRMCycle <<
107 " max latency+1: " << targetMachine().maximumLatency()+1
108 << std::endl;
109#endif
// Give up once the retry cycle is pushed unreasonably far below the
// earliest already-used cycle.
110 if (latest < 0 || latest <
111 (smallestRMCycle - (targetMachine().maximumLatency()+1))) {
112 if (Application::verboseLevel() > 1 ||
113 rm().initiationInterval() == 0) {
114 std::cerr << "Retry to too early cycle. cannot schedule: "
115 << mn2->toString()
116 << std::endl;
117 ddg().writeToDotFile("fail.dot");
118 }
119 return false;
120 } else {
121#ifdef DEBUG_BUBBLEFISH_SCHEDULER
122 std::cerr << "OK or retry at earlier cycle.." << std::endl;
123#endif
124 }
125 }
126#ifdef DEBUG_BUBBLEFISH_SCHEDULER
127 std::cerr << "TryToScheduleMoveOuter ok: " << mn2->toString() <<std::endl;
128#endif
// NOTE(review): orig. lines 129-131 dropped in extraction -- presumably
// the advance of mn2 (getMoveNodeFromFrontBU()); confirm against upstream.
132 }
133#ifdef DEBUG_BUBBLEFISH_SCHEDULER
134 std::cerr << "Schedulingfront scheduled ok!: " << this << std::endl;
135 printFront("\t");
136#endif
// Sanity check: every front move must now be scheduled, dead, or
// (per the dropped condition below) otherwise exempt.
137 for (auto i : schedulingFront_) {
138 if (!i->isScheduled()) {
139 if (!sched_.isDeadResult(*i) &&
// NOTE(review): orig. line 140 (rest of this condition) dropped --
// likely an isPreLoopSharedOperand() check; confirm against upstream.
141 std::cerr << "Front Has unscheduled move: "
142 << (*i).toString() << std::endl;
143 ddg().writeToDotFile("front_unscheduled_move.dot");
144 assert(0);
145 }
146 } else {
147#ifdef DEBUG_BUBBLEFISH_SCHEDULER
148 std::cerr << "\tNotifying scheduled: " << (*i).toString()
149 << std::endl;
150#endif
// NOTE(review): orig. line 151 dropped -- presumably
// selector().notifyScheduled(*i); confirm against upstream.
152
153 }
154 }
155
// Wake up nodes whose predecessors may have become scheduled above.
156 for (auto n: nodesToNotify_) {
157 if (!sched_.isDeadResult(*n) && !n->isScheduled()) {
158#ifdef DEBUG_BUBBLEFISH_SCHEDULER
159 std::cerr << "\tMight be ready: " << n->toString() << std::endl;
160#endif
// NOTE(review): orig. line 161 dropped -- presumably
// selector().mightBeReady(*n); confirm against upstream.
162 }
163 }
165 return true;
166}
167
168
169
// BF2ScheduleFront::tryToScheduleMoveOuter(MoveNode& mn, int& latestCycle):
// attempts to schedule one move, progressively disabling optimizations
// (early sharing, early bypass, late bypass) until something sticks.
// Returns true on success (or when the failure was converted into a
// constraint such as a banned bypass/operand share); returns false with a
// reduced latestCycle to request a retry at an earlier cycle.
// NOTE(review): the signature (orig. 170) and a few interior lines were lost
// in the Doxygen-dump extraction (marked below) -- not compilable as-is.
171#ifdef DEBUG_BUBBLEFISH_SCHEDULER
172 std::cerr << std::endl << "\tGot: " << mn.toString() << " to schedule(2)"
173 << std::endl;
174#endif
175 while(true) {
// NOTE(review): orig. line 176 dropped -- the condition opening this
// branch; presumably an isPreLoopSharedOperand(mn) check guarding the
// "drop to prolog" path. Confirm against upstream.
177 assert(prologRM() != NULL);
178#ifdef DEBUG_BUBBLEFISH_SCHEDULER
179 std::cerr << "\tShould scheudule pre-opshare to prefix: "
180 << mn.toString() << std::endl;
181#endif
182 BFOptimization* sbu = new BFDropPreShared(sched_, mn);
183 bool ok = runPreChild(sbu);
184 if (!ok) {
// On failure, pull latestCycle below the latest scheduled front move.
185 int lcFront = latestScheduledOfFrontCycle();
186 if (lcFront != -1 && lcFront <= latestCycle) {
187 latestCycle = lcFront -1;
188 } else{
189 latestCycle--;
190 }
191 return false;
192 } else {
193 return true;
194 }
195 }
196
197 // Kill (result) moves that write to values that are
198 // never used(even when not bypassing).
199 if (mn.isDestinationVariable()) {
200 BFOptimization* dre = new BFDREEarly(sched_, mn);
201 if (runPreChild(dre)) {
202 return true;
203 }
204 }
205
// NOTE(review): orig. line 206 dropped -- presumably
// "BF2Scheduler::SchedulingLimits limits = getPreferredLimits(mn);"
// (limits is used below without a visible declaration). Confirm upstream.
207 if (limits.direction == BF2Scheduler::EXACTCYCLE &&
208 latestCycle < limits.latestCycle) {
// An exact-cycle move cannot be pushed earlier; fail terminally.
209 latestCycle = INT_MIN;
210#ifdef DEBUG_BUBBLEFISH_SCHEDULER
211 std::cerr << "\tlatestCycle later than exact limit. failing."
212 << std::endl;
213#endif
214 return false;
215 }
216
217 limits.latestCycle = std::min(limits.latestCycle, latestCycle);
218#ifdef DEBUG_BUBBLEFISH_SCHEDULER
219 std::cerr << "\tFirst all optimizations on" << std::endl;
220#endif
// First attempt: all optimizations enabled.
221 int schedRes = scheduleMove(mn, limits, true, true, true);
222 if (schedRes >= 0) {
223#ifdef DEBUG_BUBBLEFISH_SCHEDULER
224 std::cerr << "\tScheduling of: " << mn.toString() << " ok "
225 << std::endl;
226#endif
227 return true;
228 }
229
// Top-down failed: retry bottom-up over the full range.
230 if (limits.direction == BF2Scheduler::TOPDOWN) {
// NOTE(review): orig. line 231 dropped -- presumably
// "limits.direction = BF2Scheduler::BOTTOMUP;". Confirm upstream.
232 limits.earliestCycle = 0;
233#ifdef DEBUG_BUBBLEFISH_SCHEDULER
234 std::cerr << "\tTOPDOWN failed, trying bottomup" << std::endl;
235#endif
236 if (scheduleMove(mn, limits) >= 0) {
237#ifdef DEBUG_BUBBLEFISH_SCHEDULER
238 std::cerr << "\ttopdown back to bottomup ok" << std::endl;
239#endif
240 return true;
241 }
242 }
243#ifdef DEBUG_BUBBLEFISH_SCHEDULER
244 std::cerr << "\tTrying without early sharing" << std::endl;
245#endif
246 // disable early sharing;
247 if (scheduleMove(mn, limits, true, true, false) >=0) {
248 return true;
249 }
250
251
252#ifdef DEBUG_BUBBLEFISH_SCHEDULER
253 std::cerr << "\tTrying without early BP" << std::endl;
254#endif
255 // disable early bypass;
256 if (scheduleMove(mn, limits, false, true) >=0) {
257#ifdef DEBUG_BUBBLEFISH_SCHEDULER
258 std::cerr << "\tok without early BP" << std::endl;
259#endif
260 return true;
261 }
262
263#ifdef DEBUG_BUBBLEFISH_SCHEDULER
264 std::cerr << "\tTrying without early BP and without early sharing"
265 << std::endl;
266#endif
267 if (scheduleMove(mn, limits, false, true, false) >= 0) {
268 return true;
269 }
270
// Scheduling still failing: try banning the bypass that caused it
// instead of moving to an earlier cycle.
271 if (tryRevertEarlierBypass(mn)) {
272#ifdef DEBUG_BUBBLEFISH_SCHEDULER
273 std::cerr << "\tTrying to Revert earlier bypass.."
274 << std::endl;
275#endif
276 // do not make cycle go earlier, but forbid some bypass.
277 return false;
278 }
279
280#ifdef DEBUG_BUBBLEFISH_SCHEDULER
281 std::cerr << "\tTrying without late BP" << std::endl;
282#endif
283 // disable late bypass
284 if (scheduleMove(mn, limits, true, false) >=0) {
285#ifdef DEBUG_BUBBLEFISH_SCHEDULER
286 std::cerr << "\tok without late BP" << std::endl;
287#endif
288 return true;
289 }
290#ifdef DEBUG_BUBBLEFISH_SCHEDULER
291 std::cerr << "\tTrying without any BP" << std::endl;
292#endif
293 // disable both bypasses
294 if (scheduleMove(mn, limits, false, false) >=0) {
295#ifdef DEBUG_BUBBLEFISH_SCHEDULER
296 std::cerr << "\tok without any BP" << std::endl;
297#endif
298 return true;
299 }
300
// Multi-destination operand: forbid sharing this operand and report
// "success" so the caller re-selects with the new constraint in place.
301 if (mn.destinationOperationCount() > 1) {
302#ifdef DEBUG_BUBBLEFISH_SCHEDULER
303 std::cerr << "forbidding operand share: " << mn.toString()
304 << std::endl;
305#endif
306 illegalOperandShares_.insert(&mn);
307 return true;
308 }
309
310#ifdef DEBUG_BUBBLEFISH_SCHEDULER
311 std::cerr << "\tScheduleMove failing, need to tr earlier cycle"
312 << std::endl;
313#endif
314
// Everything failed: request retry at an earlier cycle.
315 int lcFront = latestScheduledOfFrontCycle();
316 if (lcFront != -1 && lcFront <= latestCycle) {
317 latestCycle = lcFront -1;
318 } else{
319 latestCycle--;
320 }
321 return false;
322 }
323 std::cerr << "end of schduleMoveOuter, should not be here!" << std::endl;
324 return true;
325}
326
328 int lc = -1;
329 for (auto mn : schedulingFront_) {
330 if (mn->isScheduled() && mn->cycle() > lc) {
331 lc = mn->cycle();
332 }
333 }
334 return lc;
335}
336
337
338
// BF2ScheduleFront::getMoveNodeFromFrontBU(): picks the next move to
// schedule from the front, bottom-up: the unscheduled, non-dead move with
// the greatest max source distance in the DDG (cached in pathLengthCache_),
// with a +10000 weight for the last unscheduled move of a destination
// operation. May return the operation's trigger (possibly after an operand
// swap via BFSwapOperands) instead of the selected node.
// NOTE(review): the signature (orig. 339) and part of the selection
// condition (orig. 368) were lost in the Doxygen-dump extraction.
340#ifdef DEBUG_BUBBLEFISH_SCHEDULER
341 std::cerr << "\tGetting moveNode from front" << std::endl;
342#endif
343 int sd = -2;
344 MoveNode* selectedMN = NULL;
345 for (auto mn: schedulingFront_) {
346
347 if (mn->isScheduled() || sched_.isDeadResult(*mn)) {
348 continue;
349 }
350
// Path length from the DDG is expensive; memoize per node.
351 int cursd;
352 auto j = pathLengthCache_.find(mn);
353 if (j != pathLengthCache_.end()) {
354 cursd = j->second;
355 } else {
356 cursd = ddg().maxSourceDistance(*mn);
357 pathLengthCache_[mn] = cursd;
358 }
359
360 // weight more last moves of unready ops
361 if (mn->isDestinationOperation()) {
362 if (mn->isLastUnscheduledMoveOfDstOp()) {
363 cursd += 10000;
364 }
365 }
366
367 if (cursd > sd &&
// NOTE(review): orig. line 368 (rest of this condition) dropped in
// extraction -- plausibly an illegalOperandShares_ exclusion; confirm
// against upstream.
369 selectedMN = mn;
370 sd = cursd;
371 }
372 }
373
374 if (selectedMN != NULL && !sched_.isPreLoopSharedOperand(*selectedMN)) {
375#ifdef DEBUG_BUBBLEFISH_SCHEDULER
376 std::cerr << "\t\tSelected:" << selectedMN->toString() << std::endl;
377#endif
// Prefer scheduling the trigger first when it has no unscheduled
// successors of its own.
378 MoveNode* trigger = getSisterTrigger(*selectedMN, targetMachine());
379 if (trigger != NULL && !trigger->isScheduled() &&
380 !sched_.hasUnscheduledSuccessors(*trigger)) {
381#ifdef DEBUG_BUBBLEFISH_SCHEDULER
382 std::cerr << "\t\tReturning trigger instead:"
383 << trigger->toString() << std::endl;
384#endif
385
386 BFSwapOperands* bfswo = new BFSwapOperands(sched_, *trigger);
387 if (runPreChild(bfswo)) {
388 return bfswo->switchedMNs().second;
389 } else {
390 return trigger;
391 }
392 }
393 }
394 if (selectedMN != NULL) {
395#ifdef DEBUG_BUBBLEFISH_SCHEDULER
396 std::cerr << "\tSelected MN: " << selectedMN->toString() << std::endl;
397#endif
398 } else {
399#ifdef DEBUG_BUBBLEFISH_SCHEDULER
400 std::cerr << "Front empty, returning NULL" << std::endl;
401#endif
402 }
403 return selectedMN;
404}
405
407 int prefCycle = INT_MAX;
408 if (mn.isSourceOperation()) {
409 if (!mn.isDestinationOperation()) {
410 const ProgramOperation& sop = mn.sourceOperation();
411 for (int i = 0; i < sop.outputMoveCount(); i++) {
412 const MoveNode& outNode = sop.outputMove(i);
413 if (!outNode.isScheduled()) {
414 continue;
415 }
416
417#ifdef DEBUG_BUBBLEFISH_SCHEDULER
418 std::cerr << "\t\tOut node: " << outNode.toString()
419 << " is scheduled!" << std::endl;
420#endif
421 const TTAMachine::HWOperation& hwop =
422 *sop.hwopFromOutMove(outNode);
423 // find the OSAL id of the operand of the output being tested
424 const int outNodeOutputIndex =
425 sop.outputIndexOfMove(outNode);
426 int onLatency = hwop.latency(outNodeOutputIndex);
427 int latestTrigger = outNode.cycle() - onLatency;
428 const int myOutIndex = mn.move().source().operationIndex();
429 int myLatency = hwop.latency(myOutIndex);
430 int myPreferredCycle = latestTrigger + myLatency;
431 if (myPreferredCycle < prefCycle) {
432 prefCycle = myPreferredCycle;
433 }
434 }
435 }
436 }
437 return prefCycle;
438}
439
// BF2ScheduleFront::getPreferredLimits(const MoveNode& mn): builds the
// SchedulingLimits for a move. A finite prefResultCycle() pins the earliest
// cycle; a control-flow move that is its own sister trigger is pinned to the
// exact cycle lc_ - delaySlots().
// NOTE(review): the signature lines (orig. 440-441), the limits declaration
// (orig. 443) and the lines setting limits.direction (orig. 452, 462) were
// lost in the Doxygen-dump extraction -- not compilable as-is; restore from
// upstream (cross-reference: "BF2Scheduler::SchedulingLimits
// getPreferredLimits(const MoveNode &mn)").
442 const MoveNode& mn) {
// NOTE(review): orig. line 443 dropped -- presumably the declaration of
// the local "limits" object returned below. Confirm against upstream.
444 int prefCycle = prefResultCycle(mn);
445
446 if (prefCycle != INT_MAX) {
447#ifdef DEBUG_BUBBLEFISH_SCHEDULER
448 std::cerr << "Schedulong TOP-DOWN(TD)" << mn.toString() << std::endl;
449 std::cerr << "Setting earl. limit to pref:" << prefCycle << std::endl;
450#endif
451 limits.earliestCycle = prefCycle;
// NOTE(review): orig. line 452 dropped -- presumably sets
// limits.direction (the debug print says TOP-DOWN). Confirm upstream.
453 }
// A control-flow move that is its own trigger must land exactly in the
// jump slot: lc_ minus the control unit's delay slots.
454 if (mn.move().isControlFlowMove() &&
455 getSisterTrigger(mn, targetMachine()) == &mn) {
456 prefCycle = lc_- targetMachine().controlUnit()->delaySlots();
457#ifdef DEBUG_BUBBLEFISH_SCHEDULER
458 std::cerr << "Control flow move requires exact cycle: "
459 << prefCycle << std::endl;
460#endif
461 limits.earliestCycle = limits.latestCycle = prefCycle;
// NOTE(review): orig. line 462 dropped -- presumably sets
// limits.direction to EXACTCYCLE. Confirm against upstream.
463 }
464 return limits;
465}
466
// BF2ScheduleFront::scheduleMove(mn, limits, allowEarlyBypass,
// allowLateBypass, allowEarlySharing): dispatches on limits.direction to a
// BFScheduleBU / BFScheduleTD / BFScheduleExact optimization and runs it as
// a pre-child. Returns 1 on success, -1 on failure (or unknown direction).
// NOTE(review): the Doxygen-dump extraction lost the signature lines
// (orig. 468, 470) and ALL of the switch's case labels (orig. 475-476, 487,
// 491) -- by the constructed objects these are plausibly the BOTTOMUP,
// TOPDOWN and EXACTCYCLE cases; not compilable as-is, restore from upstream
// (cross-reference gives the full declaration with default arguments).
467int
469 MoveNode& mn,
// NOTE(review): orig. line 470 dropped -- per the cross-reference,
// "BF2Scheduler::SchedulingLimits limits,".
471 bool allowEarlyBypass, bool allowLateBypass, bool allowEarlySharing) {
472
473 BFOptimization* sched;
474 switch (limits.direction) {
// NOTE(review): orig. lines 475-476 dropped -- presumably
// "case BF2Scheduler::BOTTOMUP:" and a pre-loop-shared-operand guard
// (the else-branch below asserts on that condition). Confirm upstream.
477 sched = new BFScheduleBU(
478 sched_, mn, limits.latestCycle, allowEarlyBypass,
479 allowLateBypass, allowEarlySharing);
480 break;
481 } else {
482 std::cerr << "Is pre loop shared oper, sch to prolog instead: " <<
483 mn.toString() << std::endl;
484 assert(false);
485 break;
486 }
// NOTE(review): orig. line 487 dropped -- presumably
// "case BF2Scheduler::TOPDOWN:".
488 sched = new BFScheduleTD(
489 sched_, mn, limits.earliestCycle, allowLateBypass);
490 break;
// NOTE(review): orig. line 491 dropped -- presumably
// "case BF2Scheduler::EXACTCYCLE:".
492 assert(limits.earliestCycle == limits.latestCycle);
493 sched = new BFScheduleExact(
494 sched_,mn,limits.earliestCycle);
495 break;
496 default:
497 return -1;
498 }
499 return runPreChild(sched) ? 1 : -1;
500}
501
// Tail of a front-population function (signature and the computation of
// "moves" -- orig. lines 502-504 -- were lost in the Doxygen-dump
// extraction; per the class reference this is plausibly
// requeueOtherMovesOfSameOp(MoveNode&), with "moves" presumably coming from
// allNodesOfSameOperation() -- confirm against upstream).
// Marks each not-yet-finalized move as part of the frontier and inserts it
// into schedulingFront_.
505 for (auto mn : moves) {
506 if (!mn->isFinalized()) { // && !sched_.isPreLoopSharedOperand(*mn)) {
507 mn->setIsInFrontier(true);
508 schedulingFront_.insert(mn);
509 }
510 }
511}
512
515
// BF2ScheduleFront::allNodesOfSameOperation(MoveNode& mn): worklist
// traversal collecting every node transitively connected to mn through
// shared source/guard/destination operations and through forced-bypass
// register RAW edges, in both directions. Returns the collected NodeSet.
// NOTE(review): the Doxygen-dump extraction lost the signature and the
// nodes/queue declarations (orig. 513-517), the queueOperation(...) call
// heads (orig. 525, 530, 535) and two guarding conditions (orig. 539, 556)
// -- not compilable as-is; restore from upstream (cross-reference:
// "DataDependenceGraph::NodeSet allNodesOfSameOperation(MoveNode &mn)" and
// "static void queueOperation(ProgramOperation&, const NodeSet&, NodeSet&)").
518 queue.insert(&mn);
519
520 while (!queue.empty()) {
521 MoveNode* mn = *queue.begin();
522 nodes.insert(mn);
523 queue.erase(mn);
524 if (mn->isSourceOperation()) {
// NOTE(review): orig. line 525 dropped -- presumably
// "queueOperation(" heading this call. Confirm upstream.
526 mn->sourceOperation(), nodes, queue);
527 }
528
529 if (mn->isGuardOperation()) {
// NOTE(review): orig. line 530 dropped -- presumably "queueOperation(".
531 mn->guardOperation(), nodes, queue);
532 }
533
534 for (unsigned int i = 0; i < mn->destinationOperationCount(); i++) {
// NOTE(review): orig. line 535 dropped -- presumably "queueOperation(".
536 mn->destinationOperation(i), nodes, queue);
537 }
538
// NOTE(review): orig. line 539 dropped -- a condition opening the
// forced-bypass-source branch below. Confirm against upstream.
540 if (ddg().hasNode(*mn)) {
541 MoveNode* bypassSrc =
542 ddg().onlyRegisterRawSource(*mn, false, false);
543 if (bypassSrc != NULL) {
544 if (nodes.find(bypassSrc) == nodes.end()) {
545 queue.insert(bypassSrc);
546 }
547 } else {
548#ifdef DEBUG_BUBBLEFISH_SCHEDULER
549 std::cerr << "Warning:Cannot find src for forced bypass. "
550 << " Inst. scheduler may fail/deadlock" <<std::endl;
551#endif
552 }
553 }
554 }
555
// NOTE(review): orig. line 556 dropped -- a condition opening the
// forced-bypass-destination branch below. Confirm against upstream.
557 if (ddg().hasNode(*mn)) {
558 DataDependenceGraph::NodeSet rrDestinations =
559 ddg().onlyRegisterRawDestinations(*mn, false, false);
560 for (auto n : rrDestinations) {
561 if (nodes.find(n) == nodes.end()) {
562 queue.insert(n);
563 }
564 }
565 }
566 }
567 }
568 return nodes;
569}
570
571
573 for (auto node : schedulingFront_) {
574 if (sched_.isDeadResult(*node)) {
575 std::cerr << "DEAD ";
576 }
577 std::cerr << prefix << node->toString() << std::endl;
578 }
579}
580
581
// BF2ScheduleFront::undoOnlyMe() (per the class reference): undoes this
// front's own state. The visible code only prints the front before and
// after the actual undo work.
// NOTE(review): the signature (orig. 582) and the line doing the actual
// work (orig. 587 -- presumably the call clearing the scheduling front)
// were lost in the Doxygen-dump extraction; restore from upstream.
583#ifdef DEBUG_BUBBLEFISH_SCHEDULER
584 std::cerr << "should undo front. printing front:" << std::endl;
585 printFront("\t");
586#endif
// NOTE(review): orig. line 587 dropped here (the actual clearing call,
// per the "should have cleared" print below). Confirm against upstream.
588
589#ifdef DEBUG_BUBBLEFISH_SCHEDULER
590 std::cerr << "should have cleared. printing front:" << std::endl;
591 printFront("\t");
592#endif
593}
594
// Front-clearing helper: drops every node's frontier flag and empties
// schedulingFront_.
// NOTE(review): the signature (orig. 595) was lost in the Doxygen-dump
// extraction and this member is not in the page's cross-reference list;
// name and exact declaration must be confirmed against upstream.
596 for (auto node : schedulingFront_) {
597 node->setIsInFrontier(false);
598 }
599 schedulingFront_.clear();
600}
601
602
// BF2ScheduleFront::findInducingBypassSource(MoveNode& mn): worklist search
// from mn, through source and destination operations, for a node recorded
// as a value (i.second) in bypassSources_ -- i.e. the source move of a
// bypass that may have induced the current scheduling failure. Returns NULL
// if none is reachable.
// NOTE(review): the Doxygen-dump extraction lost the signature and the
// queue declaration (orig. 603-604), a condition opening a block
// (orig. 613, matched by the "}" at orig. 619) and the two
// findInducingBypassSourceFromOperation( call heads (orig. 623, 632) --
// not compilable as-is; restore from upstream.
605 DataDependenceGraph::NodeSet processedNodes;
606 queue.insert(&mn);
607
608 while (!queue.empty()) {
609 MoveNode* mn = *queue.begin();
610 processedNodes.insert(mn);
611 queue.erase(mn);
612
// NOTE(review): orig. line 613 dropped -- a condition guarding the
// bypassSources_ scan below. Confirm against upstream.
614 for (auto i : bypassSources_) {
615 if (i.second == mn) {
616 return mn;
617 }
618 }
619 }
620
621 if (mn->isSourceOperation()) {
622 MoveNode *result =
// NOTE(review): orig. line 623 dropped -- presumably
// "findInducingBypassSourceFromOperation(". Confirm upstream.
624 mn->sourceOperation(), processedNodes, queue);
625 if (result != NULL) {
626 return result;
627 }
628 }
629
630 for (unsigned int i = 0; i < mn->destinationOperationCount(); i++) {
631 MoveNode *result =
// NOTE(review): orig. line 632 dropped -- presumably
// "findInducingBypassSourceFromOperation(". Confirm upstream.
633 mn->destinationOperation(i), processedNodes, queue);
634 if (result != NULL) {
635 return result;
636 }
637 }
638 }
639 return NULL;
640}
641
644 const DataDependenceGraph::NodeSet& processedNodes,
646 for (int j = 0; j < po.inputMoveCount(); j++) {
647 MoveNode& inputMove = po.inputMove(j);
648 // only add if not already added
649 if (processedNodes.find(&inputMove) == processedNodes.end()) {
650 queue.insert(&inputMove);
651 }
652 }
653
654 for (int j = 0; j < po.outputMoveCount(); j++) {
655 MoveNode& outputMove = po.outputMove(j);
656 // only add if not already added
657 if (processedNodes.find(&outputMove) == processedNodes.end()) {
658 if (!sched_.isDestinationUniversalReg(outputMove)) {
659 for (auto i : bypassSources_) {
660 if (i.second == &outputMove) {
661 return &outputMove;
662 }
663 }
664 }
665 queue.insert(&outputMove);
666 }
667 }
668 return NULL;
669}
670
672
673 MoveNode* inducingBypass = findInducingBypassSource(mn);
674 while (inducingBypass != NULL &&
675 sched_.isDestinationUniversalReg(*inducingBypass)) {
676 for (auto i : bypassSources_) {
677 if (i.second == inducingBypass) {
678 inducingBypass = findInducingBypassSource(*(i.first));
679 break;
680 }
681 }
682 }
683 if (inducingBypass == NULL) {
684 return false;
685 } else {
686#ifdef DEBUG_BUBBLEFISH_SCHEDULER
687 std::cerr << "\t\tMaking illegal bypas of src: "
688 << inducingBypass->toString() << std::endl;
689#endif
690 if (illegalBypassSources_.count(inducingBypass)) {
691#ifdef DEBUG_BUBBLEFISH_SCHEDULER
692 std::cerr << "\t\tIs already illegal bypass! " << std::endl;
693#endif
694 return false;
695 }
696 illegalBypassSources_.insert(inducingBypass);
697 return true;
698 }
699}
700
704
#define assert(condition)
static int verboseLevel()
static void append(const ContainerType &src, ContainerType &dest)
BF2Scheduler::MoveNodeMap MoveNodeMap
MoveNode * getMoveNodeFromFrontBU()
DataDependenceGraph::NodeSet schedulingFront_
bool scheduleFrontFromMove(MoveNode &mn)
DataDependenceGraph::NodeSet illegalOperandShares_
bool tryRevertEarlierBypass(MoveNode &mn)
void undoOnlyMe() override
DataDependenceGraph::NodeSet illegalBypassSources_
int scheduleMove(MoveNode &move, BF2Scheduler::SchedulingLimits limits, bool allowEarlyBypass=true, bool allowLateBypass=true, bool allowEarlySharing=true)
DataDependenceGraph::NodeSet allNodesOfSameOperation(MoveNode &mn)
void requeueOtherMovesOfSameOp(MoveNode &mn)
static int prefResultCycle(const MoveNode &mn)
BF2Scheduler::SchedulingLimits getPreferredLimits(const MoveNode &mn)
void appendBypassSources(MoveNodeMap &map)
DataDependenceGraph::NodeSet nodesToNotify_
bool tryToScheduleMoveOuter(MoveNode &mn, int &latestCycle)
MoveNodeMap bypassSources_
virtual bool operator()() override
void mightBeReady(MoveNode &n) override
MoveNode * findInducingBypassSource(MoveNode &mn)
MoveNode * findInducingBypassSourceFromOperation(ProgramOperation &po, const DataDependenceGraph::NodeSet &processedNodes, DataDependenceGraph::NodeSet &queue)
void printFront(const TCEString &prefix)
PathLengthCache pathLengthCache_
bool isDeadResult(MoveNode &mn) const
static bool isSourceUniversalReg(const MoveNode &mn)
static bool isDestinationUniversalReg(const MoveNode &mn)
TTAMachine::FUPort * isPreLoopSharedOperand(MoveNode &mn) const
bool hasUnscheduledSuccessors(MoveNode &mn) const
BF2Scheduler & sched_
static MoveNode * getSisterTrigger(const MoveNode &mn, const TTAMachine::Machine &mach)
SimpleResourceManager * prologRM() const
BUMoveNodeSelector & selector()
DataDependenceGraph & ddg()
const TTAMachine::Machine & targetMachine() const
SimpleResourceManager & rm() const
std::pair< MoveNode *, MoveNode * > switchedMNs()
static void queueOperation(ProgramOperation &po, const DataDependenceGraph::NodeSet &nodes, DataDependenceGraph::NodeSet &queue)
virtual void notifyScheduled(MoveNode &node)
virtual void mightBeReady(MoveNode &node)
int maxSourceDistance(const GraphNode &node) const
NodeSet onlyRegisterRawDestinations(const MoveNode &mn, bool allowGuardEdges=false, bool allowBackEdges=false) const
MoveNode * onlyRegisterRawSource(const MoveNode &mn, int allowGuardEdges=2, int backEdges=0) const
virtual void writeToDotFile(const TCEString &fileName) const
std::set< GraphNode *, typename GraphNode::Comparator > NodeSet
Definition Graph.hh:53
unsigned int destinationOperationCount() const
bool isGuardOperation() const
Definition MoveNode.cc:181
int cycle() const
Definition MoveNode.cc:421
ProgramOperation & sourceOperation() const
Definition MoveNode.cc:453
bool isDestinationOperation() const
std::string toString() const
Definition MoveNode.cc:576
ProgramOperation & guardOperation() const
Definition MoveNode.cc:479
void setIsInFrontier(bool inFrontier=true)
TTAProgram::Move & move()
bool isSourceOperation() const
Definition MoveNode.cc:168
bool isScheduled() const
Definition MoveNode.cc:409
bool isFinalized() const
bool isDestinationVariable() const
Definition MoveNode.cc:264
ProgramOperation & destinationOperation(unsigned int index=0) const
const TTAMachine::HWOperation * hwopFromOutMove(const MoveNode &outputNode) const
int outputMoveCount() const
int inputMoveCount() const
MoveNode & inputMove(int index) const
MoveNode & outputMove(int index) const
int outputIndexOfMove(const MoveNode &mn) const
virtual void undo()
Definition Reversible.cc:69
bool runPreChild(Reversible *preChild)
virtual int smallestCycle() const override
virtual ControlUnit * controlUnit() const
Definition Machine.cc:345
int maximumLatency() const
Definition Machine.cc:1023
bool isControlFlowMove() const
Definition Move.cc:233
Terminal & source() const
Definition Move.cc:302
virtual int operationIndex() const
Definition Terminal.cc:364
SchedulingDirection direction