From 41d8d420d8d1a24e4473eb23ddf3d7934774a689 Mon Sep 17 00:00:00 2001 From: Liam Huber Date: Tue, 30 Jul 2024 14:59:05 -0700 Subject: [PATCH] [patch] Introduce caching (#395) * Refactor: output signals to emission Wrap them in `emit()` and `emitting_channels` instead of manually calling them. This lets us tighten up If-like nodes too. * Introduce caching To shortcut actually running a node and just return existing output if its cached input matches its current input (by `==` test) * Extend speedup test to include caching * Add docstring * Expose use_cache as a class attribute So it can be set at class definition time, even by decorators * Discuss caching in the deepdive * Format black --------- Co-authored-by: pyiron-runner --- notebooks/deepdive.ipynb | 1711 +++++++++++--------- pyiron_workflow/node.py | 44 +- pyiron_workflow/nodes/composite.py | 25 +- pyiron_workflow/nodes/for_loop.py | 9 +- pyiron_workflow/nodes/function.py | 32 +- pyiron_workflow/nodes/macro.py | 22 +- pyiron_workflow/nodes/standard.py | 15 +- pyiron_workflow/nodes/transform.py | 69 +- tests/integration/test_parallel_speedup.py | 30 +- tests/integration/test_workflow.py | 55 +- tests/unit/test_node.py | 5 + 11 files changed, 1154 insertions(+), 863 deletions(-) diff --git a/notebooks/deepdive.ipynb b/notebooks/deepdive.ipynb index bed19c32..b2917fc8 100644 --- a/notebooks/deepdive.ipynb +++ b/notebooks/deepdive.ipynb @@ -15,6 +15,7 @@ "- Workflows: keeping your computational graphs organized\n", "- Node packages: making nodes re-usable\n", "- Macro nodes: complex computations by composing sub-graphs\n", + "- Caching: Why run if you don't have to\n", "- Dragons and the future: remote execution, cyclic flows, and more\n", "\n", "To jump straight to how to use `pyiron_workflow`, go look at the quickstart guide -- this jumps straight to using `Workflow` as a single-point-of-access, creating nodes with decorators, and leveraging node packages to form complex graphs.\n", @@ -290,7 +291,7 @@ "output_type": "stream", "text": [ "RAISED TypeError\n", - "The channel x cannot take the value `not an integer` because it is not compliant with the type hint \n" + "The channel /adder.x cannot take the value `not an integer` () because it is not compliant with the type hint \n" ] } ], @@ -1358,339 +1359,339 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", + "\n", + "\n", "clustersimple\n", - "\n", + "\n", "simple: Workflow\n", "\n", "clustersimpleInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clustersimpleOutputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Outputs\n", + "\n", + "Outputs\n", "\n", "\n", "clustersimplea\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "a: AddOne\n", + "\n", + "a: AddOne\n", "\n", "\n", "clustersimpleaInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clustersimpleaOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clustersimpleb\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "b: AddOne\n", + "\n", + "b: AddOne\n", "\n", "\n", "clustersimplebInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clustersimplebOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", 
"clustersimplesum\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "sum: Add\n", + "\n", + "sum: Add\n", "\n", "\n", "clustersimplesumInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clustersimplesumOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "\n", "clustersimpleInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clustersimpleOutputsran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clustersimpleInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clustersimpleInputsa\n", - "\n", - "a\n", + "\n", + "a\n", "\n", "\n", "\n", "clustersimpleaInputsx\n", - "\n", - "x\n", + "\n", + "x\n", "\n", "\n", "\n", "clustersimpleInputsa->clustersimpleaInputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clustersimpleInputsb\n", - "\n", - "b\n", + "\n", + "b\n", "\n", "\n", "\n", "clustersimplebInputsx\n", - "\n", - "x\n", + "\n", + "x\n", "\n", "\n", "\n", "clustersimpleInputsb->clustersimplebInputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clustersimpleOutputsa + 1\n", - "\n", - "a + 1\n", + "\n", + "a + 1\n", "\n", "\n", "\n", "clustersimpleOutputsa + b + 2\n", - "\n", - "a + b + 2\n", + "\n", + "a + b + 2\n", "\n", "\n", "\n", "clustersimpleaInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clustersimpleaOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clustersimpleaInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clustersimplesumInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clustersimpleaOutputsWithInjectionran->clustersimplesumInputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clustersimpleaOutputsWithInjectiony\n", - "\n", - "y\n", + "\n", + "y\n", "\n", "\n", "\n", "clustersimpleaOutputsWithInjectiony->clustersimpleOutputsa + 1\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clustersimplesumInputsx\n", - "\n", - "x\n", + "\n", + "x\n", "\n", "\n", "\n", "clustersimpleaOutputsWithInjectiony->clustersimplesumInputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clustersimplebInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clustersimplebOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clustersimplebInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clustersimplebOutputsWithInjectionran->clustersimplesumInputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clustersimplebOutputsWithInjectiony\n", - "\n", - "y\n", + "\n", + "y\n", "\n", "\n", "\n", "clustersimplesumInputsy\n", - "\n", - "y\n", + "\n", + "y\n", "\n", "\n", "\n", "clustersimplebOutputsWithInjectiony->clustersimplesumInputsy\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clustersimplesumInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clustersimplesumOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clustersimplesumOutputsWithInjectionsum\n", - "\n", - 
"sum\n", + "\n", + "sum\n", "\n", "\n", "\n", "clustersimplesumOutputsWithInjectionsum->clustersimpleOutputsa + b + 2\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - "" + "" ] }, "execution_count": 41, @@ -2026,416 +2027,416 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", + "\n", + "\n", "clusterAddThenCount\n", - "\n", + "\n", "AddThenCount: AddThenCount\n", "\n", "clusterAddThenCountInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountadd_to_n1\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_to_n1: AddThree\n", + "\n", + "add_to_n1: AddThree\n", "\n", "\n", "clusterAddThenCountadd_to_n1Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountadd_to_n1OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountadd_to_n2\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_to_n2: AddThree\n", + "\n", + "add_to_n2: AddThree\n", "\n", "\n", "clusterAddThenCountadd_to_n2Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountadd_to_n2OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountsum_plus_5\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "sum_plus_5: Add\n", + "\n", + "sum_plus_5: Add\n", "\n", "\n", "clusterAddThenCountsum_plus_5Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountsum_plus_5OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountdigit_counter\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "digit_counter: HowManyDigits\n", + "\n", + "digit_counter: HowManyDigits\n", "\n", "\n", "clusterAddThenCountdigit_counterInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountdigit_counterOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "\n", "clusterAddThenCountInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountInputsn1\n", - "\n", - "n1: int\n", + "\n", + "n1: int\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1Inputsx\n", - "\n", - "x: int\n", + "\n", + "x: int\n", "\n", "\n", "\n", "clusterAddThenCountInputsn1->clusterAddThenCountadd_to_n1Inputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountInputsn2\n", - "\n", - "n2: int\n", + "\n", + "n2: int\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2Inputsx\n", - "\n", - "x: int\n", + "\n", + "x: int\n", 
"\n", "\n", "\n", "clusterAddThenCountInputsn2->clusterAddThenCountadd_to_n2Inputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountOutputsWithInjectiondigits_in_sum_plus_5\n", - "\n", - "digits_in_sum_plus_5\n", + "\n", + "digits_in_sum_plus_5\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1OutputsWithInjectionran->clusterAddThenCountsum_plus_5Inputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1OutputsWithInjectionadd_two\n", - "\n", - "add_two\n", + "\n", + "add_two\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5Inputsobj\n", - "\n", - "obj\n", + "\n", + "obj\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1OutputsWithInjectionadd_two->clusterAddThenCountsum_plus_5Inputsobj\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1OutputsWithInjectionadd_three\n", - "\n", - "add_three\n", + "\n", + "add_three\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2OutputsWithInjectionran->clusterAddThenCountsum_plus_5Inputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2OutputsWithInjectionadd_two\n", - "\n", - "add_two\n", + "\n", + "add_two\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2OutputsWithInjectionadd_three\n", - "\n", - "add_three\n", + "\n", + "add_three\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5Inputsother\n", - "\n", - "other\n", + "\n", + "other\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2OutputsWithInjectionadd_three->clusterAddThenCountsum_plus_5Inputsother\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountdigit_counterInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5OutputsWithInjectionran->clusterAddThenCountdigit_counterInputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5OutputsWithInjectionadd\n", - "\n", - "add\n", + "\n", + "add\n", "\n", "\n", "\n", "clusterAddThenCountdigit_counterInputsn\n", - "\n", - "n: int\n", + "\n", + "n: int\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5OutputsWithInjectionadd->clusterAddThenCountdigit_counterInputsn\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", 
"clusterAddThenCountdigit_counterInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountdigit_counterOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountdigit_counterOutputsWithInjectionn_digits\n", - "\n", - "n_digits\n", + "\n", + "n_digits\n", "\n", "\n", "\n", "clusterAddThenCountdigit_counterOutputsWithInjectionn_digits->clusterAddThenCountOutputsWithInjectiondigits_in_sum_plus_5\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - "" + "" ] }, "execution_count": 50, @@ -2460,1054 +2461,1054 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", + "\n", + "\n", "clusterAddThenCount\n", - "\n", + "\n", "AddThenCount: AddThenCount\n", "\n", "clusterAddThenCountInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountadd_to_n1\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_to_n1: AddThree\n", + "\n", + "add_to_n1: AddThree\n", "\n", "\n", "clusterAddThenCountadd_to_n1Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountadd_to_n1OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n1\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_one: AddOne\n", + "\n", + "add_one: AddOne\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n1Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n1OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n1\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_two: AddOne\n", + "\n", + "add_two: AddOne\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n1Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n1OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n1\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_three: AddOne\n", + "\n", + "add_three: AddOne\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n1Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n1OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountadd_to_n2\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_to_n2: AddThree\n", + "\n", + "add_to_n2: AddThree\n", "\n", "\n", "clusterAddThenCountadd_to_n2Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountadd_to_n2OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + 
"OutputsWithInjection\n", "\n", - "\n", - "clusterAddThenCountadd_threeadd_to_n2\n", + "\n", + "clusterAddThenCountadd_oneadd_to_n2\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_three: AddOne\n", + "\n", + "add_one: AddOne\n", "\n", - "\n", - "clusterAddThenCountadd_threeadd_to_n2Inputs\n", + "\n", + "clusterAddThenCountadd_oneadd_to_n2Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", - "\n", - "clusterAddThenCountadd_threeadd_to_n2OutputsWithInjection\n", + "\n", + "clusterAddThenCountadd_oneadd_to_n2OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", - "\n", - "clusterAddThenCountadd_oneadd_to_n2\n", + "\n", + "clusterAddThenCountadd_twoadd_to_n2\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_one: AddOne\n", + "\n", + "add_two: AddOne\n", "\n", - "\n", - "clusterAddThenCountadd_oneadd_to_n2Inputs\n", + "\n", + "clusterAddThenCountadd_twoadd_to_n2Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", - "\n", - "clusterAddThenCountadd_oneadd_to_n2OutputsWithInjection\n", + "\n", + "clusterAddThenCountadd_twoadd_to_n2OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", - "\n", - "clusterAddThenCountadd_twoadd_to_n2\n", + "\n", + "clusterAddThenCountadd_threeadd_to_n2\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_two: AddOne\n", + "\n", + "add_three: AddOne\n", "\n", - "\n", - "clusterAddThenCountadd_twoadd_to_n2OutputsWithInjection\n", + "\n", + "clusterAddThenCountadd_threeadd_to_n2Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "Inputs\n", "\n", - "\n", - "clusterAddThenCountadd_twoadd_to_n2Inputs\n", + "\n", + "clusterAddThenCountadd_threeadd_to_n2OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountsum_plus_5\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "sum_plus_5: Add\n", + "\n", + "sum_plus_5: Add\n", "\n", "\n", "clusterAddThenCountsum_plus_5Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountsum_plus_5OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountdigit_counter\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "digit_counter: HowManyDigits\n", + "\n", + "digit_counter: HowManyDigits\n", "\n", "\n", "clusterAddThenCountdigit_counterInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountdigit_counterOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountas_strdigit_counter\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "as_str: String\n", + "\n", + "as_str: String\n", "\n", "\n", "clusterAddThenCountas_strdigit_counterInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountas_strdigit_counterOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", 
- "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddThenCountdigitsdigit_counter\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "digits: Length\n", + "\n", + "digits: Length\n", "\n", "\n", "clusterAddThenCountdigitsdigit_counterInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddThenCountdigitsdigit_counterOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "\n", "clusterAddThenCountInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountInputsn1\n", - "\n", - "n1: int\n", + "\n", + "n1: int\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1Inputsx\n", - "\n", - "x: int\n", + "\n", + "x: int\n", "\n", "\n", "\n", "clusterAddThenCountInputsn1->clusterAddThenCountadd_to_n1Inputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountInputsn2\n", - "\n", - "n2: int\n", + "\n", + "n2: int\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2Inputsx\n", - "\n", - "x: int\n", + "\n", + "x: int\n", "\n", "\n", "\n", "clusterAddThenCountInputsn2->clusterAddThenCountadd_to_n2Inputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountOutputsWithInjectiondigits_in_sum_plus_5\n", - "\n", - "digits_in_sum_plus_5\n", + "\n", + "digits_in_sum_plus_5\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n1Inputsx\n", - "\n", - "x\n", + "\n", + "x\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1Inputsx->clusterAddThenCountadd_oneadd_to_n1Inputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1OutputsWithInjectionran->clusterAddThenCountsum_plus_5Inputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1OutputsWithInjectionadd_two\n", - "\n", - "add_two\n", + "\n", + "add_two\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5Inputsobj\n", - "\n", - "obj\n", + "\n", + "obj\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1OutputsWithInjectionadd_two->clusterAddThenCountsum_plus_5Inputsobj\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n1OutputsWithInjectionadd_three\n", - "\n", - "add_three\n", + "\n", + "add_three\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n1Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n1OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n1Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + 
"\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n1Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n1OutputsWithInjectionran->clusterAddThenCountadd_twoadd_to_n1Inputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n1OutputsWithInjectionresult\n", - "\n", - "result\n", + "\n", + "result\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n1Inputsx\n", - "\n", - "x\n", + "\n", + "x\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n1OutputsWithInjectionresult->clusterAddThenCountadd_twoadd_to_n1Inputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n1Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n1OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n1Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n1OutputsWithInjectionran->clusterAddThenCountadd_threeadd_to_n1Inputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n1OutputsWithInjectionresult\n", - "\n", - "result\n", + "\n", + "result\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n1OutputsWithInjectionresult->clusterAddThenCountadd_to_n1OutputsWithInjectionadd_two\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n1Inputsx\n", - "\n", - "x\n", + "\n", + "x\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n1OutputsWithInjectionresult->clusterAddThenCountadd_threeadd_to_n1Inputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n1Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n1OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n1OutputsWithInjectionresult\n", - "\n", - "result\n", + "\n", + "result\n", "\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n1OutputsWithInjectionresult->clusterAddThenCountadd_to_n1OutputsWithInjectionadd_three\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n2Inputsx\n", - "\n", - "x\n", + "\n", + "x\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2Inputsx->clusterAddThenCountadd_oneadd_to_n2Inputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2OutputsWithInjectionran->clusterAddThenCountsum_plus_5Inputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2OutputsWithInjectionadd_two\n", - "\n", - "add_two\n", + "\n", + "add_two\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2OutputsWithInjectionadd_three\n", - "\n", - "add_three\n", + "\n", + 
"add_three\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5Inputsother\n", - "\n", - "other\n", + "\n", + "other\n", "\n", "\n", "\n", "clusterAddThenCountadd_to_n2OutputsWithInjectionadd_three->clusterAddThenCountsum_plus_5Inputsother\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n2Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n2OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n2Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n2Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n2OutputsWithInjectionran->clusterAddThenCountadd_twoadd_to_n2Inputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n2OutputsWithInjectionresult\n", - "\n", - "result\n", + "\n", + "result\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n2Inputsx\n", - "\n", - "x\n", + "\n", + "x\n", "\n", "\n", "\n", "clusterAddThenCountadd_oneadd_to_n2OutputsWithInjectionresult->clusterAddThenCountadd_twoadd_to_n2Inputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n2Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n2OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n2Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n2OutputsWithInjectionran->clusterAddThenCountadd_threeadd_to_n2Inputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n2OutputsWithInjectionresult\n", - "\n", - "result\n", + "\n", + "result\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n2OutputsWithInjectionresult->clusterAddThenCountadd_to_n2OutputsWithInjectionadd_two\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n2Inputsx\n", - "\n", - "x\n", + "\n", + "x\n", "\n", "\n", "\n", "clusterAddThenCountadd_twoadd_to_n2OutputsWithInjectionresult->clusterAddThenCountadd_threeadd_to_n2Inputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n2Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n2OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n2OutputsWithInjectionresult\n", - "\n", - "result\n", + "\n", + "result\n", "\n", "\n", "\n", "clusterAddThenCountadd_threeadd_to_n2OutputsWithInjectionresult->clusterAddThenCountadd_to_n2OutputsWithInjectionadd_three\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountdigit_counterInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", 
"clusterAddThenCountsum_plus_5OutputsWithInjectionran->clusterAddThenCountdigit_counterInputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5OutputsWithInjectionadd\n", - "\n", - "add\n", + "\n", + "add\n", "\n", "\n", "\n", "clusterAddThenCountdigit_counterInputsn\n", - "\n", - "n: int\n", + "\n", + "n: int\n", "\n", "\n", "\n", "clusterAddThenCountsum_plus_5OutputsWithInjectionadd->clusterAddThenCountdigit_counterInputsn\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountdigit_counterInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountdigit_counterOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountas_strdigit_counterInputsobj\n", - "\n", - "obj\n", + "\n", + "obj\n", "\n", "\n", "\n", "clusterAddThenCountdigit_counterInputsn->clusterAddThenCountas_strdigit_counterInputsobj\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountdigit_counterOutputsWithInjectionn_digits\n", - "\n", - "n_digits\n", + "\n", + "n_digits\n", "\n", "\n", "\n", "clusterAddThenCountdigit_counterOutputsWithInjectionn_digits->clusterAddThenCountOutputsWithInjectiondigits_in_sum_plus_5\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountas_strdigit_counterInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountas_strdigit_counterOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountas_strdigit_counterInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountdigitsdigit_counterInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddThenCountas_strdigit_counterOutputsWithInjectionran->clusterAddThenCountdigitsdigit_counterInputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountas_strdigit_counterOutputsWithInjectionstr\n", - "\n", - "str\n", + "\n", + "str\n", "\n", "\n", "\n", "clusterAddThenCountdigitsdigit_counterInputsobj\n", - "\n", - "obj\n", + "\n", + "obj\n", "\n", "\n", "\n", "clusterAddThenCountas_strdigit_counterOutputsWithInjectionstr->clusterAddThenCountdigitsdigit_counterInputsobj\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddThenCountdigitsdigit_counterInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddThenCountdigitsdigit_counterOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddThenCountdigitsdigit_counterOutputsWithInjectionlen\n", - "\n", - "len\n", + "\n", + "len\n", "\n", "\n", "\n", "clusterAddThenCountdigitsdigit_counterOutputsWithInjectionlen->clusterAddThenCountdigit_counterOutputsWithInjectionn_digits\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - "" + "" ] }, "execution_count": 51, @@ -3532,320 +3533,320 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", + "\n", + "\n", "clusteradd_to_n1\n", - "\n", + "\n", "add_to_n1: AddThree\n", "\n", "clusteradd_to_n1Inputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusteradd_to_n1OutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - 
"OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusteradd_to_n1add_one\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_one: AddOne\n", + "\n", + "add_one: AddOne\n", "\n", "\n", "clusteradd_to_n1add_oneInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusteradd_to_n1add_oneOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusteradd_to_n1add_two\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_two: AddOne\n", + "\n", + "add_two: AddOne\n", "\n", "\n", "clusteradd_to_n1add_twoInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusteradd_to_n1add_twoOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusteradd_to_n1add_three\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "add_three: AddOne\n", + "\n", + "add_three: AddOne\n", "\n", "\n", "clusteradd_to_n1add_threeInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusteradd_to_n1add_threeOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "\n", "clusteradd_to_n1Inputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusteradd_to_n1OutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusteradd_to_n1Inputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusteradd_to_n1Inputsx\n", - "\n", - "x: int\n", + "\n", + "x: int\n", "\n", "\n", "\n", "clusteradd_to_n1add_oneInputsx\n", - "\n", - "x\n", + "\n", + "x\n", "\n", "\n", "\n", "clusteradd_to_n1Inputsx->clusteradd_to_n1add_oneInputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusteradd_to_n1OutputsWithInjectionadd_two\n", - "\n", - "add_two\n", + "\n", + "add_two\n", "\n", "\n", "\n", "clusteradd_to_n1OutputsWithInjectionadd_three\n", - "\n", - "add_three\n", + "\n", + "add_three\n", "\n", "\n", "\n", "clusteradd_to_n1add_oneInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusteradd_to_n1add_oneOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusteradd_to_n1add_oneInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusteradd_to_n1add_twoInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusteradd_to_n1add_oneOutputsWithInjectionran->clusteradd_to_n1add_twoInputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusteradd_to_n1add_oneOutputsWithInjectionresult\n", - "\n", - "result\n", + "\n", + "result\n", "\n", "\n", "\n", "clusteradd_to_n1add_twoInputsx\n", - "\n", - "x\n", + "\n", + "x\n", "\n", "\n", "\n", "clusteradd_to_n1add_oneOutputsWithInjectionresult->clusteradd_to_n1add_twoInputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusteradd_to_n1add_twoInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusteradd_to_n1add_twoOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", 
"\n", "\n", "\n", "clusteradd_to_n1add_threeInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusteradd_to_n1add_twoOutputsWithInjectionran->clusteradd_to_n1add_threeInputsaccumulate_and_run\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusteradd_to_n1add_twoOutputsWithInjectionresult\n", - "\n", - "result\n", + "\n", + "result\n", "\n", "\n", "\n", "clusteradd_to_n1add_twoOutputsWithInjectionresult->clusteradd_to_n1OutputsWithInjectionadd_two\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusteradd_to_n1add_threeInputsx\n", - "\n", - "x\n", + "\n", + "x\n", "\n", "\n", "\n", "clusteradd_to_n1add_twoOutputsWithInjectionresult->clusteradd_to_n1add_threeInputsx\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusteradd_to_n1add_threeInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusteradd_to_n1add_threeOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusteradd_to_n1add_threeOutputsWithInjectionresult\n", - "\n", - "result\n", + "\n", + "result\n", "\n", "\n", "\n", "clusteradd_to_n1add_threeOutputsWithInjectionresult->clusteradd_to_n1OutputsWithInjectionadd_three\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - "" + "" ] }, "execution_count": 52, @@ -3899,7 +3900,7 @@ "output_type": "stream", "text": [ "None 1\n", - " 5\n" + " 5\n" ] } ], @@ -3985,7 +3986,7 @@ "output_type": "stream", "text": [ "None 1\n", - " 5\n", + " 5\n", "Finally 5\n", "b (Add):\n", "Inputs ['obj', 'other']\n", @@ -4046,7 +4047,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "6.017223608003405\n" + "6.015308832982555\n" ] } ], @@ -4078,7 +4079,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "4.0332591559999855\n" + "3.8395268750027753\n" ] } ], @@ -4753,7 +4754,7 @@ "\n", "\n", - "\n", "\n", "\n", "clusterAddWhileLessThanInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddWhileLessThanOutputsWithInjection\n", @@ -4787,24 +4788,24 @@ "\n", "clusterAddWhileLessThanbody\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "body: Add\n", + "\n", + "body: Add\n", "\n", "\n", "clusterAddWhileLessThanbodyInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddWhileLessThanbodyOutputsWithInjection\n", @@ -4842,46 +4843,46 @@ "\n", "clusterAddWhileLessThanconditionOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddWhileLessThanswitch\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "switch: If\n", + "\n", + "switch: If\n", "\n", "\n", "clusterAddWhileLessThanswitchInputs\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "Inputs\n", + "\n", + "Inputs\n", "\n", "\n", "clusterAddWhileLessThanswitchOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "clusterAddWhileLessThanhistory\n", @@ -4908,19 +4909,19 @@ "\n", "clusterAddWhileLessThanhistoryOutputsWithInjection\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "\n", - "\n", - "OutputsWithInjection\n", + "\n", + "OutputsWithInjection\n", "\n", "\n", "\n", "clusterAddWhileLessThanInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", 
"\n", "\n", @@ -4932,14 +4933,14 @@ "\n", "\n", "clusterAddWhileLessThanInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddWhileLessThanInputsa\n", - "\n", - "a\n", + "\n", + "a\n", "\n", "\n", "\n", @@ -4950,34 +4951,34 @@ "\n", "\n", "clusterAddWhileLessThanInputsa->clusterAddWhileLessThanbodyInputsobj\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddWhileLessThanInputsb\n", - "\n", - "b\n", + "\n", + "b\n", "\n", "\n", "\n", "clusterAddWhileLessThanbodyInputsother\n", - "\n", - "other\n", + "\n", + "other\n", "\n", "\n", "\n", "clusterAddWhileLessThanInputsb->clusterAddWhileLessThanbodyInputsother\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddWhileLessThanInputscap\n", - "\n", - "cap\n", + "\n", + "cap\n", "\n", "\n", "\n", @@ -4988,9 +4989,9 @@ "\n", "\n", "clusterAddWhileLessThanInputscap->clusterAddWhileLessThanconditionInputsother\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -5020,15 +5021,15 @@ "\n", "\n", "clusterAddWhileLessThanconditionInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddWhileLessThanbodyOutputsWithInjectionran->clusterAddWhileLessThanconditionInputsrun\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -5092,123 +5093,123 @@ "\n", "\n", "clusterAddWhileLessThanconditionOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddWhileLessThanconditionInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddWhileLessThanswitchInputsrun\n", - "\n", - "run\n", + "\n", + "run\n", "\n", "\n", "\n", "clusterAddWhileLessThanconditionOutputsWithInjectionran->clusterAddWhileLessThanswitchInputsrun\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddWhileLessThanconditionOutputsWithInjectionlt\n", - "\n", - "lt\n", + "\n", + "lt\n", "\n", "\n", "\n", "clusterAddWhileLessThanswitchInputscondition\n", - "\n", - "condition\n", + "\n", + "condition\n", "\n", "\n", "\n", "clusterAddWhileLessThanconditionOutputsWithInjectionlt->clusterAddWhileLessThanswitchInputscondition\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "clusterAddWhileLessThanswitchOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddWhileLessThanswitchInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddWhileLessThanswitchOutputsWithInjectiontrue\n", - "\n", - "true\n", + "\n", + "true\n", "\n", "\n", "\n", "clusterAddWhileLessThanswitchOutputsWithInjectiontrue->clusterAddWhileLessThanbodyInputsrun\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "\n", "clusterAddWhileLessThanswitchOutputsWithInjectionfalse\n", - "\n", - "false\n", + "\n", + "false\n", "\n", "\n", "\n", "clusterAddWhileLessThanswitchOutputsWithInjectiontruth\n", - "\n", - "truth\n", + "\n", + "truth\n", "\n", "\n", "\n", "clusterAddWhileLessThanhistoryOutputsWithInjectionran\n", - "\n", - "ran\n", + "\n", + "ran\n", "\n", "\n", "\n", "\n", "clusterAddWhileLessThanhistoryInputsaccumulate_and_run\n", - "\n", - "accumulate_and_run\n", + "\n", + "accumulate_and_run\n", "\n", "\n", "\n", "clusterAddWhileLessThanhistoryInputsexisting\n", - "\n", - "existing\n", + "\n", + "existing\n", "\n", "\n", "\n", 
"clusterAddWhileLessThanhistoryOutputsWithInjectionlist\n", - "\n", - "list\n", + "\n", + "list\n", "\n", "\n", "\n", "clusterAddWhileLessThanhistoryOutputsWithInjectionlist->clusterAddWhileLessThanhistoryInputsexisting\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - "" + "" ] }, "execution_count": 65, @@ -5343,6 +5344,114 @@ "reloaded.outputs.greater.value" ] }, + { + "cell_type": "markdown", + "id": "9a9ba00d-842c-4e75-a220-ea7da32b9ab6", + "metadata": {}, + "source": [ + "# Caching\n", + "\n", + "By default, all nodes exploit caching. I.e. when they run they save a fresh dictionary of their input values; in all subsequent runs if the dictionary of their current input values matches (`==`) that last-used dictionary, they skip executing altogether and leverage their existing outputs.\n", + "\n", + "Any changes to the inputs will obviously stop the cache from being retrieved, but for `Composite` nodes it is also reset if any child nodes are added/removed/replaced.\n", + "\n", + "Note that since we do a simple `==` on the dictionary of input values, if your workflow non-idempotently passes around mutable data, it's possible you'll wind up in a situation where you get a false cache hit.\n", + "\n", + "Caching behaviour can be defined at the class-level as a default, but can be overridden for individual nodes. Let's take a look:" + ] + }, + { + "cell_type": "code", + "execution_count": 71, + "id": "5a3b01be-6539-4617-98c0-30ef008b394d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'always_new__rand': 598, 'cached__rand': 307}" + ] + }, + "execution_count": 71, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import random \n", + "\n", + "@Workflow.wrap.as_function_node(use_cache=False)\n", + "def Randint(low=0, high=999):\n", + " rand = random.randint(low, high)\n", + " return rand\n", + "\n", + "wf = Workflow(\"mixed_caching\")\n", + "wf.use_cache = False # Turn _off_ caching for the whole workflow!\n", + "\n", + "wf.always_new = Randint()\n", + "wf.cached = Randint()\n", + "wf.cached.use_cache = True # Turn _on_ caching for this node\n", + "\n", + "wf()" + ] + }, + { + "cell_type": "markdown", + "id": "a556920b-3d8a-42b2-b480-ae9b95ea1207", + "metadata": {}, + "source": [ + "Running the same workflow again, we see that the cached node just keeps returning the same \"random\" number, while the un-cached node gives us something new" + ] + }, + { + "cell_type": "code", + "execution_count": 72, + "id": "3bf5d930-901b-4067-be74-b82847bc82fd", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'always_new__rand': 492, 'cached__rand': 307}" + ] + }, + "execution_count": 72, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "wf()" + ] + }, + { + "cell_type": "markdown", + "id": "8ccfd26a-bace-4eba-922e-7fb6f950cf93", + "metadata": {}, + "source": [ + "If we look into the caching data, we can see that the non-caching node has not stored any inputs and does not register a cache hit; even if we had previously cached something, if we switch to `use_cache = False`, we won't even look for the cache hit but will just give new data!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 73, + "id": "60adbd5e-94d9-44fd-847c-ebc4b9b3ad2c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "always_new {'low': 0, 'high': 999} None False\n", + "cached {'low': 0, 'high': 999} {'low': 0, 'high': 999} True\n" + ] + } + ], + "source": [ + "for node in wf:\n", + " print(node.label, node.inputs.to_value_dict(), node.cached_inputs, node.cache_hit)" + ] + }, { "cell_type": "markdown", "id": "f447531e-3e8c-4c7e-a579-5f9c56b75a5b", @@ -5387,7 +5496,7 @@ }, { "cell_type": "code", - "execution_count": 70, + "execution_count": 74, "id": "c8196054-aff3-4d39-a872-b428d329dac9", "metadata": {}, "outputs": [], @@ -5397,7 +5506,7 @@ }, { "cell_type": "code", - "execution_count": 71, + "execution_count": 75, "id": "ffd741a3-b086-4ed0-9a62-76143a3705b2", "metadata": {}, "outputs": [], @@ -5414,7 +5523,7 @@ }, { "cell_type": "code", - "execution_count": 72, + "execution_count": 76, "id": "3a22c622-f8c1-449b-a910-c52beb6a09c3", "metadata": {}, "outputs": [], @@ -5436,7 +5545,7 @@ }, { "cell_type": "code", - "execution_count": 73, + "execution_count": 77, "id": "0999d3e8-3a5a-451d-8667-a01dae7c1193", "metadata": {}, "outputs": [], diff --git a/pyiron_workflow/node.py b/pyiron_workflow/node.py index 68a4d7b4..e5f3e955 100644 --- a/pyiron_workflow/node.py +++ b/pyiron_workflow/node.py @@ -13,6 +13,7 @@ from typing import Any, Literal, Optional, TYPE_CHECKING from pyiron_snippets.colors import SeabornColors +from pyiron_snippets.dotdict import DotDict from pyiron_workflow.draw import Node as GraphvizNode from pyiron_workflow.logging import logger @@ -34,6 +35,7 @@ import graphviz from pyiron_snippets.files import DirectoryObject + from pyiron_workflow.channels import OutputSignal from pyiron_workflow.nodes.composite import Composite @@ -128,6 +130,8 @@ class Node( - NOTE: Don't forget to :meth:`shutdown` any created executors outside of a `with` context when you're done with them; we give a convenience method for this. + - Nodes can optionally cache their input to skip running altogether and use + existing output when their current input matches the cached input. - Nodes created from a registered package store their package identifier as a class attribute. - [ALPHA FEATURE] Nodes can be saved to and loaded from file if python >= 3.11. @@ -261,6 +265,11 @@ class Node( Additional signal channels in derived classes can be added to :attr:`signals.inputs` and :attr:`signals.outputs` after this mixin class is initialized. + use_cache (bool): Whether or not to cache the inputs and, when the current + inputs match the cached input (by `==` comparison), to bypass running the + node and simply continue using the existing outputs. Note that you may be + able to trigger a false cache hit in some special case of non-idempotent + nodes working on mutable data. 
Methods: __call__: An alias for :meth:`pull` that aggressively runs upstream nodes even @@ -303,6 +312,7 @@ class Node( """ package_identifier = None + use_cache = True # This isn't nice, just a technical necessity in the current implementation # Eventually, of course, this needs to be _at least_ file-format independent @@ -336,6 +346,7 @@ def __init__( storage_backend=storage_backend, ) self.save_after_run = save_after_run + self.cached_inputs = None self._user_data = {} # A place for power-users to bypass node-injection self._setup_node() @@ -491,6 +502,20 @@ def run( if fetch_input: self.inputs.fetch() + if self.use_cache and self.cache_hit: # Read and use cache + + if self.parent is None and emit_ran_signal: + self.emit() + elif self.parent is not None: + self.parent.register_child_starting(self) + self.parent.register_child_finished(self) + if emit_ran_signal: + self.parent.register_child_emitting(self) + + return self._outputs_to_run_return() + elif self.use_cache: # Write cache and continue + self.cached_inputs = self.inputs.to_value_dict() + if self.parent is not None: self.parent.register_child_starting(self) @@ -603,6 +628,13 @@ def run_data_tree(self, run_parent_trees_too=False) -> None: if self.parent is not None: self.parent.starting_nodes = parent_starting_nodes + @property + def cache_hit(self): + return self.inputs.to_value_dict() == self.cached_inputs + + def _outputs_to_run_return(self): + return DotDict(self.outputs.to_value_dict()) + def _finish_run(self, run_output: tuple | Future) -> Any | tuple: try: processed_output = super()._finish_run(run_output=run_output) @@ -616,9 +648,9 @@ def _finish_run(self, run_output: tuple | Future) -> Any | tuple: def _finish_run_and_emit_ran(self, run_output: tuple | Future) -> Any | tuple: processed_output = self._finish_run(run_output) if self.parent is None: - self.signals.output.ran() + self.emit() else: - self.parent.register_child_emitting_ran(self) + self.parent.register_child_emitting(self) return processed_output _finish_run_and_emit_ran.__doc__ = ( @@ -629,6 +661,14 @@ def _finish_run_and_emit_ran(self, run_output: tuple | Future) -> Any | tuple: """ ) + @property + def emitting_channels(self) -> tuple[OutputSignal]: + return (self.signals.output.ran,) + + def emit(self): + for channel in self.emitting_channels: + channel() + def execute(self, *args, **kwargs): """ A shortcut for :meth:`run` with particular flags. diff --git a/pyiron_workflow/nodes/composite.py b/pyiron_workflow/nodes/composite.py index e1fbb079..fb05331e 100644 --- a/pyiron_workflow/nodes/composite.py +++ b/pyiron_workflow/nodes/composite.py @@ -155,8 +155,8 @@ def on_run(self): while len(self.running_children) > 0 or len(self.signal_queue) > 0: try: - ran_signal, receiver = self.signal_queue.pop(0) - receiver(ran_signal) + firing, receiving = self.signal_queue.pop(0) + receiving(firing) except IndexError: # The signal queue is empty, but there is still someone running... sleep(self._child_sleep_interval) @@ -192,17 +192,18 @@ def register_child_finished(self, child: Node) -> None: f"{self.provenance_by_execution}, {self.provenance_by_completion}" ) from e - def register_child_emitting_ran(self, child: Node) -> None: + def register_child_emitting(self, child: Node) -> None: """ - To be called by children when they want to emit their `ran` signal. + To be called by children when they want to emit their signals. Args: child [Node]: The child that is finished and would like to fire its `ran` - signal. 
Should always be a child of `self`, but this is not explicitly - verified at runtime. + signal (and possibly others). Should always be a child of `self`, but + this is not explicitly verified at runtime. """ - for conn in child.signals.output.ran.connections: - self.signal_queue.append((child.signals.output.ran, conn)) + for firing in child.emitting_channels: + for receiving in firing.connections: + self.signal_queue.append((firing, receiving)) @property def run_args(self) -> tuple[tuple, dict]: @@ -211,7 +212,7 @@ def run_args(self) -> tuple[tuple, dict]: def process_run_result(self, run_output): if run_output is not self: self._parse_remotely_executed_self(run_output) - return DotDict(self.outputs.to_value_dict()) + return self._outputs_to_run_return() def _parse_remotely_executed_self(self, other_self): # Un-parent existing nodes before ditching them @@ -259,6 +260,7 @@ def add_child( f"Only new {Node.__name__} instances may be added, but got " f"{type(child)}." ) + self.cached_inputs = None # Reset cache after graph change return super().add_child(child, label=label, strict_naming=strict_naming) def remove_child(self, child: Node | str) -> list[tuple[Channel, Channel]]: @@ -276,6 +278,7 @@ def remove_child(self, child: Node | str) -> list[tuple[Channel, Channel]]: disconnected = child.disconnect() if child in self.starting_nodes: self.starting_nodes.remove(child) + self.cached_inputs = None # Reset cache after graph change return disconnected def replace_child( @@ -354,6 +357,10 @@ def replace_child( for sending_channel, receiving_channel in inbound_links + outbound_links: sending_channel.value_receiver = receiving_channel + # Clear caches + self.cached_inputs = None + replacement.cached_inputs = None + return owned_node def executor_shutdown(self, wait=True, *, cancel_futures=False): diff --git a/pyiron_workflow/nodes/for_loop.py b/pyiron_workflow/nodes/for_loop.py index b6ece86d..6ab10199 100644 --- a/pyiron_workflow/nodes/for_loop.py +++ b/pyiron_workflow/nodes/for_loop.py @@ -427,6 +427,7 @@ def for_node_factory( iter_on: tuple[str, ...] = (), zip_on: tuple[str, ...] = (), output_column_map: dict | None = None, + use_cache: bool = True, /, ): combined_docstring = ( @@ -444,6 +445,7 @@ def for_node_factory( "_iter_on": iter_on, "_zip_on": zip_on, "__doc__": combined_docstring, + "use_cache": use_cache, }, {"output_column_map": output_column_map}, ) @@ -455,6 +457,7 @@ def for_node( iter_on=(), zip_on=(), output_column_map: Optional[dict[str, str]] = None, + use_cache: bool = True, **node_kwargs, ): """ @@ -482,6 +485,8 @@ def for_node( Necessary iff the body node has the same label for an output channel and an input channel being looped over. (Default is None, just use the output channel labels as columb names.) + use_cache (bool): Whether this node should default to caching its values. + (Default is True.) **node_kwargs: Regular keyword node arguments. 
Returns: @@ -555,6 +560,8 @@ def for_node( """ for_node_factory.clear(_for_node_class_name(body_node_class, iter_on, zip_on)) - cls = for_node_factory(body_node_class, iter_on, zip_on, output_column_map) + cls = for_node_factory( + body_node_class, iter_on, zip_on, output_column_map, use_cache + ) cls.preview_io() return cls(*node_args, **node_kwargs) diff --git a/pyiron_workflow/nodes/function.py b/pyiron_workflow/nodes/function.py index 0d0da086..d779f2e8 100644 --- a/pyiron_workflow/nodes/function.py +++ b/pyiron_workflow/nodes/function.py @@ -327,7 +327,13 @@ def process_run_result(self, function_output: Any | tuple) -> Any | tuple: (function_output,) if len(self.outputs) == 1 else function_output, ): out.value = value - return function_output + return self._outputs_to_run_return() + + def _outputs_to_run_return(self): + output = tuple(self.outputs.to_value_dict().values()) + if len(output) == 1: + output = output[0] + return output def to_dict(self): return { @@ -348,7 +354,11 @@ def color(self) -> str: @classfactory def function_node_factory( - node_function: callable, validate_output_labels: bool, /, *output_labels + node_function: callable, + validate_output_labels: bool, + use_cache: bool = True, + /, + *output_labels, ): """ Create a new :class:`Function` node class based on the given node function. This @@ -358,6 +368,8 @@ def function_node_factory( node_function (callable): The function to be wrapped by the node. validate_output_labels (bool): Flag to indicate if output labels should be validated. + use_cache (bool): Whether nodes of this type should default to caching their + values. *output_labels: Optional labels for the function's output channels. Returns: @@ -373,12 +385,17 @@ def function_node_factory( "_output_labels": None if len(output_labels) == 0 else output_labels, "_validate_output_labels": validate_output_labels, "__doc__": node_function.__doc__, + "use_cache": use_cache, }, {}, ) -def as_function_node(*output_labels: str, validate_output_labels=True): +def as_function_node( + *output_labels: str, + validate_output_labels=True, + use_cache=True, +): """ Decorator to create a new :class:`Function` node class from a given function. This function gets executed on each :meth:`run` of the resulting function. @@ -388,6 +405,8 @@ def as_function_node(*output_labels: str, validate_output_labels=True): validate_output_labels (bool): Flag to indicate if output labels should be validated against the return values in the function node source code. Defaults to True. + use_cache (bool): Whether nodes of this type should default to caching their + values. (Default is True.) Returns: Callable: A decorator that converts a function into a :class:`Function` node @@ -397,7 +416,7 @@ def as_function_node(*output_labels: str, validate_output_labels=True): def decorator(node_function): function_node_factory.clear(node_function.__name__) # Force a fresh class factory_made = function_node_factory( - node_function, validate_output_labels, *output_labels + node_function, validate_output_labels, use_cache, *output_labels ) factory_made._class_returns_from_decorated_function = node_function factory_made.preview_io() @@ -411,6 +430,7 @@ def function_node( *node_args, output_labels: str | tuple[str, ...] | None = None, validate_output_labels: bool = True, + use_cache: bool = True, **node_kwargs, ): """ @@ -427,6 +447,8 @@ def function_node( validated against the return values in the function source code. Defaults to True. 
Disabling this may be useful if the source code is not available or if the function has multiple return statements. + use_cache (bool): Whether this node should default to caching its values. + (Default is True.) **node_kwargs: Keyword arguments for the :class:`Function` initialization -- parsed as node input data when the keyword matches an input channel. @@ -439,7 +461,7 @@ def function_node( output_labels = (output_labels,) function_node_factory.clear(node_function.__name__) # Force a fresh class factory_made = function_node_factory( - node_function, validate_output_labels, *output_labels + node_function, validate_output_labels, use_cache, *output_labels ) factory_made.preview_io() return factory_made(*node_args, **node_kwargs) diff --git a/pyiron_workflow/nodes/macro.py b/pyiron_workflow/nodes/macro.py index a5fd973a..9a0eeea9 100644 --- a/pyiron_workflow/nodes/macro.py +++ b/pyiron_workflow/nodes/macro.py @@ -474,7 +474,11 @@ def __setstate__(self, state): @classfactory def macro_node_factory( - graph_creator: callable, validate_output_labels: bool, /, *output_labels: str + graph_creator: callable, + validate_output_labels: bool, + use_cache: bool = True, + /, + *output_labels: str, ): """ Create a new :class:`Macro` subclass using the given graph creator function. @@ -483,6 +487,8 @@ def macro_node_factory( graph_creator (callable): Function to create the graph for the :class:`Macro`. validate_output_labels (bool): Whether to validate the output labels against the return values of the wrapped function. + use_cache (bool): Whether nodes of this type should default to caching their + values. output_labels (tuple[str, ...]): Optional labels for the :class:`Macro`'s outputs. @@ -499,18 +505,23 @@ def macro_node_factory( "_output_labels": None if len(output_labels) == 0 else output_labels, "_validate_output_labels": validate_output_labels, "__doc__": graph_creator.__doc__, + "use_cache": use_cache, }, {}, ) -def as_macro_node(*output_labels: str, validate_output_labels: bool = True): +def as_macro_node( + *output_labels: str, validate_output_labels: bool = True, use_cache: bool = True +): """ Decorator to convert a function into a :class:`Macro` node. Args: *output_labels (str): Optional labels for the :class:`Macro`'s outputs. validate_output_labels (bool): Whether to validate the output labels. + use_cache (bool): Whether nodes of this type should default to caching their + values. (Default is True.) Returns: callable: A decorator that converts a function into a Macro node. @@ -519,7 +530,7 @@ def as_macro_node(*output_labels: str, validate_output_labels: bool = True): def decorator(graph_creator): macro_node_factory.clear(graph_creator.__name__) # Force a fresh class factory_made = macro_node_factory( - graph_creator, validate_output_labels, *output_labels + graph_creator, validate_output_labels, use_cache, *output_labels ) factory_made._class_returns_from_decorated_function = graph_creator factory_made.preview_io() @@ -533,6 +544,7 @@ def macro_node( *node_args, output_labels: str | tuple[str, ...] | None = None, validate_output_labels: bool = True, + use_cache: bool = True, **node_kwargs, ): """ @@ -547,6 +559,8 @@ def macro_node( the decorated function's source code. validate_output_labels (bool): Whether to validate the output labels. Defaults to True. + use_cache (bool): Whether this node should default to caching its values. + (Default is True.) 
node_kwargs: Keyword arguments for the :class:`Macro` initialization -- parsed as node input data when the keyword matches an input channel. @@ -559,7 +573,7 @@ def macro_node( output_labels = (output_labels,) macro_node_factory.clear(graph_creator.__name__) # Force a fresh class factory_made = macro_node_factory( - graph_creator, validate_output_labels, *output_labels + graph_creator, validate_output_labels, use_cache, *output_labels ) factory_made.preview_io() return factory_made(*node_args, **node_kwargs) diff --git a/pyiron_workflow/nodes/standard.py b/pyiron_workflow/nodes/standard.py index 6e7bb8af..49518d96 100644 --- a/pyiron_workflow/nodes/standard.py +++ b/pyiron_workflow/nodes/standard.py @@ -61,19 +61,12 @@ def node_function(condition): truth = bool(condition) return truth - def process_run_result(self, function_output): - """ - Process the output as usual, then fire signals accordingly. - - Args: - function_output: The result of the node function. - """ - super().process_run_result(function_output) - + @property + def emitting_channels(self) -> tuple[OutputSignal]: if self.outputs.truth.value: - self.signals.output.true() + return (*super().emitting_channels, self.signals.output.true) else: - self.signals.output.false() + return (*super().emitting_channels, self.signals.output.false) @as_function_node("list") diff --git a/pyiron_workflow/nodes/transform.py b/pyiron_workflow/nodes/transform.py index 992d0d91..3dcb4f13 100644 --- a/pyiron_workflow/nodes/transform.py +++ b/pyiron_workflow/nodes/transform.py @@ -108,22 +108,27 @@ def _build_outputs_preview(cls) -> dict[str, Any]: @builds_class_io @classfactory -def inputs_to_list_factory(n: int, /) -> type[InputsToList]: +def inputs_to_list_factory(n: int, use_cache: bool = True, /) -> type[InputsToList]: return ( f"{InputsToList.__name__}{n}", (InputsToList,), - {"_length": n}, + { + "_length": n, + "use_cache": use_cache, + }, {}, ) -def inputs_to_list(n: int, *node_args, **node_kwargs): +def inputs_to_list(n: int, /, *node_args, use_cache: bool = True, **node_kwargs): """ Creates and returns an instance of a dynamically generated :class:`InputsToList` subclass with a specified number of inputs. Args: n (int): Number of input channels. + use_cache (bool): Whether this node should default to caching its values. + (Default is True.) *node_args: Positional arguments for the node instance. **node_kwargs: Keyword arguments for the node instance. @@ -131,27 +136,34 @@ def inputs_to_list(n: int, *node_args, **node_kwargs): InputsToList: An instance of the dynamically created :class:`InputsToList` subclass. """ - return inputs_to_list_factory(n)(*node_args, **node_kwargs) + return inputs_to_list_factory(n, use_cache)(*node_args, **node_kwargs) @builds_class_io @classfactory -def list_to_outputs_factory(n: int, /) -> type[ListToOutputs]: +def list_to_outputs_factory(n: int, use_cache: bool = True, /) -> type[ListToOutputs]: return ( f"{ListToOutputs.__name__}{n}", (ListToOutputs,), - {"_length": n}, + { + "_length": n, + "use_cache": use_cache, + }, {}, ) -def list_to_outputs(n: int, /, *node_args, **node_kwargs) -> ListToOutputs: +def list_to_outputs( + n: int, /, *node_args, use_cache: bool = True, **node_kwargs +) -> ListToOutputs: """ Creates and returns an instance of a dynamically generated :class:`ListToOutputs` subclass with a specified number of outputs. Args: n (int): Number of output channels. + use_cache (bool): Whether this node should default to caching its values. + (Default is True.) 
*node_args: Positional arguments for the node instance. **node_kwargs: Keyword arguments for the node instance. @@ -159,7 +171,7 @@ def list_to_outputs(n: int, /, *node_args, **node_kwargs) -> ListToOutputs: ListToOutputs: An instance of the dynamically created :class:`ListToOutputs` subclass. """ - return list_to_outputs_factory(n)(*node_args, **node_kwargs) + return list_to_outputs_factory(n, use_cache)(*node_args, **node_kwargs) class InputsToDict(FromManyInputs, ABC): @@ -209,6 +221,7 @@ def hash_specification( def inputs_to_dict_factory( input_specification: list[str] | dict[str, tuple[Any | None, Any | NOT_DATA]], class_name_suffix: str | None, + use_cache: bool = True, /, ) -> type[InputsToDict]: if class_name_suffix is None: @@ -218,7 +231,10 @@ def inputs_to_dict_factory( return ( f"{InputsToDict.__name__}{class_name_suffix}", (InputsToDict,), - {"_input_specification": input_specification}, + { + "_input_specification": input_specification, + "use_cache": use_cache, + }, {}, ) @@ -227,6 +243,7 @@ def inputs_to_dict( input_specification: list[str] | dict[str, tuple[Any | None, Any | NOT_DATA]], *node_args, class_name_suffix: Optional[str] = None, + use_cache: bool = True, **node_kwargs, ): """ @@ -243,12 +260,14 @@ def inputs_to_dict( *node_args: Other args for the node instance. class_name_suffix (str | None): The suffix to use in the class name. (Default is None, try to generate the suffix by hashing :param:`input_specification`. + use_cache (bool): Whether this node should default to caching its values. + (Default is True.) **node_kwargs: Other kwargs for the node instance. Returns: (InputsToDict): A new node for transforming inputs into a dictionary. """ - cls = inputs_to_dict_factory(input_specification, class_name_suffix) + cls = inputs_to_dict_factory(input_specification, class_name_suffix, use_cache) cls.preview_io() return cls(*node_args, **node_kwargs) @@ -282,16 +301,21 @@ def _build_inputs_preview(cls) -> dict[str, tuple[Any, Any]]: @classfactory -def inputs_to_dataframe_factory(n: int, /) -> type[InputsToDataframe]: +def inputs_to_dataframe_factory( + n: int, use_cache: bool = True, / +) -> type[InputsToDataframe]: return ( f"{InputsToDataframe.__name__}{n}", (InputsToDataframe,), - {"_length": n}, + { + "_length": n, + "use_cache": use_cache, + }, {}, ) -def inputs_to_dataframe(n: int, *node_args, **node_kwargs): +def inputs_to_dataframe(n: int, use_cache: bool = True, *node_args, **node_kwargs): """ Creates and returns an instance of a dynamically generated :class:`InputsToDataframe` subclass with a specified number of inputs, each being a @@ -299,6 +323,8 @@ def inputs_to_dataframe(n: int, *node_args, **node_kwargs): Args: n (int): Number of input channels. + use_cache (bool): Whether this node should default to caching its values. + (Default is True.) *node_args: Positional arguments for the node instance. **node_kwargs: Keyword arguments for the node instance. @@ -306,7 +332,7 @@ def inputs_to_dataframe(n: int, *node_args, **node_kwargs): InputsToDataframe: An instance of the dynamically created :class:`InputsToDataframe` subclass. 
""" - cls = inputs_to_dataframe_factory(n) + cls = inputs_to_dataframe_factory(n, use_cache) cls.preview_io() return cls(*node_args, **node_kwargs) @@ -351,7 +377,9 @@ def _build_inputs_preview(cls) -> dict[str, tuple[Any, Any]]: @classfactory -def dataclass_node_factory(dataclass: type, /) -> type[DataclassNode]: +def dataclass_node_factory( + dataclass: type, use_cache: bool = True, / +) -> type[DataclassNode]: if not is_dataclass(dataclass): raise TypeError( f"{DataclassNode} expected to get a dataclass but {dataclass} failed " @@ -369,12 +397,13 @@ def dataclass_node_factory(dataclass: type, /) -> type[DataclassNode]: "dataclass": dataclass, "_output_type_hint": dataclass, "__doc__": dataclass.__doc__, + "use_cache": use_cache, }, {}, ) -def as_dataclass_node(dataclass: type): +def as_dataclass_node(dataclass: type, use_cache: bool = True): """ Decorates a dataclass as a dataclass node -- i.e. a node whose inputs correspond to dataclass fields and whose output is an instance of the dataclass. @@ -387,6 +416,8 @@ def as_dataclass_node(dataclass: type): Args: dataclass (type): A dataclass, i.e. class passing `dataclasses.is_dataclass`. + use_cache (bool): Whether nodes of this type should default to caching their + values. (Default is True.) Returns: (type[DataclassNode]): A :class:`DataclassNode` subclass whose instances @@ -423,12 +454,12 @@ def as_dataclass_node(dataclass: type): >>> f(necessary="input as a node kwarg") Foo(necessary='input as a node kwarg', bar='bar', answer=42, complex_=[1, 2, 3]) """ - cls = dataclass_node_factory(dataclass) + cls = dataclass_node_factory(dataclass, use_cache) cls.preview_io() return cls -def dataclass_node(dataclass: type, *node_args, **node_kwargs): +def dataclass_node(dataclass: type, use_cache: bool = True, *node_args, **node_kwargs): """ Builds a dataclass node from a dataclass -- i.e. a node whose inputs correspond to dataclass fields and whose output is an instance of the dataclass. @@ -441,6 +472,8 @@ def dataclass_node(dataclass: type, *node_args, **node_kwargs): Args: dataclass (type): A dataclass, i.e. class passing `dataclasses.is_dataclass`. + use_cache (bool): Whether this node should default to caching its values. + (Default is True.) *node_args: Other :class:`Node` positional arguments. **node_kwargs: Other :class:`Node` keyword arguments. 
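The cache added above is deliberately simple: on each run the node compares its current input values against `cached_inputs` by `==` and, on a hit, returns its existing output instead of executing. As a usage sketch only (not part of the patch, and assuming the `Workflow.wrap.as_function_node` decorator API exercised in the tests below), the intended behaviour is roughly:

import time

from pyiron_workflow import Workflow


# Caching is on by default; it could equally be requested explicitly with
# @Workflow.wrap.as_function_node("y", use_cache=True)
@Workflow.wrap.as_function_node("y")
def SlowAddOne(x=0):
    time.sleep(1)  # Stand-in for an expensive calculation
    y = x + 1
    return y


n = SlowAddOne(x=1)
n()     # First run: executes the body (~1 s) and records the input values
n()     # Same inputs -> cache hit, the stored output is returned immediately
n(x=2)  # Changed input -> cache miss, the body runs again and the cache updates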
diff --git a/tests/integration/test_parallel_speedup.py b/tests/integration/test_parallel_speedup.py index cb42e19d..37745da3 100644 --- a/tests/integration/test_parallel_speedup.py +++ b/tests/integration/test_parallel_speedup.py @@ -5,7 +5,7 @@ from pyiron_workflow.channels import NOT_DATA -class TestParallelSpeedup(unittest.TestCase): +class TestSpeedup(unittest.TestCase): def test_speedup(self): def make_workflow(label): @@ -24,9 +24,21 @@ def make_workflow(label): wf.starting_nodes = [wf.a] t0 = perf_counter() wf() - while wf.outputs.d__user_input.value is NOT_DATA: + while wf.running: sleep(0.001) dt_serial = perf_counter() - t0 + t0_cache = perf_counter() + wf() + while wf.running: + sleep(0.001) + dt_cached_serial = perf_counter() - t0_cache + + self.assertLess( + dt_cached_serial, + 0.01 * t, + msg=f"The cache should be trivially fast compared to actual execution of " + f"a sleep node" + ) wf = make_workflow("parallel") wf.d << (wf.a, wf.b, wf.c) @@ -39,10 +51,16 @@ def make_workflow(label): t1 = perf_counter() wf() - while wf.outputs.d__user_input.value is NOT_DATA: + while wf.running: sleep(0.001) dt_parallel = perf_counter() - t1 + t1_cache = perf_counter() + wf() + while wf.running: + sleep(0.001) + dt_cached_parallel = perf_counter() - t1_cache + self.assertLess( dt_parallel, 0.5 * dt_serial, @@ -50,6 +68,12 @@ def make_workflow(label): f"{dt_parallel} and {dt_serial} for parallel and serial times, " f"respectively" ) + self.assertLess( + dt_cached_parallel, + 0.01 * t, + msg=f"The cache should be trivially fast compared to actual execution of " + f"a sleep node" + ) if __name__ == '__main__': diff --git a/tests/integration/test_workflow.py b/tests/integration/test_workflow.py index a87ab335..2e8f5592 100644 --- a/tests/integration/test_workflow.py +++ b/tests/integration/test_workflow.py @@ -1,6 +1,7 @@ import math import pickle import random +import time import unittest from pyiron_workflow._tests import ensure_tests_in_python_path @@ -40,7 +41,7 @@ def test_manually_constructed_cyclic_graph(self): Check that cyclic graphs run. """ - @Workflow.wrap.as_function_node() + @Workflow.wrap.as_function_node(use_cache=False) def randint(low=0, high=20): rand = random.randint(low, high) print(f"Generating random number between {low} and {high}...{rand}!") @@ -62,18 +63,14 @@ def node_function(value, limit=10): value_gt_limit = value > limit return value_gt_limit - def process_run_result(self, function_output): - """ - Process the output as usual, then fire signals accordingly. 
- """ - super().process_run_result(function_output) - + @property + def emitting_channels(self) -> tuple[OutputSignal]: if self.outputs.value_gt_limit.value: print(f"{self.inputs.value.value} > {self.inputs.limit.value}") - self.signals.output.true() + return (*super().emitting_channels, self.signals.output.true) else: print(f"{self.inputs.value.value} <= {self.inputs.limit.value}") - self.signals.output.false() + return (*super().emitting_channels, self.signals.output.false) @Workflow.wrap.as_function_node("sqrt") def sqrt(value=0): @@ -160,6 +157,7 @@ def test_executors(self): wf.b = wf.a + 1 # Injected wf.c = Workflow.create.function_node(foo, wf.b) # Instantiated from function wf.d = Bar(wf.c) # From decorated function + wf.use_cache = False reference_output = wf() @@ -199,6 +197,45 @@ def test_executors(self): for child in wf: child.executor = None + def test_cache(self): + wf = Workflow("tmp") + wf.use_cache = True + wf.a = wf.create.standard.UserInput(0) + wf.b = wf.a + 1 + + first_out = wf() + + @Workflow.wrap.as_function_node("as_string") + def Sleep(t): + time.sleep(t) + return "slept" + + wf.c = Sleep(wf.b) + + second_out = wf() + self.assertNotEqual( + first_out, + second_out, + msg="Even thought the _input_ hasn't changed, we expect to avoid the first " + "(cached) result by virtue of resetting the cache when the body of " + "the composite graph has changed" + ) + + t0 = time.perf_counter() + third_out = wf() + dt = time.perf_counter() - t0 + self.assertEqual( + third_out, + second_out, + msg="This time there is no change and we expect the cached result" + ) + self.assertLess( + dt, + 0.1 * wf.c.inputs.t.value, + msg="And because it used the cache we expect it much faster than the sleep " + "time" + ) + if __name__ == '__main__': unittest.main() diff --git a/tests/unit/test_node.py b/tests/unit/test_node.py index bb7123b8..42ce5fab 100644 --- a/tests/unit/test_node.py +++ b/tests/unit/test_node.py @@ -102,6 +102,9 @@ def test_fetch_input(self): ) def test_check_readiness(self): + n3_cache = self.n3.use_cache + self.n3.use_cache = False + with self.assertRaises( ValueError, msg="When input is not data, we should fail early" @@ -139,6 +142,8 @@ def test_check_readiness(self): msg="After manually resetting the failed state and providing good input, " "running should proceed" ) + + self.n3.use_cache = n3_cache def test_force_local_execution(self): self.n1.executor = ProcessPoolExecutor()
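One caveat, and the reason the cyclic-graph test above passes `use_cache=False` to its random-integer node: the cache keys only on input values, so nodes whose output is not a pure function of their input should opt out. A minimal illustrative sketch (not part of the patch) of that pattern:

import random

from pyiron_workflow import Workflow


@Workflow.wrap.as_function_node("rand", use_cache=False)
def RandInt(low=0, high=20):
    rand = random.randint(low, high)
    return rand


n = RandInt()
n()  # Draws a fresh random number
n()  # Draws again; with caching left on, this call would just return the first draw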