From 90e6c5b07db5e396c4cd736f5f1b933351e58ce7 Mon Sep 17 00:00:00 2001 From: Vijay Prakash Dwivedi Date: Sun, 17 Oct 2021 19:21:47 +0800 Subject: [PATCH] initial commit --- .gitignore | 16 + LICENSE | 21 + README.md | 53 ++ configs/GatedGCN_MOLPCBA_LSPE.json | 41 ++ configs/GatedGCN_MOLPCBA_LapPE.json | 41 ++ configs/GatedGCN_MOLPCBA_NoPE.json | 41 ++ configs/GatedGCN_MOLTOX21_LSPE.json | 41 ++ configs/GatedGCN_MOLTOX21_LapPE.json | 41 ++ configs/GatedGCN_MOLTOX21_NoPE.json | 41 ++ configs/GatedGCN_ZINC_LSPE.json | 41 ++ .../GatedGCN_ZINC_LSPE_withLapEigLoss.json | 41 ++ configs/GatedGCN_ZINC_LapPE.json | 41 ++ configs/GatedGCN_ZINC_NoPE.json | 41 ++ configs/GraphiT_MOLTOX21_LSPE.json | 47 ++ configs/GraphiT_MOLTOX21_NoPE.json | 47 ++ configs/GraphiT_ZINC_LSPE.json | 49 ++ configs/GraphiT_ZINC_NoPE.json | 49 ++ configs/PNA_MOLPCBA_LSPE.json | 47 ++ configs/PNA_MOLPCBA_NoPE.json | 47 ++ configs/PNA_MOLTOX21_LSPE.json | 48 ++ configs/PNA_MOLTOX21_LSPE_withLapEigLoss.json | 48 ++ configs/PNA_MOLTOX21_NoPE.json | 48 ++ configs/PNA_ZINC_LSPE.json | 51 ++ configs/PNA_ZINC_NoPE.json | 51 ++ configs/SAN_MOLTOX21_LSPE.json | 45 ++ configs/SAN_MOLTOX21_NoPE.json | 45 ++ configs/SAN_ZINC_LSPE.json | 47 ++ configs/SAN_ZINC_NoPE.json | 47 ++ data/data.py | 21 + data/molecules.py | 331 ++++++++++++ data/molecules/test.index | 1 + data/molecules/train.index | 1 + data/molecules/val.index | 1 + data/ogb_mol.py | 254 +++++++++ data/script_download_ZINC.sh | 21 + docs/01_repo_installation.md | 84 +++ docs/02_download_datasets.md | 18 + docs/03_run_codes.md | 87 +++ docs/gnn-lspe.png | Bin 0 -> 190513 bytes environment_cpu.yml | 44 ++ environment_gpu.yml | 47 ++ layers/gatedgcn_layer.py | 84 +++ layers/gatedgcn_lspe_layer.py | 133 +++++ layers/graphit_gt_layer.py | 273 ++++++++++ layers/graphit_gt_lspe_layer.py | 325 +++++++++++ layers/mlp_readout_layer.py | 45 ++ layers/pna_layer.py | 270 ++++++++++ layers/pna_lspe_layer.py | 350 ++++++++++++ layers/pna_utils.py | 407 ++++++++++++++ layers/san_gt_layer.py | 267 ++++++++++ layers/san_gt_lspe_layer.py | 318 +++++++++++ main_OGBMOL_graph_classification.py | 504 ++++++++++++++++++ main_ZINC_graph_regression.py | 453 ++++++++++++++++ .../gatedgcn_net.py | 137 +++++ .../graphit_net.py | 138 +++++ nets/OGBMOL_graph_classification/load_net.py | 31 ++ nets/OGBMOL_graph_classification/pna_net.py | 199 +++++++ nets/OGBMOL_graph_classification/san_net.py | 141 +++++ nets/ZINC_graph_regression/gatedgcn_net.py | 171 ++++++ nets/ZINC_graph_regression/graphit_net.py | 145 +++++ nets/ZINC_graph_regression/load_net.py | 31 ++ nets/ZINC_graph_regression/pna_net.py | 168 ++++++ nets/ZINC_graph_regression/san_net.py | 147 +++++ scripts/OGBMOL/script_MOLPCBA_all.sh | 65 +++ scripts/OGBMOL/script_MOLTOX21_all.sh | 95 ++++ .../generate_statistics_OGBMOL.ipynb | 176 ++++++ .../generate_statistics_ZINC.ipynb | 174 ++++++ scripts/TensorBoard/script_tensorboard.sh | 21 + scripts/ZINC/script_ZINC_all.sh | 97 ++++ train/metrics.py | 68 +++ train/train_OGBMOL_graph_classification.py | 131 +++++ train/train_ZINC_graph_regression.py | 84 +++ utils/cleaner_main.py | 102 ++++ utils/plot_util.py | 46 ++ utils/visualize_RWPE_studies.ipynb | 197 +++++++ 75 files changed, 8119 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 README.md create mode 100644 configs/GatedGCN_MOLPCBA_LSPE.json create mode 100644 configs/GatedGCN_MOLPCBA_LapPE.json create mode 100644 configs/GatedGCN_MOLPCBA_NoPE.json create mode 100644 configs/GatedGCN_MOLTOX21_LSPE.json create 
mode 100644 configs/GatedGCN_MOLTOX21_LapPE.json create mode 100644 configs/GatedGCN_MOLTOX21_NoPE.json create mode 100644 configs/GatedGCN_ZINC_LSPE.json create mode 100644 configs/GatedGCN_ZINC_LSPE_withLapEigLoss.json create mode 100644 configs/GatedGCN_ZINC_LapPE.json create mode 100644 configs/GatedGCN_ZINC_NoPE.json create mode 100644 configs/GraphiT_MOLTOX21_LSPE.json create mode 100644 configs/GraphiT_MOLTOX21_NoPE.json create mode 100644 configs/GraphiT_ZINC_LSPE.json create mode 100644 configs/GraphiT_ZINC_NoPE.json create mode 100644 configs/PNA_MOLPCBA_LSPE.json create mode 100644 configs/PNA_MOLPCBA_NoPE.json create mode 100644 configs/PNA_MOLTOX21_LSPE.json create mode 100644 configs/PNA_MOLTOX21_LSPE_withLapEigLoss.json create mode 100644 configs/PNA_MOLTOX21_NoPE.json create mode 100644 configs/PNA_ZINC_LSPE.json create mode 100644 configs/PNA_ZINC_NoPE.json create mode 100644 configs/SAN_MOLTOX21_LSPE.json create mode 100644 configs/SAN_MOLTOX21_NoPE.json create mode 100644 configs/SAN_ZINC_LSPE.json create mode 100644 configs/SAN_ZINC_NoPE.json create mode 100644 data/data.py create mode 100644 data/molecules.py create mode 100644 data/molecules/test.index create mode 100644 data/molecules/train.index create mode 100644 data/molecules/val.index create mode 100644 data/ogb_mol.py create mode 100644 data/script_download_ZINC.sh create mode 100644 docs/01_repo_installation.md create mode 100644 docs/02_download_datasets.md create mode 100644 docs/03_run_codes.md create mode 100644 docs/gnn-lspe.png create mode 100644 environment_cpu.yml create mode 100644 environment_gpu.yml create mode 100644 layers/gatedgcn_layer.py create mode 100644 layers/gatedgcn_lspe_layer.py create mode 100644 layers/graphit_gt_layer.py create mode 100644 layers/graphit_gt_lspe_layer.py create mode 100644 layers/mlp_readout_layer.py create mode 100644 layers/pna_layer.py create mode 100644 layers/pna_lspe_layer.py create mode 100644 layers/pna_utils.py create mode 100644 layers/san_gt_layer.py create mode 100644 layers/san_gt_lspe_layer.py create mode 100644 main_OGBMOL_graph_classification.py create mode 100644 main_ZINC_graph_regression.py create mode 100644 nets/OGBMOL_graph_classification/gatedgcn_net.py create mode 100644 nets/OGBMOL_graph_classification/graphit_net.py create mode 100644 nets/OGBMOL_graph_classification/load_net.py create mode 100644 nets/OGBMOL_graph_classification/pna_net.py create mode 100644 nets/OGBMOL_graph_classification/san_net.py create mode 100644 nets/ZINC_graph_regression/gatedgcn_net.py create mode 100644 nets/ZINC_graph_regression/graphit_net.py create mode 100644 nets/ZINC_graph_regression/load_net.py create mode 100644 nets/ZINC_graph_regression/pna_net.py create mode 100644 nets/ZINC_graph_regression/san_net.py create mode 100644 scripts/OGBMOL/script_MOLPCBA_all.sh create mode 100644 scripts/OGBMOL/script_MOLTOX21_all.sh create mode 100644 scripts/StatisticalResults/generate_statistics_OGBMOL.ipynb create mode 100644 scripts/StatisticalResults/generate_statistics_ZINC.ipynb create mode 100644 scripts/TensorBoard/script_tensorboard.sh create mode 100644 scripts/ZINC/script_ZINC_all.sh create mode 100644 train/metrics.py create mode 100644 train/train_OGBMOL_graph_classification.py create mode 100644 train/train_ZINC_graph_regression.py create mode 100644 utils/cleaner_main.py create mode 100644 utils/plot_util.py create mode 100644 utils/visualize_RWPE_studies.ipynb diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..62e43e1 --- /dev/null 
+++ b/.gitignore @@ -0,0 +1,16 @@ +#common +**/*.DS_Store +**/*.ipynb_checkpoints/ +**/__pycache__ +out/ + +#ZINC dataset +data/molecules/*.pkl +data/molecules/*.pickle +data/molecules/*.zip +data/molecules/zinc_full/*.pkl +data/molecules/zinc_full/*.pickle + + +#OGB +dataset/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..44754d3 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Vijay Prakash Dwivedi, Anh Tuan Luu, Thomas Laurent, Yoshua Bengio and Xavier Bresson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..e288bfb --- /dev/null +++ b/README.md @@ -0,0 +1,53 @@ + + +# Graph Neural Networks with
Learnable Structural and Positional Representations
+
+
+<br>
+
+Source code for the paper "**Graph Neural Networks with Learnable Structural and Positional Representations**" by Vijay Prakash Dwivedi, Anh Tuan Luu, Thomas Laurent, Yoshua Bengio and Xavier Bresson.
+
+We propose a novel GNN architecture in which the structural and positional representations are decoupled and learned separately, so that each of these two essential properties is captured explicitly. The architecture, named **MPGNNs-LSPE** (MPGNNs with **L**earnable **S**tructural and **P**ositional **E**ncodings), is generic enough to be applied to any GNN model of interest that fits the popular 'message-passing framework', including Transformers.
+
+![MPGNNs-LSPE](./docs/gnn-lspe.png)
+
+
+<br>
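+
+At its core, each LSPE layer carries two representations per node, a structural embedding `h` and a positional embedding `p`, each with its own learnable update (the actual implementations are in `layers/*_lspe_layer.py`). The snippet below is a minimal illustrative sketch of this decoupling only, not code from this repo; the class `ToyLSPELayer` and its dense-adjacency interface are simplifying assumptions:
+
+```python
+import torch
+import torch.nn as nn
+
+class ToyLSPELayer(nn.Module):
+    """Toy LSPE-style layer: decoupled structural (h) and positional (p) channels."""
+    def __init__(self, h_dim, p_dim):
+        super().__init__()
+        self.W_h = nn.Linear(h_dim + p_dim, h_dim)  # structural update sees [h || p]
+        self.W_p = nn.Linear(p_dim, p_dim)          # positional update sees p alone
+
+    def forward(self, A, h, p):
+        # A: (N, N) dense adjacency (simplification; the repo uses DGL graphs)
+        h = torch.relu(self.W_h(A @ torch.cat([h, p], dim=-1)))  # aggregate, then transform
+        p = torch.tanh(self.W_p(A @ p))                          # tanh keeps p bounded
+        return h, p
+
+# Shape check: 5 nodes, h_dim=8, p_dim=4, self-loop-only adjacency.
+h, p = ToyLSPELayer(8, 4)(torch.eye(5), torch.randn(5, 8), torch.randn(5, 4))
+```
+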
+
+
+## 1. Repo installation
+
+[Follow these instructions](./docs/01_repo_installation.md) to install the repo and set up the environment.
+
+
+
+<br>
+
+## 2. Download datasets
+
+[Proceed as follows](./docs/02_download_datasets.md) to download the benchmark datasets.
+
+
+
+<br>
+
+## 3. Reproducibility
+
+[Use this page](./docs/03_run_codes.md) to run the codes and reproduce the published results.
+
+
+
+<br>
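+
+All experiments are driven by the JSON files under `configs/` (included further down in this patch), which separate training hyperparameters (`"params"`) from model hyperparameters (`"net_params"`). As a minimal sketch of reading one such file, assuming nothing beyond the standard-library `json` module (the `main_*.py` entry points additionally accept command-line overrides):
+
+```python
+import json
+
+with open("configs/GatedGCN_ZINC_LSPE.json") as f:
+    config = json.load(f)
+
+params = config["params"]          # training settings: seed, epochs, init_lr, ...
+net_params = config["net_params"]  # model settings: L, hidden_dim, pe_init, ...
+print(params["init_lr"], net_params["pe_init"])  # -> 0.001 rand_walk
+```
+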
+
+## 4. Reference
+
+TODO
+[arXiv paper](https://arxiv.org/pdf/2110.xxxxx.pdf)
+```
+@article{dwivedi2021graph,
+  title={Graph Neural Networks with Learnable Structural and Positional Representations},
+  author={Dwivedi, Vijay Prakash and Luu, Anh Tuan and Laurent, Thomas and Bengio, Yoshua and Bresson, Xavier},
+  journal={arXiv preprint arXiv:2110.xxxxx},
+  year={2021}
+}
+```
+
+
+
+


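+The configs that follow select the positional-encoding initialization via `"pe_init"` (`"rand_walk"`, `"lap_pe"`, or `"no_pe"`) with dimension `"pos_enc_dim"`. For `"rand_walk"`, `init_positional_encoding()` in `data/molecules.py` stacks the return probabilities of 1..k-step random walks (RWPE). Below is a minimal dense-NumPy sketch of that idea; it is illustrative only, since the repo computes the same quantity on sparse DGL/SciPy adjacency matrices:
+
+```python
+import numpy as np
+
+def toy_rwpe(A, k):
+    """Random-walk PE: column j holds each node's probability of returning
+    to itself after j+1 steps, i.e. diag((A D^-1)^(j+1))."""
+    Dinv = np.diag(1.0 / np.maximum(A.sum(axis=1), 1))  # D^-1, guarding degree-0 nodes
+    M = A @ Dinv                                        # random-walk operator
+    M_power, cols = M, []
+    for _ in range(k):
+        cols.append(np.diag(M_power))
+        M_power = M_power @ M
+    return np.stack(cols, axis=-1)                      # shape: (num_nodes, k)
+
+# 4-cycle: return probability is 0 after odd steps and 0.5 after 2 steps.
+A = np.array([[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]], dtype=float)
+print(toy_rwpe(A, 3))
+```
+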
+ diff --git a/configs/GatedGCN_MOLPCBA_LSPE.json b/configs/GatedGCN_MOLPCBA_LSPE.json new file mode 100644 index 0000000..0e79994 --- /dev/null +++ b/configs/GatedGCN_MOLPCBA_LSPE.json @@ -0,0 +1,41 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "GatedGCN", + "dataset": "OGBG-MOLPCBA", + + "out_dir": "out/GatedGCN_MOLPCBA_LSPE_noLapEigLoss/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 256, + "init_lr": 0.001, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-4, + "weight_decay": 0.0, + "print_epoch_interval": 1, + "max_time": 96 + }, + + "net_params": { + "L": 8, + "hidden_dim": 118, + "out_dim": 118, + "residual": true, + "edge_feat": true, + "readout": "mean", + "in_feat_dropout": 0.0, + "dropout": 0.1, + "batch_norm": true, + "pos_enc_dim": 20, + "pe_init": "rand_walk", + "use_lapeig_loss": false, + "alpha_loss": 1e-3, + "lambda_loss": 100 + } +} \ No newline at end of file diff --git a/configs/GatedGCN_MOLPCBA_LapPE.json b/configs/GatedGCN_MOLPCBA_LapPE.json new file mode 100644 index 0000000..70a9a0b --- /dev/null +++ b/configs/GatedGCN_MOLPCBA_LapPE.json @@ -0,0 +1,41 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "GatedGCN", + "dataset": "OGBG-MOLPCBA", + + "out_dir": "out/GatedGCN_MOLPCBA_LapPE/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 256, + "init_lr": 0.001, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-4, + "weight_decay": 0.0, + "print_epoch_interval": 1, + "max_time": 48 + }, + + "net_params": { + "L": 8, + "hidden_dim": 154, + "out_dim": 154, + "residual": true, + "edge_feat": true, + "readout": "mean", + "in_feat_dropout": 0.0, + "dropout": 0.1, + "batch_norm": true, + "pos_enc_dim": 3, + "pe_init": "lap_pe", + "use_lapeig_loss": false, + "alpha_loss": 1e-3, + "lambda_loss": 100 + } +} \ No newline at end of file diff --git a/configs/GatedGCN_MOLPCBA_NoPE.json b/configs/GatedGCN_MOLPCBA_NoPE.json new file mode 100644 index 0000000..141092e --- /dev/null +++ b/configs/GatedGCN_MOLPCBA_NoPE.json @@ -0,0 +1,41 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "GatedGCN", + "dataset": "OGBG-MOLPCBA", + + "out_dir": "out/GatedGCN_MOLPCBA_NoPE/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 256, + "init_lr": 0.001, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-4, + "weight_decay": 0.0, + "print_epoch_interval": 1, + "max_time": 48 + }, + + "net_params": { + "L": 8, + "hidden_dim": 154, + "out_dim": 154, + "residual": true, + "edge_feat": true, + "readout": "mean", + "in_feat_dropout": 0.0, + "dropout": 0.1, + "batch_norm": true, + "pos_enc_dim": 3, + "pe_init": "no_pe", + "use_lapeig_loss": false, + "alpha_loss": 1e-3, + "lambda_loss": 100 + } +} \ No newline at end of file diff --git a/configs/GatedGCN_MOLTOX21_LSPE.json b/configs/GatedGCN_MOLTOX21_LSPE.json new file mode 100644 index 0000000..456bc36 --- /dev/null +++ b/configs/GatedGCN_MOLTOX21_LSPE.json @@ -0,0 +1,41 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "GatedGCN", + "dataset": "OGBG-MOLTOX21", + + "out_dir": "out/GatedGCN_MOLTOX21_LSPE_noLapEigLoss/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 256, + "init_lr": 0.001, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-5, + "weight_decay": 0.0, + "print_epoch_interval": 1, + "max_time": 48 + }, + + "net_params": { + "L": 8, + "hidden_dim": 118, + "out_dim": 118, + "residual": true, + "edge_feat": true, + "readout": "mean", + 
"in_feat_dropout": 0.0, + "dropout": 0.4, + "batch_norm": true, + "pos_enc_dim": 16, + "pe_init": "rand_walk", + "use_lapeig_loss": false, + "alpha_loss": 1e-3, + "lambda_loss": 100 + } +} \ No newline at end of file diff --git a/configs/GatedGCN_MOLTOX21_LapPE.json b/configs/GatedGCN_MOLTOX21_LapPE.json new file mode 100644 index 0000000..4b06e81 --- /dev/null +++ b/configs/GatedGCN_MOLTOX21_LapPE.json @@ -0,0 +1,41 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "GatedGCN", + "dataset": "OGBG-MOLTOX21", + + "out_dir": "out/GatedGCN_MOLTOX21_LapPE/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 256, + "init_lr": 0.001, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-5, + "weight_decay": 0.0, + "print_epoch_interval": 1, + "max_time": 48 + }, + + "net_params": { + "L": 8, + "hidden_dim": 154, + "out_dim": 154, + "residual": true, + "edge_feat": true, + "readout": "mean", + "in_feat_dropout": 0.0, + "dropout": 0.4, + "batch_norm": true, + "pos_enc_dim": 3, + "pe_init": "lap_pe", + "use_lapeig_loss": false, + "alpha_loss": 1e-3, + "lambda_loss": 100 + } +} \ No newline at end of file diff --git a/configs/GatedGCN_MOLTOX21_NoPE.json b/configs/GatedGCN_MOLTOX21_NoPE.json new file mode 100644 index 0000000..e019c71 --- /dev/null +++ b/configs/GatedGCN_MOLTOX21_NoPE.json @@ -0,0 +1,41 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "GatedGCN", + "dataset": "OGBG-MOLTOX21", + + "out_dir": "out/GatedGCN_MOLTOX21_NoPE/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 256, + "init_lr": 0.001, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-5, + "weight_decay": 0.0, + "print_epoch_interval": 1, + "max_time": 48 + }, + + "net_params": { + "L": 8, + "hidden_dim": 154, + "out_dim": 154, + "residual": true, + "edge_feat": true, + "readout": "mean", + "in_feat_dropout": 0.0, + "dropout": 0.4, + "batch_norm": true, + "pos_enc_dim": 3, + "pe_init": "no_pe", + "use_lapeig_loss": false, + "alpha_loss": 1e-3, + "lambda_loss": 100 + } +} \ No newline at end of file diff --git a/configs/GatedGCN_ZINC_LSPE.json b/configs/GatedGCN_ZINC_LSPE.json new file mode 100644 index 0000000..4875bd6 --- /dev/null +++ b/configs/GatedGCN_ZINC_LSPE.json @@ -0,0 +1,41 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "GatedGCN", + "dataset": "ZINC", + + "out_dir": "out/GatedGCN_ZINC_LSPE_noLapEigLoss/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 128, + "init_lr": 0.001, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 0.0, + "print_epoch_interval": 5, + "max_time": 12 + }, + + "net_params": { + "L": 16, + "hidden_dim": 59, + "out_dim": 59, + "residual": true, + "edge_feat": true, + "readout": "mean", + "in_feat_dropout": 0.0, + "dropout": 0.0, + "batch_norm": true, + "pos_enc_dim": 20, + "pe_init": "rand_walk", + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1 + } +} \ No newline at end of file diff --git a/configs/GatedGCN_ZINC_LSPE_withLapEigLoss.json b/configs/GatedGCN_ZINC_LSPE_withLapEigLoss.json new file mode 100644 index 0000000..c5f6122 --- /dev/null +++ b/configs/GatedGCN_ZINC_LSPE_withLapEigLoss.json @@ -0,0 +1,41 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "GatedGCN", + "dataset": "ZINC", + + "out_dir": "out/GatedGCN_ZINC_LSPE_withLapEigLoss/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 128, + "init_lr": 0.001, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 
1e-6, + "weight_decay": 0.0, + "print_epoch_interval": 5, + "max_time": 12 + }, + + "net_params": { + "L": 16, + "hidden_dim": 59, + "out_dim": 59, + "residual": true, + "edge_feat": true, + "readout": "mean", + "in_feat_dropout": 0.0, + "dropout": 0.0, + "batch_norm": true, + "pos_enc_dim": 20, + "pe_init": "rand_walk", + "use_lapeig_loss": true, + "alpha_loss": 1, + "lambda_loss": 1e-1 + } +} \ No newline at end of file diff --git a/configs/GatedGCN_ZINC_LapPE.json b/configs/GatedGCN_ZINC_LapPE.json new file mode 100644 index 0000000..4f82e16 --- /dev/null +++ b/configs/GatedGCN_ZINC_LapPE.json @@ -0,0 +1,41 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "GatedGCN", + "dataset": "ZINC", + + "out_dir": "out/GatedGCN_ZINC_LapPE/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 128, + "init_lr": 0.001, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 0.0, + "print_epoch_interval": 5, + "max_time": 12 + }, + + "net_params": { + "L": 16, + "hidden_dim": 78, + "out_dim": 78, + "residual": true, + "edge_feat": true, + "readout": "mean", + "in_feat_dropout": 0.0, + "dropout": 0.0, + "batch_norm": true, + "pe_init": "lap_pe", + "pos_enc_dim": 8, + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1 + } +} \ No newline at end of file diff --git a/configs/GatedGCN_ZINC_NoPE.json b/configs/GatedGCN_ZINC_NoPE.json new file mode 100644 index 0000000..cdd8cfc --- /dev/null +++ b/configs/GatedGCN_ZINC_NoPE.json @@ -0,0 +1,41 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "GatedGCN", + "dataset": "ZINC", + + "out_dir": "out/GatedGCN_ZINC_NoPE/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 128, + "init_lr": 0.001, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 0.0, + "print_epoch_interval": 5, + "max_time": 12 + }, + + "net_params": { + "L": 16, + "hidden_dim": 78, + "out_dim": 78, + "residual": true, + "edge_feat": true, + "readout": "mean", + "in_feat_dropout": 0.0, + "dropout": 0.0, + "batch_norm": true, + "pe_init": "no_pe", + "pos_enc_dim": 16, + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1 + } +} \ No newline at end of file diff --git a/configs/GraphiT_MOLTOX21_LSPE.json b/configs/GraphiT_MOLTOX21_LSPE.json new file mode 100644 index 0000000..5fc8be5 --- /dev/null +++ b/configs/GraphiT_MOLTOX21_LSPE.json @@ -0,0 +1,47 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + "model": "GraphiT", + "dataset": "OGBG-MOLTOX21", + "out_dir":"out/GraphiT_MOLTOX21_LSPE_noLapEigLoss/", + + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 128, + "init_lr": 0.0007, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 0.0, + "print_epoch_interval": 5, + "max_time": 96 + }, + "net_params": { + "full_graph": true, + + "L": 10, + "hidden_dim": 64, + "out_dim": 64, + "n_heads": 8, + + "residual": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.5, + "layer_norm": false, + "batch_norm": true, + + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1000, + "pe_init": "rand_walk", + "pos_enc_dim": 16, + "adaptive_edge_PE": true, + "p_steps": 16, + "gamma": 0.25 + } +} \ No newline at end of file diff --git a/configs/GraphiT_MOLTOX21_NoPE.json b/configs/GraphiT_MOLTOX21_NoPE.json new file mode 100644 index 0000000..4bb2c58 --- /dev/null +++ b/configs/GraphiT_MOLTOX21_NoPE.json @@ -0,0 +1,47 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + "model": "GraphiT", 
+ "dataset": "OGBG-MOLTOX21", + "out_dir":"out/GraphiT_MOLTOX21_NoPE/", + + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 128, + "init_lr": 0.0007, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 0.0, + "print_epoch_interval": 5, + "max_time": 96 + }, + "net_params": { + "full_graph": true, + + "L": 10, + "hidden_dim": 88, + "out_dim": 88, + "n_heads": 8, + + "residual": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.5, + "layer_norm": false, + "batch_norm": true, + + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1000, + "pe_init": "no_pe", + "pos_enc_dim": 12, + "adaptive_edge_PE": true, + "p_steps": 16, + "gamma": 0.25 + } +} \ No newline at end of file diff --git a/configs/GraphiT_ZINC_LSPE.json b/configs/GraphiT_ZINC_LSPE.json new file mode 100644 index 0000000..cb0235d --- /dev/null +++ b/configs/GraphiT_ZINC_LSPE.json @@ -0,0 +1,49 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "GraphiT", + "dataset": "ZINC", + + "out_dir": "out/GraphiT_ZINC_LSPE_noLapEigLoss/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 32, + "init_lr": 0.0007, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 0.0, + "print_epoch_interval": 5, + "max_time": 48 + }, + + "net_params": { + "full_graph": true, + + "L": 10, + "hidden_dim": 48, + "out_dim": 48, + "n_heads": 8, + + "residual": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.0, + "layer_norm": false, + "batch_norm": true, + + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1000, + "pe_init": "rand_walk", + "pos_enc_dim": 16, + "adaptive_edge_PE": true, + "p_steps": 16, + "gamma": 0.25 + } +} \ No newline at end of file diff --git a/configs/GraphiT_ZINC_NoPE.json b/configs/GraphiT_ZINC_NoPE.json new file mode 100644 index 0000000..744ab5f --- /dev/null +++ b/configs/GraphiT_ZINC_NoPE.json @@ -0,0 +1,49 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "GraphiT", + "dataset": "ZINC", + + "out_dir": "out/GraphiT_ZINC_NoPE/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 32, + "init_lr": 0.0003, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 0.0, + "print_epoch_interval": 5, + "max_time": 48 + }, + + "net_params": { + "full_graph": true, + + "L": 10, + "hidden_dim": 64, + "out_dim": 64, + "n_heads": 8, + + "residual": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.0, + "layer_norm": false, + "batch_norm": true, + + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1000, + "pe_init": "no_pe", + "pos_enc_dim": 16, + "adaptive_edge_PE": true, + "p_steps": 16, + "gamma": 0.25 + } +} \ No newline at end of file diff --git a/configs/PNA_MOLPCBA_LSPE.json b/configs/PNA_MOLPCBA_LSPE.json new file mode 100644 index 0000000..0043783 --- /dev/null +++ b/configs/PNA_MOLPCBA_LSPE.json @@ -0,0 +1,47 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + "model": "PNA", + "dataset": "OGBG-MOLPCBA", + "out_dir":"out/PNA_MOLPCBA_LSPE_noLapEigLoss/", + + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 512, + "init_lr": 0.0005, + "lr_reduce_factor": 0.8, + "lr_schedule_patience": 10, + "min_lr": 2e-5, + "weight_decay": 3e-6, + "print_epoch_interval": 5, + "max_time": 96 + }, + "net_params": { + "L": 4, + "hidden_dim": 322, + "out_dim": 322, + "residual": true, + "edge_feat": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.4, + 
"dropout_2": 0.1, + "graph_norm": true, + "batch_norm": true, + "aggregators": "mean sum max", + "scalers": "identity", + "gru": false, + "edge_dim": 16, + "pretrans_layers" : 1, + "posttrans_layers" : 1, + "use_lapeig_loss": false, + "alpha_loss": 1e-2, + "lambda_loss": 1e-2, + "pe_init": "rand_walk", + "pos_enc_dim": 16 + } +} \ No newline at end of file diff --git a/configs/PNA_MOLPCBA_NoPE.json b/configs/PNA_MOLPCBA_NoPE.json new file mode 100644 index 0000000..d58c8d2 --- /dev/null +++ b/configs/PNA_MOLPCBA_NoPE.json @@ -0,0 +1,47 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + "model": "PNA", + "dataset": "OGBG-MOLPCBA", + "out_dir":"out/PNA_MOLPCBA_NoPE/", + + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 512, + "init_lr": 0.0005, + "lr_reduce_factor": 0.8, + "lr_schedule_patience": 4, + "min_lr": 2e-5, + "weight_decay": 3e-6, + "print_epoch_interval": 5, + "max_time": 96 + }, + "net_params": { + "L": 4, + "hidden_dim": 510, + "out_dim": 510, + "residual": true, + "edge_feat": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.2, + "dropout_2": 0.0, + "graph_norm": true, + "batch_norm": true, + "aggregators": "mean sum max", + "scalers": "identity", + "gru": false, + "edge_dim": 16, + "pretrans_layers" : 1, + "posttrans_layers" : 1, + "use_lapeig_loss": false, + "alpha_loss": 1e-2, + "lambda_loss": 1e-2, + "pe_init": "no_pe", + "pos_enc_dim": 16 + } +} \ No newline at end of file diff --git a/configs/PNA_MOLTOX21_LSPE.json b/configs/PNA_MOLTOX21_LSPE.json new file mode 100644 index 0000000..8a7e9f9 --- /dev/null +++ b/configs/PNA_MOLTOX21_LSPE.json @@ -0,0 +1,48 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + "model": "PNA", + "dataset": "OGBG-MOLTOX21", + "out_dir":"out/PNA_MOLTOX21_LSPE_noLapEigLoss/", + + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 256, + "init_lr": 0.0005, + "lr_reduce_factor": 0.8, + "lr_schedule_patience": 10, + "min_lr": 2e-5, + "weight_decay": 3e-6, + "print_epoch_interval": 5, + "max_time": 96 + }, + "net_params": { + "L": 8, + "hidden_dim": 140, + "out_dim": 140, + "residual": true, + "edge_feat": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.4, + "dropout_2": 0.1, + "graph_norm": true, + "batch_norm": true, + "aggregators": "mean max min std", + "scalers": "identity amplification attenuation", + "gru": false, + "edge_dim": 50, + "pretrans_layers" : 1, + "posttrans_layers" : 1, + "lpe_variant": "native_lpe", + "use_lapeig_loss": false, + "alpha_loss": 1e-2, + "lambda_loss": 1e-2, + "pe_init": "rand_walk", + "pos_enc_dim": 16 + } +} \ No newline at end of file diff --git a/configs/PNA_MOLTOX21_LSPE_withLapEigLoss.json b/configs/PNA_MOLTOX21_LSPE_withLapEigLoss.json new file mode 100644 index 0000000..58ef016 --- /dev/null +++ b/configs/PNA_MOLTOX21_LSPE_withLapEigLoss.json @@ -0,0 +1,48 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + "model": "PNA", + "dataset": "OGBG-MOLTOX21", + "out_dir":"out/PNA_MOLTOX21_LSPE_withLapEigLoss/", + + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 256, + "init_lr": 0.0005, + "lr_reduce_factor": 0.8, + "lr_schedule_patience": 10, + "min_lr": 2e-5, + "weight_decay": 3e-6, + "print_epoch_interval": 5, + "max_time": 96 + }, + "net_params": { + "L": 8, + "hidden_dim": 140, + "out_dim": 140, + "residual": true, + "edge_feat": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.4, + "dropout_2": 0.1, + "graph_norm": true, + "batch_norm": true, + "aggregators": "mean max min std", + "scalers": "identity amplification 
attenuation", + "gru": false, + "edge_dim": 50, + "pretrans_layers" : 1, + "posttrans_layers" : 1, + "lpe_variant": "native_lpe", + "use_lapeig_loss": true, + "alpha_loss": 1e-1, + "lambda_loss": 100, + "pe_init": "rand_walk", + "pos_enc_dim": 16 + } +} \ No newline at end of file diff --git a/configs/PNA_MOLTOX21_NoPE.json b/configs/PNA_MOLTOX21_NoPE.json new file mode 100644 index 0000000..8619000 --- /dev/null +++ b/configs/PNA_MOLTOX21_NoPE.json @@ -0,0 +1,48 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + "model": "PNA", + "dataset": "OGBG-MOLTOX21", + "out_dir":"out/PNA_MOLTOX21_NoPE/", + + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 256, + "init_lr": 0.0005, + "lr_reduce_factor": 0.8, + "lr_schedule_patience": 10, + "min_lr": 2e-5, + "weight_decay": 3e-6, + "print_epoch_interval": 5, + "max_time": 96 + }, + "net_params": { + "L": 8, + "hidden_dim": 206, + "out_dim": 206, + "residual": true, + "edge_feat": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.4, + "dropout_2": 0.1, + "graph_norm": true, + "batch_norm": true, + "aggregators": "mean max min std", + "scalers": "identity amplification attenuation", + "gru": false, + "edge_dim": 50, + "pretrans_layers" : 1, + "posttrans_layers" : 1, + "lpe_variant": "native_lpe", + "use_lapeig_loss": false, + "alpha_loss": 1e-2, + "lambda_loss": 1e-2, + "pe_init": "no_pe", + "pos_enc_dim": 16 + } +} \ No newline at end of file diff --git a/configs/PNA_ZINC_LSPE.json b/configs/PNA_ZINC_LSPE.json new file mode 100644 index 0000000..9a4a674 --- /dev/null +++ b/configs/PNA_ZINC_LSPE.json @@ -0,0 +1,51 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "PNA", + "dataset": "ZINC", + + "out_dir": "out/PNA_ZINC_LSPE_noLapEigLoss/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 128, + "init_lr": 0.001, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 3e-6, + "print_epoch_interval": 5, + "max_time": 48 + }, + + "net_params": { + "L": 16, + "hidden_dim": 55, + "out_dim": 55, + "residual": true, + "edge_feat": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.0, + "graph_norm": true, + "batch_norm": true, + "aggregators": "mean max min std", + "scalers": "identity amplification attenuation", + "towers": 5, + "divide_input_first": true, + "divide_input_last": true, + "gru": false, + "edge_dim": 40, + "pretrans_layers" : 1, + "posttrans_layers" : 1, + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1000, + "pe_init": "rand_walk", + "pos_enc_dim": 16 + } +} \ No newline at end of file diff --git a/configs/PNA_ZINC_NoPE.json b/configs/PNA_ZINC_NoPE.json new file mode 100644 index 0000000..67ef1ab --- /dev/null +++ b/configs/PNA_ZINC_NoPE.json @@ -0,0 +1,51 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "PNA", + "dataset": "ZINC", + + "out_dir": "out/PNA_ZINC_NoPE/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 128, + "init_lr": 0.001, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 3e-6, + "print_epoch_interval": 5, + "max_time": 48 + }, + + "net_params": { + "L": 16, + "hidden_dim": 70, + "out_dim": 70, + "residual": true, + "edge_feat": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.0, + "graph_norm": true, + "batch_norm": true, + "aggregators": "mean max min std", + "scalers": "identity amplification attenuation", + "towers": 5, + "divide_input_first": true, + "divide_input_last": true, + "gru": false, + "edge_dim": 40, + 
"pretrans_layers" : 1, + "posttrans_layers" : 1, + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1000, + "pe_init": "no_pe", + "pos_enc_dim": 16 + } +} \ No newline at end of file diff --git a/configs/SAN_MOLTOX21_LSPE.json b/configs/SAN_MOLTOX21_LSPE.json new file mode 100644 index 0000000..0c15005 --- /dev/null +++ b/configs/SAN_MOLTOX21_LSPE.json @@ -0,0 +1,45 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + "model": "SAN", + "dataset": "OGBG-MOLTOX21", + "out_dir":"out/SAN_MOLTOX21_LSPE_noLapEigLoss/", + + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 128, + "init_lr": 0.0007, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 0.0, + "print_epoch_interval": 5, + "max_time": 96 + }, + "net_params": { + "full_graph": true, + "init_gamma": 0.1, + + "L": 10, + "hidden_dim": 64, + "out_dim": 64, + "n_heads": 8, + + "residual": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.5, + "layer_norm": false, + "batch_norm": true, + + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1000, + "pe_init": "rand_walk", + "pos_enc_dim": 12 + } +} \ No newline at end of file diff --git a/configs/SAN_MOLTOX21_NoPE.json b/configs/SAN_MOLTOX21_NoPE.json new file mode 100644 index 0000000..9922214 --- /dev/null +++ b/configs/SAN_MOLTOX21_NoPE.json @@ -0,0 +1,45 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + "model": "SAN", + "dataset": "OGBG-MOLTOX21", + "out_dir":"out/SAN_MOLTOX21_NoPE/", + + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 128, + "init_lr": 0.0007, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 0.0, + "print_epoch_interval": 5, + "max_time": 96 + }, + "net_params": { + "full_graph": true, + "init_gamma": 0.1, + + "L": 10, + "hidden_dim": 88, + "out_dim": 88, + "n_heads": 8, + + "residual": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.5, + "layer_norm": false, + "batch_norm": true, + + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1000, + "pe_init": "no_pe", + "pos_enc_dim": 16 + } +} \ No newline at end of file diff --git a/configs/SAN_ZINC_LSPE.json b/configs/SAN_ZINC_LSPE.json new file mode 100644 index 0000000..9e8dd59 --- /dev/null +++ b/configs/SAN_ZINC_LSPE.json @@ -0,0 +1,47 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "SAN", + "dataset": "ZINC", + + "out_dir": "out/SAN_ZINC_LSPE_noLapEigLoss/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 32, + "init_lr": 0.0007, + "lr_reduce_factor": 0.5, + "lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 0.0, + "print_epoch_interval": 5, + "max_time": 48 + }, + + "net_params": { + "full_graph": true, + "init_gamma": 0.1, + + "L": 10, + "hidden_dim": 48, + "out_dim": 48, + "n_heads": 8, + + "residual": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.0, + "layer_norm": false, + "batch_norm": true, + + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1000, + "pe_init": "rand_walk", + "pos_enc_dim": 16 + } +} \ No newline at end of file diff --git a/configs/SAN_ZINC_NoPE.json b/configs/SAN_ZINC_NoPE.json new file mode 100644 index 0000000..2dd8619 --- /dev/null +++ b/configs/SAN_ZINC_NoPE.json @@ -0,0 +1,47 @@ +{ + "gpu": { + "use": true, + "id": 0 + }, + + "model": "SAN", + "dataset": "ZINC", + + "out_dir": "out/SAN_ZINC_NoPE/", + + "params": { + "seed": 41, + "epochs": 1000, + "batch_size": 32, + "init_lr": 0.0003, + "lr_reduce_factor": 0.5, + 
"lr_schedule_patience": 25, + "min_lr": 1e-6, + "weight_decay": 0.0, + "print_epoch_interval": 5, + "max_time": 48 + }, + + "net_params": { + "full_graph": true, + "init_gamma": 0.1, + + "L": 10, + "hidden_dim": 64, + "out_dim": 64, + "n_heads": 8, + + "residual": true, + "readout": "sum", + "in_feat_dropout": 0.0, + "dropout": 0.0, + "layer_norm": false, + "batch_norm": true, + + "use_lapeig_loss": false, + "alpha_loss": 1e-4, + "lambda_loss": 1000, + "pe_init": "no_pe", + "pos_enc_dim": 16 + } +} \ No newline at end of file diff --git a/data/data.py b/data/data.py new file mode 100644 index 0000000..3871142 --- /dev/null +++ b/data/data.py @@ -0,0 +1,21 @@ +""" + File to load dataset based on user control from main file +""" + +from data.molecules import MoleculeDataset +from data.ogb_mol import OGBMOLDataset + +def LoadData(DATASET_NAME): + """ + This function is called in the main.py file + returns: + ; dataset object + """ + + # handling for (ZINC) molecule dataset + if DATASET_NAME == 'ZINC' or DATASET_NAME == 'ZINC-full': + return MoleculeDataset(DATASET_NAME) + + # handling for MOLPCBA and MOLTOX21 dataset + if DATASET_NAME in ['OGBG-MOLPCBA', 'OGBG-MOLTOX21']: + return OGBMOLDataset(DATASET_NAME) \ No newline at end of file diff --git a/data/molecules.py b/data/molecules.py new file mode 100644 index 0000000..a80eb40 --- /dev/null +++ b/data/molecules.py @@ -0,0 +1,331 @@ +import torch +import torch.nn.functional as F +import pickle +import torch.utils.data +import time +import os +import numpy as np + +import csv + +import dgl + +from scipy import sparse as sp +import numpy as np +import networkx as nx + +# The dataset pickle and index files are in ./data/molecules/ dir +# [.pickle and .index; for split 'train', 'val' and 'test'] + + + + +class MoleculeDGL(torch.utils.data.Dataset): + def __init__(self, data_dir, split, num_graphs=None): + self.data_dir = data_dir + self.split = split + self.num_graphs = num_graphs + + with open(data_dir + "/%s.pickle" % self.split,"rb") as f: + self.data = pickle.load(f) + + if self.num_graphs in [10000, 1000]: + # loading the sampled indices from file ./zinc_molecules/.index + with open(data_dir + "/%s.index" % self.split,"r") as f: + data_idx = [list(map(int, idx)) for idx in csv.reader(f)] + self.data = [ self.data[i] for i in data_idx[0] ] + + assert len(self.data)==num_graphs, "Sample num_graphs again; available idx: train/val/test => 10k/1k/1k" + + """ + data is a list of Molecule dict objects with following attributes + + molecule = data[idx] + ; molecule['num_atom'] : nb of atoms, an integer (N) + ; molecule['atom_type'] : tensor of size N, each element is an atom type, an integer between 0 and num_atom_type + ; molecule['bond_type'] : tensor of size N x N, each element is a bond type, an integer between 0 and num_bond_type + ; molecule['logP_SA_cycle_normalized'] : the chemical property to regress, a float variable + """ + + self.graph_lists = [] + self.graph_labels = [] + self.n_samples = len(self.data) + self._prepare() + + def _prepare(self): + print("preparing %d graphs for the %s set..." 
% (self.num_graphs, self.split.upper())) + + for molecule in self.data: + node_features = molecule['atom_type'].long() + + adj = molecule['bond_type'] + edge_list = (adj != 0).nonzero() # converting adj matrix to edge_list + + edge_idxs_in_adj = edge_list.split(1, dim=1) + edge_features = adj[edge_idxs_in_adj].reshape(-1).long() + + # Create the DGL Graph + g = dgl.DGLGraph() + g.add_nodes(molecule['num_atom']) + g.ndata['feat'] = node_features + + for src, dst in edge_list: + g.add_edges(src.item(), dst.item()) + g.edata['feat'] = edge_features + + self.graph_lists.append(g) + self.graph_labels.append(molecule['logP_SA_cycle_normalized']) + + def __len__(self): + """Return the number of graphs in the dataset.""" + return self.n_samples + + def __getitem__(self, idx): + """ + Get the idx^th sample. + Parameters + --------- + idx : int + The sample index. + Returns + ------- + (dgl.DGLGraph, int) + DGLGraph with node feature stored in `feat` field + And its label. + """ + return self.graph_lists[idx], self.graph_labels[idx] + + +class MoleculeDatasetDGL(torch.utils.data.Dataset): + def __init__(self, name='Zinc'): + t0 = time.time() + self.name = name + + self.num_atom_type = 28 # known meta-info about the zinc dataset; can be calculated as well + self.num_bond_type = 4 # known meta-info about the zinc dataset; can be calculated as well + + data_dir='./data/molecules' + + if self.name == 'ZINC-full': + data_dir='./data/molecules/zinc_full' + self.train = MoleculeDGL(data_dir, 'train', num_graphs=220011) + self.val = MoleculeDGL(data_dir, 'val', num_graphs=24445) + self.test = MoleculeDGL(data_dir, 'test', num_graphs=5000) + else: + self.train = MoleculeDGL(data_dir, 'train', num_graphs=10000) + self.val = MoleculeDGL(data_dir, 'val', num_graphs=1000) + self.test = MoleculeDGL(data_dir, 'test', num_graphs=1000) + print("Time taken: {:.4f}s".format(time.time()-t0)) + + + +def add_eig_vec(g, pos_enc_dim): + """ + Graph positional encoding v/ Laplacian eigenvectors + This func is for eigvec visualization, same code as positional_encoding() func, + but stores value in a diff key 'eigvec' + """ + + # Laplacian + A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float) + N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) + L = sp.eye(g.number_of_nodes()) - N * A * N + + # Eigenvectors with numpy + EigVal, EigVec = np.linalg.eig(L.toarray()) + idx = EigVal.argsort() # increasing order + EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx]) + g.ndata['eigvec'] = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float() + + # zero padding to the end if n < pos_enc_dim + n = g.number_of_nodes() + if n <= pos_enc_dim: + g.ndata['eigvec'] = F.pad(g.ndata['eigvec'], (0, pos_enc_dim - n + 1), value=float('0')) + + return g + + +def lap_positional_encoding(g, pos_enc_dim): + """ + Graph positional encoding v/ Laplacian eigenvectors + """ + + # Laplacian + A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float) + N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) + L = sp.eye(g.number_of_nodes()) - N * A * N + + # Eigenvectors with numpy + EigVal, EigVec = np.linalg.eig(L.toarray()) + idx = EigVal.argsort() # increasing order + EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx]) + g.ndata['pos_enc'] = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float() + g.ndata['eigvec'] = g.ndata['pos_enc'] + + # # Eigenvectors with scipy + # EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR') + # EigVec = EigVec[:, EigVal.argsort()] # 
increasing order + # g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float() + + return g + + +def init_positional_encoding(g, pos_enc_dim, type_init): + """ + Initializing positional encoding with RWPE + """ + + n = g.number_of_nodes() + + if type_init == 'rand_walk': + # Geometric diffusion features with Random Walk + A = g.adjacency_matrix(scipy_fmt="csr") + Dinv = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -1.0, dtype=float) # D^-1 + RW = A * Dinv + M = RW + + # Iterate + nb_pos_enc = pos_enc_dim + PE = [torch.from_numpy(M.diagonal()).float()] + M_power = M + for _ in range(nb_pos_enc-1): + M_power = M_power * M + PE.append(torch.from_numpy(M_power.diagonal()).float()) + PE = torch.stack(PE,dim=-1) + g.ndata['pos_enc'] = PE + + return g + + +def make_full_graph(g, adaptive_weighting=None): + + full_g = dgl.from_networkx(nx.complete_graph(g.number_of_nodes())) + + #Here we copy over the node feature data and laplace encodings + full_g.ndata['feat'] = g.ndata['feat'] + + try: + full_g.ndata['pos_enc'] = g.ndata['pos_enc'] + except: + pass + + try: + full_g.ndata['eigvec'] = g.ndata['eigvec'] + except: + pass + + #Populate edge features w/ 0s + full_g.edata['feat']=torch.zeros(full_g.number_of_edges(), dtype=torch.long) + full_g.edata['real']=torch.zeros(full_g.number_of_edges(), dtype=torch.long) + + #Copy real edge data over + full_g.edges[g.edges(form='uv')[0].tolist(), g.edges(form='uv')[1].tolist()].data['feat'] = g.edata['feat'] + full_g.edges[g.edges(form='uv')[0].tolist(), g.edges(form='uv')[1].tolist()].data['real'] = torch.ones(g.edata['feat'].shape[0], dtype=torch.long) + + + # This code section only apply for GraphiT -------------------------------------------- + if adaptive_weighting is not None: + p_steps, gamma = adaptive_weighting + + n = g.number_of_nodes() + A = g.adjacency_matrix(scipy_fmt="csr") + + # Adaptive weighting k_ij for each edge + if p_steps == "qtr_num_nodes": + p_steps = int(0.25*n) + elif p_steps == "half_num_nodes": + p_steps = int(0.5*n) + elif p_steps == "num_nodes": + p_steps = int(n) + elif p_steps == "twice_num_nodes": + p_steps = int(2*n) + + N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) + I = sp.eye(n) + L = I - N * A * N + + k_RW = I - gamma*L + k_RW_power = k_RW + for _ in range(p_steps - 1): + k_RW_power = k_RW_power.dot(k_RW) + + k_RW_power = torch.from_numpy(k_RW_power.toarray()) + + # Assigning edge features k_RW_eij for adaptive weighting during attention + full_edge_u, full_edge_v = full_g.edges() + num_edges = full_g.number_of_edges() + + k_RW_e_ij = [] + for edge in range(num_edges): + k_RW_e_ij.append(k_RW_power[full_edge_u[edge], full_edge_v[edge]]) + + full_g.edata['k_RW'] = torch.stack(k_RW_e_ij,dim=-1).unsqueeze(-1).float() + # -------------------------------------------------------------------------------------- + + return full_g + + +class MoleculeDataset(torch.utils.data.Dataset): + + def __init__(self, name): + """ + Loading ZINC datasets + """ + start = time.time() + print("[I] Loading dataset %s..." 
% (name)) + self.name = name + data_dir = 'data/molecules/' + with open(data_dir+name+'.pkl',"rb") as f: + f = pickle.load(f) + self.train = f[0] + self.val = f[1] + self.test = f[2] + self.num_atom_type = f[3] + self.num_bond_type = f[4] + print('train, test, val sizes :',len(self.train),len(self.test),len(self.val)) + print("[I] Finished loading.") + print("[I] Data load time: {:.4f}s".format(time.time()-start)) + + + # form a mini batch from a given list of samples = [(graph, label) pairs] + def collate(self, samples): + # The input samples is a list of pairs (graph, label). + graphs, labels = map(list, zip(*samples)) + labels = torch.tensor(np.array(labels)).unsqueeze(1) + tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))] + tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ] + snorm_n = torch.cat(tab_snorm_n).sqrt() + batched_graph = dgl.batch(graphs) + + return batched_graph, labels, snorm_n + + + def _add_lap_positional_encodings(self, pos_enc_dim): + + # Graph positional encoding v/ Laplacian eigenvectors + self.train.graph_lists = [lap_positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists] + self.val.graph_lists = [lap_positional_encoding(g, pos_enc_dim) for g in self.val.graph_lists] + self.test.graph_lists = [lap_positional_encoding(g, pos_enc_dim) for g in self.test.graph_lists] + + def _add_eig_vecs(self, pos_enc_dim): + + # This is used if we visualize the eigvecs + self.train.graph_lists = [add_eig_vec(g, pos_enc_dim) for g in self.train.graph_lists] + self.val.graph_lists = [add_eig_vec(g, pos_enc_dim) for g in self.val.graph_lists] + self.test.graph_lists = [add_eig_vec(g, pos_enc_dim) for g in self.test.graph_lists] + + def _init_positional_encodings(self, pos_enc_dim, type_init): + + # Initializing positional encoding randomly with l2-norm 1 + self.train.graph_lists = [init_positional_encoding(g, pos_enc_dim, type_init) for g in self.train.graph_lists] + self.val.graph_lists = [init_positional_encoding(g, pos_enc_dim, type_init) for g in self.val.graph_lists] + self.test.graph_lists = [init_positional_encoding(g, pos_enc_dim, type_init) for g in self.test.graph_lists] + + def _make_full_graph(self, adaptive_weighting=None): + self.train.graph_lists = [make_full_graph(g, adaptive_weighting) for g in self.train.graph_lists] + self.val.graph_lists = [make_full_graph(g, adaptive_weighting) for g in self.val.graph_lists] + self.test.graph_lists = [make_full_graph(g, adaptive_weighting) for g in self.test.graph_lists] + + + + diff --git a/data/molecules/test.index b/data/molecules/test.index new file mode 100644 index 0000000..428bc26 --- /dev/null +++ b/data/molecules/test.index @@ -0,0 +1 @@ 
+912,204,2253,2006,1828,1143,839,4467,712,4837,3456,260,244,767,1791,1905,4139,4931,217,4597,1628,4464,3436,1805,3679,4827,2278,53,1307,3462,2787,2276,1273,1763,2757,837,759,3112,792,2940,2817,4945,2166,355,3763,4392,1022,3100,645,4522,2401,2962,4729,1575,569,375,1866,2370,653,1907,827,3113,2277,3714,2988,1332,3032,2910,1716,2187,584,4990,1401,4375,2005,1338,3786,3108,2211,4562,1799,2656,458,1876,262,2584,3286,2193,542,1728,4646,2577,1741,4089,3241,3758,1170,2169,2020,4598,4415,2152,4788,3509,4780,3271,2965,1796,1133,4174,4042,744,385,898,1252,1310,3458,4885,520,3152,3126,4881,3834,4334,2059,4532,94,938,4398,2185,2786,913,2404,3561,1295,3716,26,2157,4100,1463,4158,871,2444,4988,1629,3063,1323,4418,4344,4,4906,2655,4002,159,916,2973,2519,1961,474,1973,4647,701,3981,566,4363,1030,1051,3893,4503,1352,2171,4322,4969,3466,1735,4417,1647,2553,3268,3059,3588,4239,3698,991,2030,1840,524,2769,172,4819,4537,1885,4820,1804,58,581,482,1875,552,257,2706,580,4211,1949,2281,3976,1755,1083,4677,4720,3872,1990,3874,3334,1559,772,794,3531,2902,3469,3367,3825,443,806,496,3298,2779,895,2036,1569,1558,4393,3675,1148,1503,3789,2046,617,3630,4508,802,414,4428,120,764,1936,1362,3329,3978,3943,1751,3285,480,1348,3104,17,3198,2172,3727,2336,3465,4552,3986,1268,1555,2430,1783,479,4744,4441,499,2569,468,410,4785,3905,4119,4350,1289,465,4160,656,1522,561,4874,556,1926,3307,982,4666,2016,4742,4870,325,671,3434,4781,4630,4282,2591,2136,1673,2573,1955,2175,3242,1072,2457,3745,2590,594,76,3754,4612,819,600,4404,1746,4144,1085,2859,563,2001,3027,2334,1292,3589,4450,2478,4333,64,4543,2452,848,1100,945,876,2231,2308,4954,1725,2808,1667,2162,4140,2057,416,756,2266,361,29,2732,1071,2145,3619,4519,3503,4594,79,616,1221,4469,295,3024,4771,4526,1213,3520,1044,342,2525,2987,326,2931,1720,2044,842,2897,4586,1266,1939,1331,1450,3377,203,1469,2721,3372,2032,1304,885,3133,317,3855,1822,1634,3770,2864,2500,1864,1826,193,1582,3264,2689,2282,568,2286,2876,4173,3274,2712,226,944,2139,1462,4756,2174,313,888,4887,3559,2831,3574,4966,4189,947,3155,4723,1557,2086,363,3572,13,4259,4410,1614,2983,3533,573,2704,2571,1020,2460,4154,2533,3345,2672,3296,2422,4541,1042,1571,3444,3105,1425,4662,2465,3326,4488,3,2489,2350,1721,3521,4751,2639,3809,3622,1750,4187,3876,1390,694,2324,4222,2745,765,1924,2542,1631,1207,200,378,3892,596,3730,3395,4715,1592,3145,4049,3273,1998,1208,45,873,3482,1792,1440,4243,3805,411,4566,2041,994,3739,1092,3806,4351,4578,4877,2599,3625,4135,3495,3652,1303,3888,3686,2123,2025,2271,4270,3969,1959,2249,3603,634,2340,1920,2225,2751,2619,4424,660,1235,1894,3137,1251,1752,526,3398,3339,2710,4445,3816,3406,510,1694,3441,3190,4784,160,4716,3116,3907,48,2881,2446,3194,3432,4409,4473,4941,1806,3999,1797,2235,3570,237,3185,2753,3312,3828,1045,220,3227,4848,4623,222,687,3511,1111,3782,1488,2131,2681,1733,3724,2677,2764,2279,3453,2066,670,3852,158,426,2866,1836,562,329,254,1633,166,1248,1954,1034,3879,937,4620,1785,2099,3021,1374,4963,4974,1341,2548,4740,210,2555,3074,3249,1624,622,4850,1989,834,2470,4918,4636,336,2844,4364,3035,564,2795,103,4015,864,3551,2967,3766,1253,3567,1442,4274,2212,4408,3960,3808,3568,4853,2198,2640,2011,709,2284,3692,1997,4668,4999,2755,235,2662,1489,3994,1737,2906,2116,2788,2290,4883,2263,4553,83,4232,1565,1977,4548,1968,3900,4020,3671,141,762,2410,1815,1993,2508,4764,3023,4534,4349,2816,3485,2709,2882,3717,2219,2511,1888,988,1577,979,4389,1516,1772,3966,2265,4829,4297,4888,2318,823,1590,2426,1863,2956,2476,115,1036,2247,372,446,4533,2393,4021,840,100,4702,2329,3845,3921,3608,2791,1510,420,2068,3913,934,535,3282
,4028,606,4726,439,1242,1222,4610,697,2033,970,4571,3409,1848,4280,3690,3626,2435,4821,3512,2501,4658,493,4994,812,1702,2167,665,1286,1964,1423,4521,614,1282,21,3346,4864,3849,2385,267,1896,2360,2316,3719,583,1912,4831,1620,940,4461,1841,1220,2176,1165,488,1359,2364,3597,1018,3839,2491,3297,2230,4099,4423,4045,3586,658,4899,3539,2051,211,748,4712,4809,169,2207,1435,3854,4251,1486,4795,747,3850,2850,2730,2630,856,1317,2701,4058,2361,3280,4506,300,3725,721,2576,2067,2648,949,3311,4215,9,4444,3784,3385,444,1536,4247,2963,4083,3621,422,4499,1073,2359,3970,236,4987,1960,1297,2545,4512,112,4524,3342,763,929,3780,962,1261,4082,2390,4168,2239,3403,3952,3868,1996,3741,4515,1184,3142,1561,4910,4163,1118,571,3399,2784,4159,2188,2317,2445,4808,4750,4011,1217,3658,4412,3967,2827,2723,4451,3090,2636,1545,1956,4684,1913,3365,357,2606,3123,3162,1246,4057,303,4114,4834,2719,822,3606,816,4308,3743,125,1180,3358,1264,612,3846,2773,3256,657,2691,4371,2594,3997,4432,294,560,1923,2354,740,3555,3634,2453,376,2657,459,2403,2936,3070,3528,1192,2000,3375,1475,1392,1434,646,4992,1972,4076,4777,1172,1902,3777,2080,3764,2091,3811,2356,4477,1294,605,3618,2830,4813,2450,3475,2048,3742,2474,3151,3958,1943,3124,4685,4708,2423,2418,179,2248 diff --git a/data/molecules/train.index b/data/molecules/train.index new file mode 100644 index 0000000..c4b804d --- /dev/null +++ b/data/molecules/train.index @@ -0,0 +1 @@ +167621,29184,6556,194393,72097,64196,58513,36579,193061,26868,177392,194161,142964,22790,154794,110604,8331,7811,24561,57314,60990,132475,157815,6956,147127,52124,187700,170363,183848,142853,109974,57787,117757,154472,72926,212187,1703,198916,211240,41853,183013,110785,89194,72842,40758,56443,200145,88236,26793,24312,99595,25353,94104,90165,158263,69342,211583,11390,191294,120435,140568,32722,99230,20656,144714,76854,217423,164794,162141,94800,151349,50407,184699,18233,12012,173346,59742,202655,75861,20916,61024,26476,99647,72869,118858,166640,218657,95638,42638,97040,93132,54921,175682,69986,183977,179187,169878,18717,159680,166455,44862,140021,191136,64175,42834,121178,99471,70765,167772,180397,146001,57570,179467,85008,201408,203423,14663,60043,215430,8414,211037,82694,105162,70186,17350,55307,148682,188196,82490,55738,171819,130870,103712,168519,120285,37452,69436,36603,64651,195294,147159,141289,68876,195825,153245,112311,152969,104700,94895,57493,36262,133569,129372,23831,198123,12351,28743,40066,164481,41938,207638,178384,110666,156345,16653,100864,100039,156208,122696,138704,65906,145024,3009,178332,188932,30029,178706,140763,196838,69946,201483,168024,89174,29242,76939,113971,41460,118940,850,189292,188659,69045,131225,199743,46832,133085,27894,163918,78235,167496,133080,159637,52143,40065,98019,199887,42349,141394,204112,139029,149,157009,84975,128085,5105,29325,95153,218016,211491,80612,62770,15184,63143,148729,20645,22453,191865,127399,213915,18143,199387,139645,200758,32967,33657,172949,124592,144127,43287,69483,138326,159014,110923,55521,141373,197988,191347,180844,52730,186895,81714,104593,176078,170361,97889,114845,135679,118354,31720,64986,58903,16784,88627,5514,154221,145207,60323,154256,57728,1885,18610,185556,165439,15433,60015,17668,8234,86619,18574,134782,62391,73001,175368,127248,56160,141356,34684,189622,149695,151050,123907,63700,205683,123987,211680,106708,49914,24726,25409,172748,112997,92876,111038,107767,122427,191122,14200,176518,171299,169392,25799,15889,105544,190896,88946,209870,28644,65183,50224,49862,140584,117601,36747,110593,48100,73018,121275,65485,19761,116164,211818,144264,256
66,13261,170955,141711,219159,3868,24448,197542,61965,43597,106539,127307,126185,56032,105130,15370,43158,99345,565,102346,69521,205539,205816,119277,74776,110888,182607,191497,205353,145691,173505,188326,127577,40579,49780,77780,57068,15331,151828,192869,142133,15979,196077,82209,14985,13144,153138,124987,131819,139231,41270,14910,133124,21000,48712,17962,155984,17815,177002,61657,105847,31427,149336,64543,151760,155849,10418,162367,21491,109897,172326,153006,148170,137044,82934,68358,53545,175564,187745,82361,62570,69629,103752,34308,176079,169214,78642,119858,82883,197096,19016,2441,120136,162833,147585,26209,19204,140937,55877,132614,69520,34722,91490,18033,64037,96869,74707,41352,114867,218561,142401,184428,79303,160347,211576,171435,138658,2050,175076,214198,145385,78480,173903,27154,35203,69328,30258,28058,194620,40749,71394,73860,158552,55215,188117,89884,53371,180223,166261,69201,132489,128065,65829,13316,24195,166273,111037,217408,72530,11557,929,87439,202144,34293,167015,68670,42357,194309,115824,144619,184986,112115,147038,2534,29327,19724,181146,39073,143023,9444,218784,96787,152701,144841,38821,112666,33409,10965,80808,95591,208698,10458,93797,55070,178799,65412,174832,26946,92714,204502,146770,106529,162702,196471,40515,62059,42598,209685,212538,46413,108080,6497,47018,193085,87080,205097,107928,210301,175612,192690,212533,65055,69941,41732,206405,183835,28336,100281,10151,123388,58309,52316,214063,120665,91660,80003,215098,208495,59662,58438,6203,173023,50627,104455,86051,73034,18198,202722,73170,92050,168160,133537,104773,178131,140565,86808,7235,30236,68475,46810,152198,69590,10028,28417,156387,113918,90619,190983,206157,82228,114398,158914,134066,30315,100976,151149,49828,66773,11635,185803,114309,443,136293,211421,141151,180055,188594,194497,193210,175801,51651,95478,113062,18343,174125,86559,163356,82291,32669,188679,78727,132939,81077,174821,107057,85506,105486,182769,77504,145335,33367,50289,110217,174307,99390,177554,196118,45620,161353,149187,78892,106450,143638,218556,106,79658,75212,55098,112692,205981,152039,159032,171627,84475,121893,115811,115909,177111,56020,134001,124042,208072,208673,192927,44483,172713,22228,74392,135122,174025,165921,162335,87867,24480,214544,196906,61569,176369,81374,58888,211435,52200,38627,6402,12114,64184,124554,160241,201454,19091,119384,108643,165089,150908,50970,188310,182545,100657,129598,104766,63959,38684,171981,180256,1453,196860,201862,27941,204058,111449,57367,46107,210792,182429,135779,121778,13164,146120,65325,31813,119658,34954,210086,121803,175001,139233,146518,156094,83177,197984,116017,160603,213649,188553,132324,111867,217728,143621,116893,41722,194944,124433,117983,67945,197073,64812,167159,72695,200753,203862,136655,127034,164298,62716,71984,115309,20311,187051,74901,61471,71228,88040,83809,141597,21122,36273,39539,60623,100410,181914,40056,185183,56086,16837,108755,106849,86738,142242,122139,108992,16322,54220,218337,110138,102099,201797,153112,182327,5120,200696,150913,99714,125037,1545,92211,78279,197518,102232,219081,109843,141091,195956,192579,143165,209680,158139,57812,127986,57520,71548,114251,127308,7608,101936,88114,175341,178032,209228,105989,189839,43265,122523,33456,163120,140017,7069,103290,155161,147951,173801,7104,22006,168492,112358,35572,121031,47639,13181,68198,99379,85813,55485,119196,85680,88473,199551,99385,72943,197134,218083,110510,66131,218866,21471,123288,5081,196352,141405,13653,91740,58778,170431,17987,204795,170853,10553,197717,8134,64823,52261,219998,5342,162879,39946,62533,33088,124141,17
5494,29986,147841,57138,121905,183360,67172,201037,96703,43984,158831,159186,196064,188313,30024,203893,214774,42930,81536,28337,151698,6731,81777,150941,177562,98384,103980,187436,51990,19922,155214,181040,217729,164427,63661,26712,182764,202501,79058,179374,157394,211164,31733,208726,148355,205163,10766,91017,139655,112296,173413,97142,18076,132634,169749,89451,3316,110115,215569,128503,27666,113645,94945,166614,217240,120517,185416,40105,114160,46173,192360,136772,170513,70800,161457,211864,141078,203067,126745,121863,114183,216461,191634,155313,70358,84490,64355,217771,22718,73119,118175,63927,196732,121820,149382,159994,175161,99349,88184,7523,129579,85200,47668,127808,55605,93015,209146,67725,89217,73310,156278,183811,72422,145697,2661,135430,50084,22442,63270,188763,106542,128077,145536,198748,62999,181039,124804,169319,186605,128665,117486,207862,4520,24393,77132,58090,106011,181347,63780,80270,174053,152451,96736,124062,145089,139177,90113,111543,195542,144281,86714,92225,184249,118946,71019,80379,65903,60434,31629,189080,50484,82719,31340,194741,140473,199803,180922,48535,50211,56723,193620,126929,72482,189945,154556,199283,137530,156445,74186,26352,218268,50886,77659,59633,94602,47039,79237,3708,185602,140020,33182,71909,11931,14293,145059,76581,182823,33103,167213,197339,128680,26892,3215,150487,74537,123049,125492,115466,89314,48329,13468,66185,125233,29907,215512,17129,105043,128908,19420,151262,165005,179949,14053,39774,39111,212636,147545,79648,22329,65061,31051,146295,200394,109097,158942,156271,207287,162114,59162,203343,136989,99713,118099,116056,77949,154278,112415,80053,149062,162798,15789,159811,194009,26013,199946,54470,163975,55318,69375,173127,21282,41171,62879,45564,144701,19677,41034,701,107090,118096,180714,155664,123184,76351,8556,60680,75524,185324,74112,184283,119021,18659,180193,61190,69351,206524,207420,163855,154609,173325,210739,51857,111447,30087,142753,58932,169773,39056,69633,216696,37286,18719,15633,43495,207782,80638,155987,196334,216069,149214,75657,115121,32598,122866,180532,79715,183430,105515,71367,131195,141552,129445,114755,21087,156771,10449,113253,192553,84494,158259,65632,6780,23940,60011,176763,219141,150784,153910,5438,200477,176234,215330,70650,151059,10546,200044,198251,45925,123338,136043,170789,115927,72917,47576,153440,114264,166405,213449,128902,23918,123210,91215,107046,87374,84163,175671,27420,42159,86456,107910,181842,129884,75554,173692,104976,213271,199360,144204,9619,119229,23084,82448,66164,84744,30388,202518,105952,134898,216249,301,172401,142237,121104,108330,14209,49173,135905,94838,163219,198297,130676,163947,115878,199226,13529,53361,70007,143974,34344,75505,114849,183042,127064,31831,7567,165156,159612,209535,62730,186065,41517,81461,144399,3584,144769,106952,24434,29743,120965,30793,169793,218141,40362,130646,187853,76509,133397,184919,71676,108918,218817,126484,123789,63892,119743,144510,37917,100554,49967,157123,133232,195636,35784,18300,72416,202552,207095,108774,89096,206495,133100,70037,215102,672,74144,190329,78264,219539,153862,152023,172999,128356,38953,117059,141185,126967,90472,87147,144681,199982,142456,98883,119365,84352,49454,182845,62601,149893,100393,61226,203304,107682,11441,83408,195219,184871,212705,99937,101208,173982,207721,215154,170922,39873,129847,9704,33094,131672,154712,87030,26309,115423,26138,137874,119780,4023,189384,37789,107473,171646,40464,19615,123074,204872,69474,88751,163383,181588,104197,170350,21054,86131,176778,139885,99617,83010,164311,188407,199072,127912,141824,9410,161863,17936,61
543,165455,179411,75334,59634,195760,23692,113763,25806,199326,166133,184520,26347,116307,43611,181928,78503,7588,12056,85032,208704,14711,76904,93969,98262,112901,38160,64014,139242,108016,148354,178729,207754,47200,44560,45893,20701,159774,162453,179079,63132,130460,152871,37517,60866,120887,167222,66578,120473,66932,174808,2463,210929,121963,75400,177631,143287,41412,19362,115796,90587,154028,78421,167493,111230,180961,65561,119756,79199,52223,100845,126670,27958,62182,99972,149926,94098,150683,77561,183311,77392,5751,217589,172550,103758,71953,2122,179778,204029,195210,12856,158965,195342,130214,218323,75024,203355,209416,60324,159138,210221,92358,57410,166885,49841,162762,65700,177671,198070,188987,201187,172800,178482,219765,35855,164691,25470,164484,169434,10334,80984,206729,115559,8746,151931,95646,191983,34448,23627,77361,85647,195947,108921,46042,52637,34644,206179,141402,95907,139159,131542,71441,217703,43134,67363,216187,126313,211416,77369,195706,88792,210824,30191,122770,19739,36898,197696,59143,177294,189849,176791,104179,210917,146096,95885,23672,207272,103435,3648,69326,140659,32398,119219,96622,176377,196348,176250,68724,153238,99883,215591,167391,97379,28402,176904,61295,123594,6560,162410,147169,85984,159929,58030,169778,16571,166567,215968,121704,183783,79217,170194,107032,30585,36641,11887,9754,79787,129138,30440,25478,61551,140914,35563,101881,118919,97258,175763,194809,182579,141608,109871,153967,194581,190473,40507,108759,171683,25957,218547,128279,161389,106985,73332,8576,180952,97132,56955,116230,116574,61895,95075,26045,179746,96294,142728,169037,94024,15872,104369,72320,49757,32023,216056,119201,24031,173740,55602,168218,167639,156538,5598,13258,206253,87427,63850,33010,206301,148000,53793,17985,217531,200581,145282,54305,153716,56610,213136,61079,86129,202996,38680,206738,156233,743,72685,37929,34076,141614,65707,209325,45743,28816,173292,6758,34551,3895,93911,207089,206682,62372,154364,84874,4136,45677,69564,13736,33228,194436,110352,137910,29792,195471,16661,124845,117512,203952,94906,134542,155625,28587,118490,132078,58077,161246,11367,190642,205318,172727,136695,79070,120073,168647,8165,15945,125562,105281,111745,179856,28301,128521,186751,116277,19265,21178,84439,159461,38884,17218,33080,72093,163661,165957,153449,143748,186686,85245,99851,156602,139082,77305,118938,132527,158709,112774,25999,207905,183967,29992,171629,170633,201578,144529,188963,56367,112740,118372,59898,108478,88848,216902,118882,104524,109049,191263,24926,81932,111873,81926,174354,66817,98120,40013,180046,124326,17598,23914,218044,22377,24439,113213,25313,195189,193670,97687,212800,34108,145849,15723,153738,147216,147241,86414,175639,32042,107691,92693,174415,196682,110870,189021,13486,157393,81906,92181,27158,151497,133015,55768,40561,172159,126403,58784,28368,91780,145821,96353,30116,199912,73024,150496,59286,211608,112489,147053,201177,214545,162981,160844,176941,168479,145945,6882,159644,172446,217438,181789,70109,7589,47294,71636,184208,199861,80998,89082,92018,1600,47554,37552,148457,172318,105063,18241,37191,194240,165982,8036,24053,195588,139062,56395,98617,110056,118920,89363,41263,97007,81693,189162,85035,203642,148791,156296,22270,13791,40784,41264,197770,161962,13044,176676,21386,71330,116159,173523,111152,127313,159141,115879,108575,71609,56510,197903,134308,29836,90484,112699,29069,74251,177790,177861,155486,127567,138132,174884,80850,11905,57807,103615,157109,14360,2015,53588,79015,55373,201156,35975,200320,66982,75876,86015,31453,2026,130386,112893,46057,33865,99669,1396
13,184463,60322,131140,146466,218458,175124,211340,92842,18894,104089,194521,11089,114352,4912,120521,20411,82066,150931,112534,150313,106017,185990,167809,109495,75894,30192,106176,5467,85136,45055,210061,162037,120611,218110,180777,94857,23102,114481,27751,63783,114209,154369,104975,137353,20616,103747,81336,195508,89001,58083,87311,204241,44074,20022,133820,165996,29891,139094,133680,50830,203483,91586,92038,190678,214682,169199,213657,38716,61936,26949,38389,67100,51713,45481,157915,40073,199285,199007,171837,19753,46437,202597,164613,129529,121622,197773,147779,199197,151904,117677,178555,147978,168556,166539,163714,84727,164421,82873,39572,115281,17900,122924,115922,165513,79382,208753,72004,155037,14725,92259,132995,19448,81371,121043,118466,9858,14913,96662,218024,75255,20114,168995,23670,161220,155734,132924,100789,121297,152129,145310,207570,193703,10753,117894,212288,149856,170875,49345,84286,158598,124683,131430,39547,16219,118101,27132,212590,219513,90030,187253,22106,132264,169378,45235,10260,64944,185498,115191,137381,137035,159856,41614,95393,97740,74158,101558,107156,203021,88700,177968,156732,13727,206891,165383,169687,87726,17278,86426,24774,146266,177833,101335,74487,66059,189722,172114,157994,39419,87388,21365,152761,173961,37074,91703,81325,171980,183102,173574,102749,33803,155986,185816,22208,81155,98744,168687,207529,86108,213171,33500,175666,184242,217306,193665,179677,137990,24498,169351,175756,110993,133297,94851,4779,95052,80967,47254,56149,89583,200843,127473,50333,59386,36061,40617,20228,77540,206600,26513,133087,202080,141497,218921,193631,138027,9895,173520,88280,200882,162013,34353,156564,98754,40436,42532,47407,217932,181625,202138,163771,212085,43395,189053,114733,11453,107704,95498,177304,188635,62267,116444,160063,74692,197234,196224,205468,117683,61342,139987,62696,81125,211975,205617,122949,218919,50860,96427,177814,149532,115499,121026,201703,73864,203980,100105,131833,138270,109749,42482,214143,52349,210045,158508,36280,65536,13678,168081,125983,97311,145369,26893,186489,135242,32683,74727,21987,200046,42024,71510,117786,134572,38626,217853,114680,24042,58205,214093,118260,91641,6991,108762,13960,103928,131598,98031,61807,101231,21392,98281,58861,7389,83541,25974,219985,187355,170383,87894,207455,38285,36070,10040,75203,217517,123851,182471,217870,36383,198925,184952,122974,117598,161309,1376,20768,4981,67088,56532,218880,39195,143865,190640,159614,138352,110953,29146,203468,62295,78944,31941,12517,62505,110054,167497,208115,163266,119829,16464,29060,219332,131027,156431,140523,4308,165663,135102,150745,63422,188362,37637,76355,112522,410,161168,92488,63062,149596,109200,49103,174160,175168,22443,137228,94530,17741,137903,142627,132992,206153,133070,145267,5330,102355,123243,11412,166637,101418,97858,66492,195904,4257,93610,206705,17710,90387,63209,192136,172255,164693,27174,202244,152542,192724,198358,87167,34972,11622,92353,143134,88748,213256,168505,45898,217680,204247,179485,121798,182292,125444,165605,47783,212729,35350,16541,187706,203473,119977,9695,76924,52845,11483,207384,52289,10985,82731,81284,135104,104392,213753,142352,124098,66404,9599,197470,169581,50096,75002,93578,204692,12531,171896,87012,71622,32630,209542,96474,114544,104863,194853,115274,101356,88876,48973,130076,181421,130422,96297,209005,136125,69919,210016,21656,190600,111280,20690,112881,157929,215571,47305,142992,77009,84203,26895,20989,85962,173256,77500,80346,116894,157973,188131,111703,43684,180817,116367,92155,117212,11103,190530,92421,161179,114017,71973,167633,207481,1
5036,19671,175946,167120,106485,95239,134502,210002,196581,178103,41939,8163,37428,159271,177756,204816,114864,9122,33090,17624,61864,204083,169125,96030,94935,100362,148699,8489,158621,40222,178012,117944,97246,97521,116376,200038,20155,150450,36095,138794,96193,104342,82414,170278,73056,65434,29739,6793,192852,48771,130821,135723,101446,147239,30870,68623,203152,68233,184559,116987,56248,160399,74862,181922,128765,52475,32148,35580,19412,118500,45260,187005,116683,23013,212471,178763,83778,175070,91075,186001,17023,144178,142126,76116,78631,41289,186596,186007,183558,167169,45636,208062,94763,133367,58783,31814,52685,207635,36405,62058,207191,129504,6890,94606,145233,150019,96707,122527,210675,144610,34018,160418,22600,17235,81078,104375,187939,188447,125479,137807,107687,201631,107330,215833,150706,19333,32853,83106,168385,19441,117958,122115,178286,135622,90312,33642,217331,204541,144520,167718,154084,47700,201269,33825,113408,131822,14526,217944,32548,135817,40112,79724,43163,42458,84583,185947,59105,90707,136025,74461,20672,65694,51459,166476,71945,32812,163845,79383,161068,139848,24491,131746,168031,44206,155139,152212,40421,44879,172682,163703,188909,158633,88504,147749,10798,216330,7437,21292,11917,168153,202118,151197,69377,170689,55262,201053,149982,109205,162033,167507,7939,130557,164407,143019,75935,168272,79192,126601,64193,210944,211641,179566,106454,77970,118893,19129,180515,15705,41439,115239,108952,126943,121750,53483,89179,37661,81941,188314,83706,192466,90536,104521,34279,199403,97134,134998,147233,27835,83654,63370,122282,32118,70116,117858,64990,36930,25378,13265,76077,100702,161317,109611,65082,41864,213248,85835,151437,189039,81976,49763,200047,41770,130617,134932,122372,130746,80882,130382,6075,23605,103086,132499,119860,63105,56398,152920,92509,12755,13248,73755,129733,156635,171510,176297,123361,74907,140690,2143,28179,112965,35110,69329,190711,95886,200240,105710,95928,11860,105010,13410,149495,147276,51027,95033,75655,19275,101279,132187,118057,200383,144141,73327,216350,163379,178362,160181,31145,33728,25344,103259,97831,208377,88889,146232,95841,197909,37818,52181,157932,133463,105221,131092,10552,11876,10201,35923,187063,87304,210754,124189,136139,119796,39043,158927,135136,36642,85974,160712,83500,103010,161611,193808,78442,155641,88178,132989,217131,133618,139549,128410,185791,147567,78588,124490,213843,4361,96562,86832,176634,28732,109193,153014,80655,208864,190139,180366,164956,6985,156476,124410,69607,171859,205233,203466,151643,151423,59649,189110,13462,152967,125946,44698,137424,164895,162460,203058,99664,38738,215393,178448,63490,8276,150095,183637,28808,50012,4964,115615,82232,109780,39696,108255,181010,53448,107577,131548,203154,160295,123628,192895,190684,16326,185014,36197,135969,54353,147033,85209,173598,125402,137750,98782,82248,45398,120498,139756,89738,143202,92874,177174,202372,188986,178453,168639,210672,181747,69300,159927,126924,50400,64623,73139,146299,78249,58947,78017,202158,75760,184785,54336,180942,184808,128189,83110,125777,91436,146928,208510,189179,71685,75442,31948,150333,177372,142394,99534,103424,214532,90474,202654,210925,38397,76140,11027,187207,20717,90797,115966,171959,67225,195910,125587,56095,52970,217042,141184,71075,147329,182497,71203,35985,28633,161396,194220,153834,62726,63558,13307,175592,139209,59158,167233,61036,13760,26266,108344,86584,188051,123837,26357,178570,202051,36036,1358,144271,41403,106668,171151,124701,125160,170534,52286,198157,75299,84192,74819,169318,15509,201516,23451,170996,150505,60875,140286,1936
42,189560,9777,45892,109546,219573,46081,9526,219205,104069,206559,129861,48840,196346,75882,9821,2429,78221,148887,158015,28128,87829,74596,119104,168116,142422,137446,129476,35179,132176,122688,71522,50544,212025,29529,86686,42596,191658,120301,169944,67440,188376,48810,3692,193099,88371,207294,77338,148840,176790,198517,50636,46004,160158,167438,106296,216790,112119,135077,85964,22737,105076,175441,25004,48439,36860,125161,84830,65024,1790,68363,100551,61740,117109,197592,69990,86409,79153,152688,189499,150176,2997,68564,171359,94254,181605,61945,16285,175056,30991,122000,80427,41868,106277,179850,131732,184433,201809,81521,180955,30779,167434,77342,96349,161120,57894,57443,35014,125352,40156,119269,196014,158790,97943,109011,184009,143978,123424,198534,140907,210238,174088,216427,57255,199716,64871,178347,197850,156189,206404,21461,137758,117112,138457,184488,94839,20474,147893,29414,16189,217258,143562,132510,52994,150130,140682,39282,43132,86014,136404,115825,30471,178335,53844,187935,152868,128143,23839,133791,116780,211993,14584,118864,34588,134544,108901,119746,147825,15135,146485,121209,176362,211528,80784,189603,5706,103795,66704,214028,790,57121,151600,19144,11872,111069,90316,183601,16714,141862,15795,18092,123750,8319,75259,107182,47181,201581,35501,200757,168171,191178,169313,110195,98162,100264,117581,98958,98473,21037,178973,173484,141374,34862,171320,91165,31098,46785,140839,103037,138632,33383,190803,58376,218843,910,198227,5959,78214,121373,176553,188506,142723,111182,139443,99425,215838,60205,64883,120750,90743,40664,72259,49409,190060,200668,29581,8445,212199,173066,109901,161198,200816,4101,63089,54023,17639,26449,155698,8801,117031,156705,176166,184362,12760,64155,194128,11596,105451,115071,61430,141494,197757,203440,14796,36735,132093,75828,61384,213853,191762,151023,83598,151381,156740,202516,176300,214534,84060,62102,79105,37554,173070,136642,57945,108412,78774,72129,145940,155101,192731,45910,164100,177930,111988,145712,129924,12290,90286,168641,175988,99917,205993,137484,83554,182533,109137,107022,39116,78613,98686,48202,197765,141014,124130,63168,59072,78836,185487,37840,211229,121379,15105,147426,108180,109185,146089,138934,35151,101821,63663,66845,53303,86576,169530,20727,117908,97275,24238,140413,49933,13510,70375,98872,176599,158589,158052,10367,19079,49325,210934,199674,154031,189872,176016,146499,56889,125670,54759,87284,79478,4019,55524,49813,194373,30753,195789,197743,125546,63594,182354,158440,53611,103974,62739,144724,84309,203296,74202,99788,122224,139893,170058,94177,80851,68566,94278,134431,130358,122160,25849,210453,189376,199656,83947,53248,97178,82030,108509,11984,147462,58005,194314,38252,4251,68360,145047,153100,151818,188746,109465,77382,39953,51430,86472,60298,99572,149366,218461,64416,130888,144347,171601,179528,88490,67456,200110,215375,128089,189676,168216,193880,128606,120803,44101,192091,208398,92320,44371,36689,188825,143273,128009,48247,142117,168246,15396,137348,8866,219956,19485,214432,12717,200366,1673,108106,36042,165334,60668,17817,185380,39591,2396,57336,132574,119540,97832,15846,161961,167176,174785,161616,126669,172827,127888,4123,1733,139516,107829,3106,4384,138783,189318,72021,140440,75153,4493,131652,213164,182939,176693,112872,211332,46993,28053,25248,137464,39032,50313,162471,138007,66169,213703,92914,70077,208387,104044,20714,97793,106437,120323,148111,63755,182776,59207,78636,179317,217071,21133,171372,170365,199120,8305,24510,106241,99421,144887,124781,14710,166919,2477,184246,44927,21675,131034,113746,169058,205619,86
840,148335,24998,138453,11154,60231,55421,181479,148579,124649,71065,12205,19489,179418,73498,147720,172519,8625,47016,82440,4104,153887,37798,197198,215139,186833,215194,104410,20168,78453,42585,147838,63038,148555,218798,102015,177480,141705,86960,100622,198078,193919,36687,207224,180698,189835,20559,131279,195416,90664,14076,25607,114580,60855,219853,19927,89284,158641,201590,160921,156135,103993,202782,85557,7766,166231,71651,205402,118109,128608,59805,93273,144975,98415,113206,48690,178177,153583,173191,99633,22464,202419,162166,77431,209205,64605,186937,19269,21505,70247,40428,99614,186561,205492,166173,40442,193896,102137,83015,94555,27931,23991,1361,80810,116584,94350,199582,70717,26799,34735,22884,49123,112978,117661,146010,145421,134333,106966,27077,6884,23493,92776,145285,24396,156480,156773,205635,85047,100987,3235,76506,108477,101659,204254,22203,190013,146679,63633,149935,136604,44455,179651,99806,44433,36396,70495,78939,70361,129269,38255,16585,43832,113947,72341,110389,78578,126938,205171,20164,94666,65981,64642,188979,163848,129971,155662,161656,51261,120025,28042,35517,79864,1774,103728,87088,162579,99585,210783,86387,115401,87749,112883,213772,215884,170665,155816,35684,78757,158128,182266,52652,125569,46566,104396,83660,76401,192642,182179,165911,128714,150963,204989,63550,85429,98580,73479,214039,206010,103005,95686,29855,147810,52376,155248,143033,47166,201226,144205,7055,190634,121011,185694,54958,114938,209108,76370,217915,181622,18129,214686,208576,208416,107254,176417,130889,36484,166025,79385,63444,66238,172664,40407,187494,110920,206851,98825,19389,117720,156744,125628,152631,105068,140081,132805,181327,109655,142513,9637,207282,94248,183929,212938,140746,155666,167295,22277,28670,201576,65072,172386,174245,93063,43604,169607,160609,11612,147644,169091,177595,169539,104757,197311,86926,208727,112841,27772,2765,67826,58196,133870,195783,135999,146274,152179,180267,150794,57942,116858,98073,102533,121389,202464,178141,154295,180993,131194,39516,90431,6438,126555,27406,77445,109045,22506,30426,217924,191404,37139,91312,81760,89932,119362,205591,54229,136607,126596,91304,124823,25591,114862,189266,182652,118606,83600,17609,78712,11713,212385,186326,30168,5920,89935,169861,28668,177508,209728,43084,194114,63940,135264,45731,144679,41978,86766,146761,111770,121124,60779,209126,106180,165087,48352,48860,167897,172309,113281,103699,7652,193301,160984,51727,117853,155242,112604,101930,1166,184742,56307,53945,72966,197217,184486,212003,208668,16400,151348,26673,209584,140742,49010,95890,85454,51661,119944,29872,68774,175820,128315,138337,167538,82157,156492,101879,160250,102903,153879,29945,91206,92270,161837,45227,213316,176577,185070,209875,77973,161083,154709,22217,176189,35113,82099,30896,62540,30657,47984,97859,181780,37056,133909,101833,109638,156349,36141,150681,100587,110897,48694,166592,140870,181779,169151,45412,145467,43742,75716,36777,49046,82465,118227,162374,94119,2512,127037,35791,51151,100648,147215,132497,171198,129753,107252,179489,108955,186159,116353,128109,43844,21671,148215,8089,208635,199506,57810,76569,8570,72051,58941,140930,75673,44094,204666,119884,148281,194646,200781,129843,144107,133904,29677,150133,29938,70225,203191,142372,215818,96170,142093,215642,198386,10907,199230,188476,115701,143223,57289,111451,26847,192935,217598,171356,196960,64888,78042,8449,68895,91377,214100,22802,115099,30772,203327,212831,62113,55018,212520,193360,154120,180652,91915,186319,159976,165877,112241,43511,161520,36499,205743,54164,213261,54248,211141,15560,149441,9
1964,139534,73684,156462,141234,44139,84843,184563,76712,75846,150139,70082,134850,178074,213694,24962,35543,196632,205964,107897,15541,72698,171939,33262,183874,34784,65451,38336,186487,84435,217181,64894,199615,177270,102944,128308,37269,151368,165780,70442,164577,108586,98349,118486,19796,166101,208299,201558,24404,105604,135164,196556,72935,182112,96749,119195,127937,85796,152556,648,202934,192079,24416,191609,120179,166419,174101,182790,93905,16601,208582,140096,104258,57102,112628,216228,55535,129686,70395,84637,217145,74342,88540,143474,151501,34234,148410,127228,206649,90109,178926,199820,12690,12043,26102,164149,205491,219336,120476,4268,32568,41414,115680,119673,177,112145,53036,180510,34580,170392,79132,41674,71832,24468,171596,94485,65638,21606,97476,174196,170108,43391,13575,103898,163998,80087,190313,183354,197979,61379,112520,171831,23907,184470,24951,284,55947,124992,20431,34786,155629,59294,136169,178253,116671,2434,2113,182346,90065,214200,31612,110707,181936,34648,125226,18567,60017,100419,23417,191565,211276,26850,27053,82143,96354,78447,35947,100328,202758,213219,197435,34903,168185,177507,37448,17933,139069,147663,2221,159991,170398,43022,115369,92061,189381,55851,164647,196154,39681,107927,161500,179979,115968,56949,22724,26336,36703,197249,32284,154553,191367,100671,92152,112561,82445,207376,36655,64955,72605,168983,22085,145069,157548,157288,189052,159276,74690,204202,181004,7031,172597,79075,53856,135724,159102,133568,49452,196042,102695,77474,169389,14304,208084,62741,129731,101157,29555,63198,130962,167269,155695,18615,138487,3163,94659,82961,34598,101407,219931,148997,110326,95790,143224,179029,45290,199944,123473,201947,19134,4375,153973,17703,3615,68730,56555,10440,15851,206907,104326,132804,75101,164776,186811,131346,201093,108903,183975,105454,21607,167913,140657,140943,161493,40024,72736,21914,81240,20715,134035,53267,211828,40731,140611,85655,102427,153730,166276,200074,218766,170351,178534,168599,17331,81309,183153,114684,191354,62557,15179,64088,113897,30405,118848,160509,159601,14425,174608,194430,172504,194159,46044,31311,3320,185571,35969,183893,2975,42947,130131,90886,137102,135586,205255,215615,188944,67960,43768,97483,33274,196549,204766,70454,192134,30947,202225,7695,87821,211578,112544,71298,137325,16743,68930,186731,151165,164503,20203,130062,119327,133898,94404,14533,130969,148605,43746,96387,41237,66877,202723,26975,150447,176716,189472,30234,59874,194191,133385,673,11544,3221,64107,12053,123670,95414,100395,39343,46913,9270,144930,210531,187176,171888,109935,58855,84358,65080,109119,188819,83869,71108,216530,20417,149824,97729,31300,131607,176508,13807,47014,58506,210852,135207,12039,105100,18068,121921,74005,202775,81447,85799,22645,144989,119733,2089,96709,52604,76439,147798,205653,79668,193875,163241,63591,121615,96540,155108,129331,202535,203393,51228,192010,143277,199355,208659,64258,39470,1671,107488,6267,61078,141962,90812,166708,181786,2515,87822,383,198948,172308,98638,193483,190635,27249,53790,138887,62434,110052,128982,15701,37322,186555,73354,24386,11508,60679,136251,108280,183722,97646,120182,196057,22256,152245,25054,132494,34999,167042,214948,103376,19616,148850,16327,114028,173655,34264,62068,76461,68417,82577,209929,183691,218077,192768,85524,83376,118687,71291,61239,19578,53139,35680,204455,201889,153277,28566,40739,27697,43089,117851,122173,82321,106564,31650,140489,93922,201939,54430,118467,80821,121463,69470,31434,23703,41317,211706,179845,79748,216632,184734,182682,158152,10572,56366,219132,85929,38882,23910,186425,65069
,93255,103873,134600,12832,177519,78339,68201,214981,45578,8147,106008,118541,145739,196104,144210,65497,24831,121121,26714,210736,208092,36645,31838,3134,16166,217677,204723,58271,34266,51939,215921,97580,179905,165228,169000,22030,152652,153647,67848,213056,19657,5714,16900,51839,169341,116242,212152,33761,24111,217410,212044,86913,32119,11192,120847,13274,44037,150440,113549,215399,189905,103546,130056,7728,100330,179100,111847,45300,92784,56286,49060,72084,73362,116919,38962,9083,162313,161736,64431,169680,77360,130781,108001,144359,125575,15985,22995,73341,100405,109899,168467,208980,137839,65312,165348,210283,142516,208364,5119,100269,204974,94139,125746,142742,204848,127055,90186,148282,131699,84306,101811,70893,47137,6832,83591,156530,57414,7681,212517,204089,73644,15359,211983,123649,138787,93698,201023,153089,61017,41844,26249,64974,172193,63216,69868,139903,213984,207418,193215,14656,200291,57517,150914,209031,101822,93009,219997,45537,46163,62010,155325,83362,211343,193489,182878,94052,155074,7394,184405,183647,92191,148806,147746,37251,147615,49344,219335,212441,129034,142114,81016,46021,128564,9951,23660,14724,60980,157722,57359,5224,138005,125301,179,86962,160991,52812,211404,34207,89327,187940,46520,210350,84826,15612,5744,38721,153776,184731,37133,202995,29077,137965,95653,19013,97934,184690,173575,103703,153620,26484,88227,84568,35769,41288,191981,114522,207576,167579,127716,170178,83173,45666,185383,147318,181812,160880,206305,93539,58520,174465,154389,213504,45967,99191,80431,192412,180485,77205,46585,191058,471,184027,150165,102655,202068,148762,8488,47731,157912,83418,212023,160269,57906,167679,148255,27291,130071,36868,86813,195387,20396,62389,90914,83759,44010,166772,23258,185851,175642,167536,191177,88014,117081,2987,69523,54967,65415,181711,17771,91876,67281,206312,28320,188852,335,12621,101171,114876,192042,110009,108532,129258,99277,92073,143136,98395,26588,217266,125695,206760,151533,170647,198768,180134,59135,42639,118302,19322,209504,8758,77308,5263,83549,68453,27453,19451,89602,44592,98424,42180,191885,19661,143404,24406,88591,155721,160863,126546,184681,8124,113068,171346,43253,159204,113823,41183,14039,26544,86794,53883,49803,107290,182765,145257,189277,191358,141590,69537,173171,73799,77961,62271,25143,13011,102996,149886,144090,128699,40262,14288,94589,1000,112094,22971,77214,215979,173544,163863,157342,125915,52343,25424,6746,54251,44782,167297,76496,21479,124143,30279,80832,205750,104193,123455,128213,173287,69469,23890,168261,142639,101587,48465,97260,99993,96836,48911,116857,11696,69021,115275,121912,69048,59308,219435,70401,147913,16158,39617,197193,182575,174972,25350,22467,175149,90122,211859,141937,108524,198300,154593,59641,145123,15986,101477,214540,136991,109741,140381,180110,124472,61772,124849,78342,20697,103495,187250,9103,131622,149741,135480,209833,149811,177611,162600,38652,31531,198492,117713,45319,43821,55545,211933,50521,33078,11187,110985,20543,179077,114135,52931,215630,164423,38765,218656,155988,67603,83820,190605,211640,17566,22813,101737,145847,104164,144717,85492,72404,136233,120103,3241,206189,183608,163330,154029,136986,204914,109994,29496,39227,38849,148529,154123,152745,198804,26267,27691,147504,204352,26474,205978,74997,90143,107712,69241,100412,171648,127293,149952,159635,124006,45141,72198,105637,37710,162069,159555,178471,181839,105308,10166,103754,86899,181014,62279,12921,195493,124575,70407,97633,5303,88569,214272,208051,79444,81513,73491,128875,182500,179529,25747,59757,35228,78769,194962,116158,202907,216484,84113,706
26,191050,109254,158591,169228,23406,49266,115793,55414,215998,106737,194234,126998,199096,135397,98018,215224,14734,135074,207484,41673,17201,81559,186009,132853,105744,35559,204607,137438,149074,45544,50531,218347,52619,219408,15106,65402,8669,119655,13253,94470,184715,52195,71821,96561,215877,121692,132415,104056,166568,32472,178565,8011,97639,128132,155724,116943,46552,124408,153847,144528,91687,90242,42864,219585,67806,196335,217807,180578,23695,74729,7195,13015,42705,150014,204427,56182,58025,169706,59011,177653,54193,70538,170809,107349,134553,5256,204551,201214,178834,2332,124281,34685,169704,45403,157585,209941,2218,58659,66162,159473,211048,80074,186840,185956,168490,72188,111098,98690,91412,57202,122342,79205,177522,136482,216088,163474,104111,153219,26820,1926,134090,173632,98102,147182,169696,155653,159658,74102,78243,179414,28338,126284,16734,88154,72478,167928,85232,72583,68458,171997,186408,170778,78326,49377,39303,135605,200664,62993,14883,211138,210334,212344,157295,105931,173154,83777,178456,36681,6672,171809,130383,77490,68007,109724,106063,101783,193129,9258,152836,185616,149957,213913,49785,89959,190460,58291,179914,140151,167330,205756,124711,176126,93977,78694,172389,146341,47483,77013,25586,123412,33539,194227,67804,190103,211902,146546,199994,47990,178182,87984,171556,23409,58262,93135,58544,195090,80500,110224,206593,86852,97434,68304,153503,77083,121572,30875,123348,14071,166303,152980,152125,162700,18668,50998,131197,31409,178440,100052,138893,78212,108293,14172,39525,36009,52506,89828,107267,149265,119949,37545,82938,187608,47279,204194,21184,127816,87196,165421,46353,82159,166330,15207,1235,118503,71703,54888,199542,202779,44198,151068,200404,41532,128339,205838,200165,204407,23079,33922,161082,113694,166064,112567,105747,112466,125147,100484,1223,9889,139927,52275,191537,97806,3690,84466,198403,218425,139207,49958,4696,177405,1081,164282,192309,65227,59314,181803,90269,81813,33798,26924,100545,131946,154316,80246,43783,17114,215965,78171,76238,119518,190796,136390,156334,137394,88561,113682,180035,35072,89845,128497,94175,196638,49394,43223,212863,105746,5013,59878,58804,190622,34394,55362,192433,154584,132286,44298,32286,96099,186413,170535,207651,9794,98354,167856,67053,202178,140177,163351,12736,155693,12572,173531,28341,171328,219183,5175,12644,180813,30353,109255,116474,99681,32399,143466,66253,124360,183990,40141,53869,182240,179762,165232,2439,78935,110328,173451,26471,173665,136395,70772,163262,158763,35956,109792,217750,27502,134108,188420,161925,30863,74109,29887,205720,27856,130479,52074,159646,52363,203937,68942,217972,137139,52867,93827,184401,107415,77424,42040,10329,143691,130116,54445,180408,126003,86665,61474,717,3342,176064,23331,28948,218951,150510,175380,129526,39097,23614,204348,134140,19673,189677,189681,26552,67080,168909,60597,119734,77600,69238,121707,13128,26164,213169,45797,10340,76170,94428,210133,177410,81974,111672,193603,30382,24903,198758,12029,2535,36279,173894,169659,86156,93723,114744,163002,70335,192169,24366,97680,213637,89842,47938,29840,105061,104923,119678,71259,219075,101113,180853,125369,209688,110556,170091,208905,43720,29637,34737,182496,190424,15373,41784,27187,109059,154861,126530,210308,149145,177836,114861,47516,160254,99549,204253,94004,160263,7554,179428,219817,190651,34250,126636,127958,29605,107815,115523,12051,16603,68580,171697,82375,3240,184698,177714,137577,201203,194936,189732,148114,58716,175595,88683,136078,137128,180418,183652,162067,24967,113468,173464,190487,65162,126334,90636,173987,177286,168140,
100893,38968,219947,216645,146366,160671,3052,168663,41137,132531,123021,129980,43359,19625,196982,128882,84052,63148,86742,13199,132888,58381,144309,99257,105716,61603,20664,120348,116441,150217,117466,23073,130667,117196,82853,31118,195359,171277,4658,26907,105759,108092,9393,146048,143942,44,216507,23827,207561,158388,162284,167126,79848,133257,147412,54224,171598,176022,119592,89352,95248,14227,58436,119133,87024,146840,201173,162465,157273,124029,187867,172272,95235,187672,118628,29890,25542,199051,172729,58129,575,89484,93097,217549,166110,209692,76541,143573,136835,98013,184050,27871,12000,43032,129085,36817,215440,187919,194644,189152,107272,28039,66660,160131,51359,49163,31652,102995,56515,119811,216151,204844,49959,187901,88587,28250,198482,108650,12189,166000,219536,176197,154353,32301,202413,118097,119901,172610,154485,132172,35294,130764,1137,139031,192994,12732,113458,219890,153385,126597,133256,204755,46452,181355,151664,45521,191494,213711,33654,27146,101556,172365,157429,158901,84369,132149,100837,109750,181538,199851,65074,71721,104299,89878,77115,118710,210972,34962,36223,108198,157948,183504,196195,134294,78358,183892,143604,82289,186613,193152,145118,165827,56975,52661,54974,163621,187271,76173,179991,90516,176258,34779,186226,170416,46612,132092,142210,182009,166953,84510,186214,30252,199432,92337,147398,125926,152779,149589,174891,186154,109439,176748,196049,139959,74954,110726,211892,3347,137030,27910,6624,99842,38096,13910,175703,14838,52403,196656,205922,42188,75134,179146,65683,38385,16261,181035,205614,53477,197550,141865,8468,94026,213392,118644,26864,161509,177085,193853,147294,58800,194619,169638,147133,101944,62284,133935,205601,208885,209406,182485,75402,181649,174193,12315,100029,101595,206598,179607,81960,144736,14103,3398,194948,65887,53238,162676,204714,116146,217575,59126,166143,184714,186003,169772,96401,197946,154241,143291,193580,158174,51503,203462,190031,50724,77116,117654,46511,178954,19521,49014,45559,192696,134701,31287,98880,10652,112330,73167,145908,69268,34096,42186,153088,66850,1479,86655,122245,186040,40581,10470,40292,84575,156468,13331,202810,163936,161378,173510,78945,128594,207069,150123,144221,93869,19105,205529,185514,83324,137729,57661,48200,136473,17872,206112,132419,41778,109033,142365,140780,105753,24102,91593,57985,55929,173889,87001,86787,94580,76958,57264,197304,162078,139067,124480,147150,215924,197167,170210,2307,30073,175358,90703,116828,63005,168365,160878,162825,63917,10453,84131,99974,208720,29720,101155,67263,141619,189785,74141,6476,135493,215510,201980,98293,134823,133361,116800,127612,11495,77708,170794,50465,83968,134714,21586,25154,43881,202720,141026,139776,1921,164346,17163,55255,175724,165023,202789,55882,179674,111203,219064,27243,53891,140969,217251,184219,194347,203215,114094,169492,214107,18905,195792,179842,40305,6226,120383,187922,86957,9931,22943,19201,14226,46659,208198,66036,146298,18465,150722,215382,59675,67803,106858,101720,118259,165207,106023,113514,82943,4427,102105,176501,31013,140203,1546,168145,188818,162096,16577,198170,195546,153513,183732,19028,93768,131815,27822,75978,206278,179798,78997,159007,22676,73798,200612,114734,99985,103576,215514,173204,8079,124253,40979,139845,57636,35751,195757,102496,208868,209055,143058,77326,166211,37768,78133,177547,208408,191188,169853,97813,2922,145103,145508,37060,32305,10950,1318,150134,156409,103306,140811,143530,22315,79872,55064,196161,194361,190300,90138,55505,108174,169327,134362,37343,42193,48232,57878,161712,218824,66254,50941,30047,47541,175892,20
9626,148712,14257,196881,143878,120917,168997,144091,19174,76192,17658,65716,25743,51751,190208,152199,127714,86730,93621,33610,175976,171828,61481,26226,74746,162402,18299,83305,127365,118134,86073,161570,172863,80130,181785,153136,39949,148256,148599,95549,211733,82924,112278,44207,1266,82107,64533,205956,58071,186698,193938,167991,114327,72529,94237,35207,178147,192144,86917,125817,121332,116747,94041,203100,80077,127656,144617,28529,46077,169227,159023,21488,73561,216647,36700,141195,51493,69005,192294,164264,20044,190690,19765,4883,132953,165577,8177,213029,152539,151755,106203,201641,154674,55773,9609,144597,68319,142467,141473,123759,171418,203213,38963,95897,102891,58538,154560,192211,76283,35096,121818,131366,133801,23738,104014,95385,131640,2776,173717,62621,211429,160965,179348,43474,198497,134777,36797,116974,41739,46961,148467,148324,172861,38325,192916,191890,126747,94056,10595,57362,127832,60751,16478,69299,96845,61346,11221,175265,54204,135760,207476,94699,101736,123122,119182,12173,10460,170006,84953,27059,184480,136674,166085,217487,71695,218856,194487,69026,151684,142698,149464,45573,98739,171357,99226,95760,154056,171268,219013,20404,135440,65951,98652,58431,135363,106389,92510,214233,90448,127627,207790,126382,158390,1729,131553,36429,115316,43412,60290,20208,134883,168683,73612,55447,39384,48996,41250,94954,157816,188036,214159,31019,173305,60383,108688,212644,86566,188810,29711,126226,177007,125518,125572,55234,154872,42269,169675,107714,5884,63876,11310,33524,150336,155280,42239,35317,165030,134386,10273,150016,38499,43605,67701,47195,135434,106287,157117,147745,167148,4881,188625,190834,173472,215299,74305,22478,56778,117755,158997,123578,126266,44713,57504,217674,107680,157506,37411,208303,152737,215076,201192,47679,163371,172922,43967,189841,179723,89829,118874,156377,154821,18046,60244,99145,101046,34658,28461,4341,53044,137025,132214,179402,110046,216665,39206,158731,25244,46510,124746,161152,157237,10233,205486,139311,131944,197232,30806,24429,126416,172855,34164,158197,141370,216929,314,203127,177880,170192,113114,136850,110279,97092,11387,182882,138888,112448,60158,210318,126385,206977,201502,99938,91886,154415,210926,194351,199632,219464,182637,151026,25615,111539,66720,125079,200765,60951,21972,74256,156838,142535,107124,67582,49550,1433,185899,194942,206942,1864,151515,136980,30888,136252,98311,171585,53849,180293,210839,82811,107176,94966,27940,40574,111692,170806,153451,66488,68498,140536,111034,148024,95851,160400,72236,178416,104190,197911,53397,108325,144730,205465,146161,126476,46850,97049,146563,145466,126814,66526,80387,151427,90270,202950,184941,104367,192880,36193,31217,196035,60233,117072,205287,183320,42413,26287,56218,133259,105004,206066,88719,209939,144330,160963,182729,179454,185280,13645,42136,102001,166691,1386,210860,23878,215811,123645,157350,114131,23105,16798,68967,58135,97212,20981,159653,9897,189921,100748,105026,160947,172012,136578,75096,186543,156623,15063,202796,215018,45946,167848,129595,115073,2807,52876,172198,210740,135830,74974,51051,8810,205510,157159,133837,135217,183094,60731,161268,44428,176691,9453,94303,62719,171709,75,41273,22765,72335,204386,104709,92570,175547,103584,106286,153637,52510,4028,193180,184450,23636,144072,74578,190352,146568,152812,91251,91088,213782,72484,59322,158954,50993,119212,105894,150708,1219,68447,45884,106727,132707,27909,146441,18975,131902,77136,89120,30673,210486,134555,67552,141008,213161,207982,150704,65573,181978,116069,98849,147631,126780,201842,63772,198567,89040,203318,120284,178675,
120775,153833,7169,126345,30479,67585,17708,100315,206741,57771,177448,183809,60274,209757,82766,108114,92562,135398,140417,2548,183715,218150,69035,65079,75311,24381,104577,176605,196290,215239,89651,19930,145372,46788,124926,82279,92677,57295,150395,70536,47915,195600,179072,49851,130809,193229,52341,61567,192788,63453,69574,54205,60085,53154,218874,65456,219240,162161,93330,40893,27967,31425,20575,177160,173912,129149,160911,1736,200342,141944,11079,126429,97306,162704,82684,129064,150606,72019,128661,200346,133990,73470,104301,58220,196652,151169,33618,14194,137818,209523,110933,125388,141759,8536,131720,91039,103210,179919,203398,45822,119851,31553,183040,135844,120604,82746,180672,2206,3098,176712,55247,99464,152307,26410,85311,94791,160636,68078,143802,68904,62164,70623,123560,160716,89940,95924,48211,130749,111134,55189,109862,183687,197183,97598,95703,105012,73303,118155,47156,149791,34377,150966,179833,59917,213435,26180,188957,210786,61814,145579,113303,99846,51396,38232,209725,193604,39203,123990,211201,2553,46296,110499,130966,35250,165321,135530,139980,188890,205870,127564,78652,65791,147361,148854,12506,104438,25945,181021,42819,15738,56483,213337,124921,165114,117926,195190,87728,68096,94624,94452,146892,146143,212340,67558,90214,5199,107940,31781,92036,178149,158748,52155,151218,150967,170358,170165,141148,45754,204553,74608,93089,85220,176473,133809,127793,203094,193075,117160,20554,99895,160868,75110,121569,165599,37971,43928,167924,211738,88444,110576,165484,160513,149851,106108,18702,20738,42467,90079,59825,82177,83067,171183,76377,186235,158698,168989,103666,71106,116627,204727,95201,196056,153857,136560,118040,43861,48204,6055,32778,190781,61506,173545,65967,195002,183683,196539,20988,55200,43754,104083,27003,25048,16722,143768,14110,6911,202095,104074,137358,200539,21823,27244,68800,162031,40704,22101,171660,102195,81099,60124,64853,193355,74494,116171,164228,33138,34413,138082,219812,44723,7489,9215,173945,93543,85249,124821,122471,141710,71411,143797,116613,35419,182877,141371,46409,154882,156653,119680,165425,196322,166684,147845,114026,137221,167716,167648,139928,81175,91008,126262,141470,59540,210949,23476,115831,80705,95887,108056,68106,40820,79579,2767,24,137184,213746,198782,35730,90147,75173,122997,140272,2180,180294,128956,198042,125060,78541,2394,200768,112836,209181,76787,156500,60611,208867,1892,143710,97948,49432,107934,146945,114584,103481,65508,42800,187416,193231,169342,102163,217167,99184,57826,68938,20764,110687,163924,62799,135426,200419,209704,160750,211700,181908,179474,71267,74626,169989,194753,139445,212024,70470,108240,49283,17947,47816,35815,75915,28929,166957,161934,118690,71338,183624,163251,128973,49227,84054,5084,39885,13010,140239,5801,48742,26810,73954,70567,187484,114928,190606,2150,61029,23644,39500,3860,129388,56934,89630,110164,81677,205973,209574,130651,97359,121552,182697,207467,86572,21030,196757,38296,14690,69862,203332,102871,18713,123246,47606,50382,65556,214237,100028,217121,3307,31971,201364,187349,65263,100872,202172,116119,64792,193362,8784,52213,120805,192760,202111,24631,136613,174589,54643,123948,102006,178636,81817,84689,41579,164931,190902,11058,195225,144069,136557,32018,126151,167384,78332,52094,94483,13654,174770,57091,29603,61857,165913,113812,85069,3949,43872,208244,72377,153274,31365,206869,63724,2340,11008,152325,205383,181076,124589,180461,156293,40630,202059,23456,65201,41339,131050,23960,179816,162174,88715,199114,163874,15742,15669,93196,168971,40348,140601,207478,28152,32984,127885,60994,83031,146368,2067
16,168990,198363,91047,211163,115007,192201,50584,5597,180682,99029,93891,189896,35319,65718,10351,127219,185237,114688,146544,77257,142274,182724,128474,49748,148557,48923,142506,91138,84850,191994,122786,72174,217933,156940,145364,42033,18055,139234,40638,117803,77508,214805,141978,96236,119034,188,203402,167254,132633,99363,192323,48636,4907,162459,95440,203428,162581,62596,84068,189006,16604,172566,141284,214708,101578,49773,179103,130429,200732,144563,40404,160704,74220,84567,107427,168009,84416,28957,25233,137153,172724,17479,11475,218940,34019,46829,8198,52973,28063,127236,50231,25887,8340,149687,168231,219230,101868,172470,128426,137840,163570,183845,154743,67092,12867,96037,57861,158847,188178,18579,3430,54719,171900,128819,218124,20960,122698,209982,167488,15912,94614,146281,194898,109039,215489,208207,185711,15283,215913,161725,69681,138679,10656,192492,18451,218747,79666,20430,56596,136179,25459,26535,190099,60559,139371,101012,49665,50625,137938,172196,65211,149429,163925,46897,146387,196396,146313,108838,195587,27659,200876,167411,151142,93837,176814,120250,184169,174389,111955,27289,43728,207739,47289,144170,105265,132302,88476,172368,41465,139958,157264,116132,92359,82615,144200,85106,97444,108093,200326,112876,216571,190281,84790,159491,63103,147625,143032,56079,98320,200725,144241,195186,66607,94374,118594,117309,104094,51,190398,9086,74575,156175,4273,135688,35411,79125,1721,78730,58166,56314,82550,5688,202250,215466,180805,215570,72340,145139,134026,145836,149597,149844,40557,24716,9892,120683,93233,40922,36282,69465,96228,152608,65498,212618,78722,24092,149549,104804,149176,56999,192483,20237,189682,85116,207855,126493,45325,127998,216453,180773,184247,151055,214831,24999,156036,168363,78746,9796,191572,211901,151069,100020,20572,204504,14659,202087,87658,91207,214208,113850,109188,31598,201953,59432,195999,171122,157889,166169,203795,165725,100266,14189,79897,137463,10795,141539,148088,39211,85676,131701,182915,57880,40159,13243,176589,191778,102991,116815,197352,156010,150799,132311,58218,113229,75964,206141,103369,38800,34652,88124,21638,124742,149799,195379,12926,143694,104645,68827,22302,104471,131549,113899,202853,182942,34177,199762,35355,126070,146616,51932,127889,103630,170983,149770,51406,147106,161479,192850,137428,64551,63521,18781,69797,203626,112739,84780,179499,150999,23486,29499,176094,108418,111028,95752,91306,49935,84079,90315,155681,171811,6345,201767,79906,44369,91676,161538,215373,210448,163363,38225,115464,11764,214478,61207,52346,179662,97994,40892,198635,190968,22495,29457,193876,14296,63713,162792,38712,218220,214993,75414,3600,89986,181874,210837,29170,79984,117980,123519,5963,86999,54790,193989,141034,55541,53425,143241,137171,196701,67805,143864,126622,163326,191345,37053,182208,183174,175180,4820,98725,152831,173939,138097,11398,66966,44648,15757,74452,52611,206881,182590,161995,71114,13915,121886,171640,2184,17458,101880,173112,71122,173796,173041,94429,28283,5001,76675,170239,101324,60599,99798,133310,26110,127051,156527,139345,23074,48868,52133,131242,120058,146980,186769,5633,10420,64852,79934,87443,128432,103127,195137,41157,4816,90906,179388,92251,93160,163900,79253,61919,131400,72319,212832,16255,164047,67549,101494,213610,7724,145474,73451,39769,89597,102982,172210,35982,23014,191336,100193,136503,177732,160549,163977,179967,59618,109401,63009,150181,40042,111795,81889,158693,86320,117747,142143,34543,54200,171041,36327,152313,141912,179319,209742,53034,36204,81002,180494,184847,137312,35455,112738,201863,38050,172027,148070,41431,83
229,17777,146103,139712,188597,31034,48269,83256,38147,190599,200654,83542,108538,107807,79520,116130,195457,215403,166945,193551,71774,39834,204238,49265,27989,34483,48430,6867,199871,160427,161728,69956,158755,148646,160527,60532,156332,60865,180191,6837,94178,20834,180392,204426,124938,35150,131681,158521,122883,186651,170280,48228,101570,128829,124216,36048,42935,50661,59316,8448,107109,3935,70741,108277,58341,194606,175987,53171,198646,53624,17337,46419,115155,161758,128435,180319,85169,119942,64692,112081,106133,203480,10964,159365,192762,216092,99712,188796,82024,103324,160302,194633,149929,140550,137686,168137,184578,206286,10095,136485,28656,118386,119773,216795,11119,64414,135974,140999,103707,192720,10289,146197,101554,135299,200582,165320,12922,49768,76330,115226,104343,79837,202814,216955,74950,120939,43643,110745,77180,160729,14299,180867,135520,37287,121524,217551,169371,118409,23708,189437,131244,29667,47125,204139,153471,182482,143237,35469,103040,84793,118197,6530,46891,118929,154747,186812,52935,119192,23640,65557,84954,181329,170020,186507,208794,42990,188317,192220,32386,23905,29927,185133,117807,166218,95143,22362,137315,185408,100686,190545,207373,134130,160451,41916,99268,144269,99773,11509,201394,44796,87868,148240,172891,39345,42499,37907,46888,201629,186658,13450,109990,80756,81138,191407,201491,116983,75702,164220,47368,30816,34490,53176,926,163200,200643,66351,3200,120230,130450,130716,142475,211669,1507,159379,129359,62358,56852,154366,172876,124705,118944,107518,115834,20815,157605,157313,1723,64150,143482,72905,162467,164793,146158,214989,216334,12823,48454,38864,14434,171252,91493,9626,38109,8501,174849,151543,219680,158147,78234,86798,22990,12853,28544,109302,46726,42293,176184,74548,160193,173257,181276,183460,155967,150020,206387,147371,46509,119364,196433,151772,94421,9395,24412,201460,78458,84625,96618,99336,215027,17738,198821,112020,144302,17019,76764,123421,35270,31987,182090,71937,86300,138163,108464,137532,55009,182407,54801,160446,15252,28652,149780,191293,164882,138660,22982,126789,189811,180526,158538,206617,185284,75284,111949,159604,82965,68068,137728,150770,92760,34003,161032,147491,130127,136237,191151,165637,129107,211807,47161,159752,61006,185751,76149,50739,414,30987,6287,121866,163960,208858,30913,193322,54350,209600,161299,108693,11925,87501,91658,146909,211299,167117,55663,2961,167934,52740,180883,73826,66694,207145,17782,80680,78717,132825,144177,84914,201072,1607,29841,179787,87368,89485,103821,149139,161478,7632,218115,46040,86642,218582,88588,176284,125595,124387,208995,9321,169253,35587,126221,40733,164416,120748,140762,87846,121233,205518,213428,108515,194664,14685,218917,136596,78471,148096,190045,177785,82707,140479,4906,54594,151361,179697,86958,209593,208609,92692,17357,39139,73520,8171,88906,98367,164236,140056,43507,149679,158777,102122,49120,156025,30630,205086,197011,80814,184881,82444,186080,8291,71171,108493,22753,130299,62296,97408,118464,217437,79332,56659,137651,79250,45878,29420,98098,205768,9802,52978,24275,116332,132643,6536,209459,126527,131086,40390,89159,126739,192005,137265,81358,203301,85809,37563,201714,25756,215518,135304,177397,34791,53788,189941,9582,120420,101603,187086,136929,30365,1077,11542,180106,155788,22826,87369,122388,12095,27027,136466,200849,195442,81914,5893,151403,198834,81826,167470,84116,52574,50474,75121,72522,139532,83663,202581,115635,175931,30841,71474,39143,41286,71732,30005,3999,163301,146702,134757,26837,54153,142220,91833,144012,83488,71054,37108,184642,210764,150492,64179,44898,186
75,81625,160763,88798,141709,116299,13297,34098,7600,123400,177077,13226,27768,100558,22575,102738,46919,184727,156269,155387,165011,219198,180912,183334,116873,161563,93224,81904,87118,214287,51943,186882,198810,175864,212153,139849,20590,198113,200390,122079,148842,34549,24357,213084,210331,183133,122755,25496,119428,81367,179431,168930,186178,200931,123350,60737,58666,37675,161727,67646,95276,102780,48064,183399,174373,86593,206773,85134,118825,127796,7933,17135,124606,29592,92128,125475,120911,44774,73992,201311,91387,158450,150648,33395,133155,26983,106348,214316,187242,208865,28044,85424,145791,170249,18592,207731,79198,139656,120841,105071,154805,108381,62541,162758,63882,143641,41686,126451,100615,17317,89316,54588,213363,181943,154919,113022,205603,114125,148691,206392,89705,142744,181962,61906,45373,173699,175532,20419,193820,186622,150520,219348,29969,155232,156257,121348,85295,145512,25599,215488,116710,46182,80749,106376,18247,156830,28506,179701,213402,156344,202203,214922,99329,85828,129576,169080,173058,16117,23732,57045,214180,36416,152244,197915,166758,212294,48,198463,38463,173246,118744,139265,76415,102231,161368,117220,51292,53912,178655,82994,78573,149494,208233,178142,79835,113961,188164,90246,152758,2128,160298,133804,105957,33437,77118,16360,96789,143886,13927,221,184380,146808,215155,94173,28756,73979,8174,61667,113171,184083,154707,103355,163896,83395,186587,100346,106439,100024,114082,142899,55369,173667,45458,124484,36456,49342,145600,100819,94447,109375,40440,75620,175653,32911,35200,3041,18807,169971,4117,37747,189094,127585,172471,114487,35592,22194,35971,168360,211272,109014,119545,206812,158447,111595,163380,47132,119116,133540,111181,144716,214976,117976,100850,93141,59176,48704,111351,158485,1947,151503,130588,191604,62062,186680,131525,70582,107698,143174,151087,96198,8771,9926,148937,126685,184780,96620,29665,152113,126230,181009,37247,191413,191704,137994,196760,204176,73248,121880,135515,20508,137194,159926,23185,64285,123850,165324,194428,173994,175300,20468,179325,166117,141347,209216,151946,81919,13834,90058,140255,204086,2826,122911,119022,134391,123006,150461,192365,86904,110366,92514,209249,77232,75814,43911,9260,182203,156666,196457,79710,190571,205788,147772,68273,16100,164967,124036,151556,44323,150171,13531,172050,76481,212047,193299,163660,7156,75187,127076,21629,33245,132538,20148,205458,72836,66073,34744,161427,73655,72146,67359,3723,158288,45773,155128,193715,39619,172793,117087,96932,211770,45979,68485,205134,183703,61917,127118,47398,209550,138251,8674,89875,22125,38029,156118,215409,40409,219575,130009,197331,108251,100444,62376,173189,115391,190316,167136,43852,55274,151455,192508,90459,14241,168364,69309,198045,214835,30407,13768,43935,121638,104528,124841,79001,124850,195598,197244,139597,61252,99562,179178,184970,102254,68845,139052,113962,3109,75705,209009,117655,100140,177862,82755,139002,179011,10752,25523,139275,167096,92753,113190,160035,170935,71457,87809,70864,70015,210322,25762,66936,171532,102538,120876,202676,177023,196369,63385,192495,176131,154330,201981,51098,136302,88449,177659,47164,82121,143553,2772,176370,218022,17813,3135,107806,25611,137694,27238,179699,116901,92147,57518,190120,74264,66033,50614,8923,6212,162371,24101,14911,155749,172125,219010,56839,157125,31926,185617,31451,158514,34054,98555,172069,169844,148391,210554,48435,29057,182361,48303,66410,77219,122409,173140,19848,167452,118867,72452,201682,111931,181461,201819,201882,84778,67459,204664,19397,72689,203713,16354,100348,101866,87203,15744,67695,2130
70,69380,46643,115494,18192,128901,159986,102926,46777,169209,128989,28424,37715,141723,201512,113050,57248,27852,115757,187537,67894,27076,9138,19115,18458,149774,190721,65692,203748,116885,4919,130469,39021,171144,167860,227,9918,174222,102362,69844,39742,183515,86463,102584,150638,970,185494,202110,128628,125341,105883,190779,204717,212933,205883,18376,141591,59204,867,100774,180685,214427,126370,31772,140326,94088,87409,178739,79384,105018,14468,165746,169626,23767,95775,140149,89145,21091,120491,199348,200935,76010,169814,53246,46299,145352,10310,100706,173207,50070,78347,182421,102263,27988,165312,180575,127533,22246,165882,114814,49666,36685,9358,50618,6649,188675,155437,62517,128249,101913,16513,207811,141702,168665,80669,130989,139969,218693,212802,117783,4303,9710,88941,14694,88530,206511,189008,11223,106301,30982,55490,211577,171402,174932,137132,120159,177754,140656,39047,194262,151872,61075,18883,9667,192796,37117,128160,171398,4997,123527,162102,12006,63617,183840,170075,34219,3765,45154,165848,13096,80566,215151,25504,119586,160229,44745,218982,154938,16766,214381,214294,27874,29931,130323,155969,209498,161383,57821,16543,210882,53121,111728,88432,212735,219125,193443,177837,105384,84996,31667,179329,150986,143100,36000,84302,219264,207906,55897,200337,11950,155473,45468,87950,16573,71799,37024,198587,108830,44053,182529,96974,145072,43255,114992,63869,152564,10668,26205,207369,165726,160547,98600,203859,41354,4097,127655,21160,1664,1893,47032,23367,185267,155642,74378,109110,181797,211658,207681,202522,124783,181497,33264,54043,37509,167726,71027,137124,118074,108011,62718,45756,160048,181318,9862,156309,47186,188009,131042,160445,164414,216657,205149,78957,91187,109782,1198,146200,61771,50306,30803,138988,25082,162246,94870,199525,141485,133048,57481,121691,201937,164223,172838,54554,72463,13911,50782,29538,208827,179632,124364,130420,46222,41880,175673,11302,14163,169046,95471,54942,113946,65898,87633,161695,211066,198812,118789,11355,27373,194095,13751,140865,41430,205912,7619,127155,134978,30425,18575,61682,189111,116607,155374,151981,85451,109855,52101,195844,177432,85376,60309,204203,28870,85493,59711,25706,62159,193396,108159,52417,98443,57845,123467,153378,126984,14244,114259,37995,56666,159759,119273,46677,101536,97906,35757,119993,188696,50896,165680,94002,70698,53008,88051,33347,8413,181458,149072,15637,73558,35783,29126,122037,120213,109634,199318,90066,57633,77374,158239,41101,206974,208290,167247,160308,44547,151551,144901,60215,22668,107923,136778,128244,56739,155079,124122,219411,121310,73182,3802,191369,112226,192984,64658,64967,85798,113835,151903,58302,155129,39051,150392,1331,109516,11593,10501,123378,192276,201485,132177,8813,111594,139230,103368,125637,212262,209195,182623,215296,25689,118604,129836,63048,21781,146404,197415,3256,181707,191833,106592,57149,34789,93953,77539,62668,169774,23784,4175,125772,175383,79911,42415,184367,120398,149521,195896,153002,6399,119233,202331,36339,184322,106262,48482,207056,97103,164314,121712,184523,166042,357,164444,18533,87269,8575,77749,111647,197365,70723,182990,160563,142196,150792,208267,54302,206488,179480,21222,162648,88000,192875,14368,134935,176856,95417,13516,29089,100389,95837,121989,162302,10714,95832,96058,45945,169914,146483,184778,17565,146999,185948,202788,53664,87799,62556,141461,29768,170141,125434,197204,152074,54996,197439,195301,199461,59097,84007,92461,70086,132006,178317,110868,72651,14276,16611,97730,90499,118596,127461,131452,138066,217464,32167,123250,186076,139435,216368,139684,76615,118483
,162778,96507,209689,176503,181724,212020,17203,49375,120028,22887,21016,102415,46874,75595,20171,28669,43192,72336,29366,182735,38785,130109,39452,53544,136495,164436,5948,76852,109192,178887,141933,34610,124231,90885,110134,116355,193386,5631,76419,49561,16501,5776,210236,60903,218876,136215,168996,88048,107922,151703,89860,171903,169358,206261,85539,127440,155545,174549,182678,138979,143777,116721,120063,79713,148701,34591,206922,80345,45019,206901,68159,59449,126941,97491,75535,201785,12404,38886,92568,74549,162170,42150,186597,140330,74775,45969,185627,181628,174681,208262,40235,42271,62365,30649,10787,83001,150608,33154,115576,68264,96907,48364,53780,147452,185371,9093,174470,83084,90137,38782,97757,117552,141522,42623,139891,79052,31847,6533,106053,130919,201462,38111,120072,83943,40908,1125,193592,143168,133014,86843,169671,184929,77478,124232,75976,197698,150399,174820,38557,209604,148172,115895,88784,180655,29442,191795,10506,36548,177735,144430,30638,80761,6500,105284,161879,57767,116909,93688,24243,85555,14426,159423,51157,152943,138001,58233,204808,201089,72856,51305,5910,21912,141141,116774,177488,176044,22905,134162,103395,50973,128572,83298,158117,158617,134101,14692,174788,94650,104217,38783,187685,112103,96896,101217,97735,3195,74619,90418,92105,80762,116805,147071,22963,56543,190802,116449,194387,108949,210681,115999,75663,42936,211202,190841,111105,190999,108578,36586,219024,153925,156745,22564,37654,54469,79490,124416,137704,87927,154842,198210,98506,23186,1735,107337,167459,108662,45847,145703,121141,186186,148875,197982,35894,52234,48760,218178,100078,75881,145192,128604,11071,215691,216596,79165,218781,153408,79669,129525,146612,193948,121491,58856,7035,207413,641,40376,68859,78687,167917,218691,166870,123873,32552,171393,174565,75415,109355,47021,128945,180313,169026,64159,7456,31288,200466,72670,129250,17491,156995,195445,94833,152322,125956,148261,114312,67609,164625,179807,124791,181992,214507,31348,134355,192140,150801,4599,68036,42689,152477,15338,38308,83802,120859,181739,35519,93686,42810,13864,71478,94676,109973,166788,93319,12770,75984,76457,138834,167495,20139,134707,50807,200918,164634,48037,200458,156961,197297,43813,142530,84569,82892,216746,179560,209683,78924,71161,179684,182967,180445,25122,140095,152055,92177,215827,37674,3381,35900,135835,73771,97783,204159,127113,89211,42737,26848,57114,131958,57963,217992,49392,208432,146448,160806,180788,217950,137796,139485,186789,144470,184094,198261,210391,29875,109404,120715,20027,30556,214655 diff --git a/data/molecules/val.index b/data/molecules/val.index new file mode 100644 index 0000000..78db245 --- /dev/null +++ b/data/molecules/val.index @@ -0,0 +1 @@ 
+20952,3648,819,24299,9012,8024,7314,4572,24132,3358,22174,24270,17870,2848,19349,13825,1041,976,3070,7164,7623,16559,19726,869,18390,6515,23462,21295,22981,17856,13746,7223,14719,19309,9115,212,5231,22876,13848,11149,9105,5094,7055,11029,3349,3039,12449,3169,11763,11270,19782,8667,1423,23911,15054,17571,4090,12403,2582,18089,9606,20599,20267,11850,18918,6300,23087,2279,1501,21668,7467,9482,2614,7628,3309,12455,9108,14857,20830,11954,5329,12130,11641,6865,21960,8748,22997,22398,21234,2339,19960,20806,5607,17502,23892,8021,5354,15147,12433,8845,20971,22549,18250,7196,22433,10626,1832,7505,1051,10336,13145,8773,2168,6913,18585,23524,10311,6967,21477,16358,12964,21064,15035,4681,8679,4575,8081,24411,18394,17661,8609,19155,14038,19121,13087,11861,7186,4532,16696,16171,2978,1543,3592,5008,20560,5242,22298,13833,19543,2081,12608,12504,19526,15337,17338,8238,18128,376,22291,23616,3753,22338,17595,8743,21003,11146,3655,9617,14246,5182,14867,106,23661,23582,8630,16403,5854,16635,3486,20489,9779,20937,19954,6517,12252,5293,17674,17378,18,19626,10621,16010,638,3665,11894,10076,7846,1898,7892,18591,2580,2806,23983,15924,2267,17455,4120,4207,21618,15574,18015,5410,8685,17290,19876,13865,6940,17671,23918,22605,6591,23361,10214,13074,22009,12236,14355,16959,14794,3965,8123,7362,2098,11078,689,19277,18150,7540,19282,7216,235,2326,23194,20679,1929,7501,2208,1029,10827,2321,16847,7798,9125,21921,15906,7020,17669,4335,23702,18711,18881,15488,7962,15498,13338,6239,3090,3176,21593,14124,11609,13879,13470,15303,23890,1775,22064,21412,21174,3224,1986,13193,23862,11118,3580,8147,6278,6232,17573,14700,4593,13824,6012,9127,15159,8185,2470,14520,18033,3208,1657,21369,17713,483,3056,7745,5449,13317,15913,15773,7004,13141,1921,5394,12418,70,12793,8690,14909,9347,13861,22825,23937,18211,21688,23540,15947,5072,6222,9722,7133,1916,18978,24108,17766,1997,10276,1873,1643,19142,15623,16477,17403,5158,1863,16640,2625,6089,2245,19498,2226,22125,7707,13230,3928,18667,8067,18970,19481,1302,20295,2686,13737,21540,19125,18521,17130,10366,8544,6693,21945,23468,10295,7821,8703,12969,4288,21151,9830,14982,10360,2377,305,15017,20354,18448,3276,2400,17617,6984,16576,4340,11436,2254,8004,12108,9338,5169,14358,17800,23053,9912,20043,21429,17332,256,21884,18173,9810,21737,3394,4400,8666,3782,3507,24327,5093,8924,9232,19819,6901,23514,11235,6671,22527,20782,8650,16561,16008,8228,1664,3024,20784,9066,1444,116,10929,4286,20876,8583,5294,24288,14478,18077,23123,14014,18379,316,2465,22643,4884,17877,1180,12098,19087,18105,4852,14083,4176,1370,10101,11948,1307,11724,6883,22349,8176,21854,3368,11589,18346,13316,20337,5064,7757,5324,5801,13510,812,5877,24135,10885,13491,21951,24086,8131,8742,5216,22979,3542,12535,1268,15423,7288,6539,15083,11457,10000,7457,7304,775,21627,6328,13056,10756,9129,2274,9146,11506,21020,16692,13096,22266,17570,10851,904,3779,8559,5851,19024,8698,1253,3552,19548,14239,11327,23872,10278,14299,19864,16758,3789,12622,18893,6228,8346,1454,23225,14288,55,17036,17643,22506,23574,24312,24151,21975,6456,11934,14132,2292,21765,10819,20419,10286,4083,23584,9840,16617,10134,21852,13382,10688,13185,22846,9688,18166,4170,6286,13777,21788,12423,22194,5702,20169,18648,9861,13306,17954,13,9957,9401,6887,14086,19004,19879,21453,10559,15236,14476,14488,22138,7002,16750,15505,24115,5560,21589,2778,9299,16890,21753,20740,20291,10983,3060,7696,22046,10171,7361,6525,4828,800,1514,8023,15569,20030,2386,14923,13580,20636,18863,6371,23538,22818,12582,16199,13095,7994,4835,21497,22532,181,3492,13931,7170,5763,22803,16972,15222,1645,18265,8165,39
76,14957,4369,15225,21875,17404,18314,19511,10397,14502,20075,23569,16540,13983,17952,14611,5215,24368,15554,14747,8493,8101,20894,9086,17081,15879,20537,7839,8998,14413,2538,23381,9362,7683,8903,11005,10476,17699,2640,4534,4942,7577,12551,22739,5007,23147,7010,2104,13594,13356,10842,17780,15267,13624,2040,6777,13767,12762,19139,22790,640,18864,12464,15629,193,11526,9784,12779,13730,17636,24072,17895,19767,7226,15998,7190,8943,14281,951,12742,11014,21917,22254,13248,23729,5408,15315,4182,20390,883,12911,19395,18493,21725,888,2750,21061,14044,4446,15128,5954,1647,8524,12422,10726,6935,14899,10710,11059,9117,13813,8266,2683,15411,635,17675,1706,11467,7347,21303,2248,21356,1319,1016,8102,6532,667,20359,4993,7816,4136,15517,21936,3748,18480,7142,15238,22920,8396,12087,5498,19853,19898,23539,5366,10192,18962,841,10222,18867,22195,12298,12997,23429,6498,2490,19401,22630,20553,7957,3339,22845,9882,22421,19674,3966,18544,1345,11377,17456,14037,21676,12142,2259,16579,21218,11181,414,13764,16062,3458,14205,11868,20826,15064,23177,5013,14270,5771,24045,17096,21314,8850,20182,17634,15843,15232,14272,23954,19414,8794,10561,8044,2839,9139,14771,7990,15227,18672,19999,21895,11023,940,16197,10650,5958,15976,6950,11626,8465,11152,9163,19534,22976,9052,18212,332,16928,6260,2805,7908,23595,16009,18192,7874,22629,15600,21164,23325,16083,14685,565,3049,9641,7261,13251,22668,7972,10033,21756,19056,12092,15507,18136,17397,11264,13942,24442,18035,10839,11528,23031,14868,8877,10047,8237,7554,3953,23635,6310,10339,3917,24342,17559,22615,6066,6276,7090,24202,15866,9060,23743,19319,17191,19555,9273,3294,6360,9707,7454,11825,5879,9904,463,23200,4147,8988,1491,1786,18132,9572,22852,4137,20901,16085,3361,401,18810,9317,15381,15686,14433,11164,6041,1683,8273,15654,3738,2141,13130,16113,2427,18907,20625,22493,1756,4971,4888,18443,9956,2791,8132,3881,18286,13637,19867,19533,20264,7395,17123,14762,14507,9743,19284,14051,10006,18632,20349,1973,19976,24251,3251,6808,20496,6914,8671,21640 diff --git a/data/ogb_mol.py b/data/ogb_mol.py new file mode 100644 index 0000000..c20ec4c --- /dev/null +++ b/data/ogb_mol.py @@ -0,0 +1,254 @@ +import time +import dgl +import torch +import torch.nn.functional as F +from torch.utils.data import Dataset + +from ogb.graphproppred import DglGraphPropPredDataset, Evaluator + +from scipy import sparse as sp +import numpy as np +import networkx as nx +from tqdm import tqdm + +class OGBMOLDGL(torch.utils.data.Dataset): + def __init__(self, data, split): + self.split = split + self.data = [g for g in data[self.split]] + self.graph_lists = [] + self.graph_labels = [] + for g in self.data: + if g[0].number_of_nodes() > 5: + self.graph_lists.append(g[0]) + self.graph_labels.append(g[1]) + self.n_samples = len(self.graph_lists) + + def __len__(self): + """Return the number of graphs in the dataset.""" + return self.n_samples + + def __getitem__(self, idx): + """ + Get the idx^th sample. + Parameters + --------- + idx : int + The sample index. + Returns + ------- + (dgl.DGLGraph, int) + DGLGraph with node feature stored in `feat` field + And its label. 
+ """ + return self.graph_lists[idx], self.graph_labels[idx] + +def add_eig_vec(g, pos_enc_dim): + """ + Graph positional encoding v/ Laplacian eigenvectors + This func is for eigvec visualization, same code as positional_encoding() func, + but stores value in a diff key 'eigvec' + """ + + # Laplacian + A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float) + N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) + L = sp.eye(g.number_of_nodes()) - N * A * N + + # Eigenvectors with numpy + EigVal, EigVec = np.linalg.eig(L.toarray()) + idx = EigVal.argsort() # increasing order + EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx]) + g.ndata['eigvec'] = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float() + + # zero padding to the end if n < pos_enc_dim + n = g.number_of_nodes() + if n <= pos_enc_dim: + g.ndata['eigvec'] = F.pad(g.ndata['eigvec'], (0, pos_enc_dim - n + 1), value=float('0')) + + return g + + +def lap_positional_encoding(g, pos_enc_dim): + """ + Graph positional encoding v/ Laplacian eigenvectors + """ + + # Laplacian + A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float) + N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) + L = sp.eye(g.number_of_nodes()) - N * A * N + + # Eigenvectors with numpy + EigVal, EigVec = np.linalg.eig(L.toarray()) + idx = EigVal.argsort() # increasing order + EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx]) + g.ndata['pos_enc'] = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float() + + return g + + +def init_positional_encoding(g, pos_enc_dim, type_init): + """ + Initializing positional encoding with RWPE + """ + + n = g.number_of_nodes() + + if type_init == 'rand_walk': + # Geometric diffusion features with Random Walk + A = g.adjacency_matrix(scipy_fmt="csr") + Dinv = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -1.0, dtype=float) # D^-1 + RW = A * Dinv + M = RW + + # Iterate + nb_pos_enc = pos_enc_dim + PE = [torch.from_numpy(M.diagonal()).float()] + M_power = M + for _ in range(nb_pos_enc-1): + M_power = M_power * M + PE.append(torch.from_numpy(M_power.diagonal()).float()) + PE = torch.stack(PE,dim=-1) + g.ndata['pos_enc'] = PE + + return g + + +def make_full_graph(graph, adaptive_weighting=None): + g, label = graph + + full_g = dgl.from_networkx(nx.complete_graph(g.number_of_nodes())) + + # Copy over the node feature data and laplace eigvecs + full_g.ndata['feat'] = g.ndata['feat'] + + try: + full_g.ndata['pos_enc'] = g.ndata['pos_enc'] + except: + pass + + try: + full_g.ndata['eigvec'] = g.ndata['eigvec'] + except: + pass + + # Initalize fake edge features w/ 0s + full_g.edata['feat'] = torch.zeros(full_g.number_of_edges(), 3, dtype=torch.long) + full_g.edata['real'] = torch.zeros(full_g.number_of_edges(), dtype=torch.long) + + # Copy real edge data over, and identify real edges! 
+
+def make_full_graph(graph, adaptive_weighting=None):
+    g, label = graph
+
+    full_g = dgl.from_networkx(nx.complete_graph(g.number_of_nodes()))
+
+    # Copy over the node feature data and Laplacian eigvecs
+    full_g.ndata['feat'] = g.ndata['feat']
+
+    try:
+        full_g.ndata['pos_enc'] = g.ndata['pos_enc']
+    except KeyError:
+        pass
+
+    try:
+        full_g.ndata['eigvec'] = g.ndata['eigvec']
+    except KeyError:
+        pass
+
+    # Initialize fake edge features w/ 0s
+    full_g.edata['feat'] = torch.zeros(full_g.number_of_edges(), 3, dtype=torch.long)
+    full_g.edata['real'] = torch.zeros(full_g.number_of_edges(), dtype=torch.long)
+
+    # Copy real edge data over, and identify real edges!
+    full_g.edges[g.edges(form='uv')[0].tolist(), g.edges(form='uv')[1].tolist()].data['feat'] = g.edata['feat']
+    full_g.edges[g.edges(form='uv')[0].tolist(), g.edges(form='uv')[1].tolist()].data['real'] = torch.ones(
+        g.edata['feat'].shape[0], dtype=torch.long)  # this indicates real edges
+
+    # This code section applies only to GraphiT --------------------------------------------
+    if adaptive_weighting is not None:
+        p_steps, gamma = adaptive_weighting
+
+        n = g.number_of_nodes()
+        A = g.adjacency_matrix(scipy_fmt="csr")
+
+        # Adaptive weighting k_ij for each edge
+        if p_steps == "qtr_num_nodes":
+            p_steps = int(0.25*n)
+        elif p_steps == "half_num_nodes":
+            p_steps = int(0.5*n)
+        elif p_steps == "num_nodes":
+            p_steps = int(n)
+        elif p_steps == "twice_num_nodes":
+            p_steps = int(2*n)
+
+        N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
+        I = sp.eye(n)
+        L = I - N * A * N
+
+        k_RW = I - gamma*L
+        k_RW_power = k_RW
+        for _ in range(p_steps - 1):
+            k_RW_power = k_RW_power.dot(k_RW)
+
+        k_RW_power = torch.from_numpy(k_RW_power.toarray())
+
+        # Assigning edge features k_RW_eij for adaptive weighting during attention
+        full_edge_u, full_edge_v = full_g.edges()
+        num_edges = full_g.number_of_edges()
+
+        k_RW_e_ij = []
+        for edge in range(num_edges):
+            k_RW_e_ij.append(k_RW_power[full_edge_u[edge], full_edge_v[edge]])
+
+        full_g.edata['k_RW'] = torch.stack(k_RW_e_ij, dim=-1).unsqueeze(-1).float()
+    # --------------------------------------------------------------------------------------
+
+    return full_g, label
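+
+# Note: the adaptive weights above are the entries of the p-step random-walk
+# kernel used by GraphiT, K = (I - gamma*L)^p, computed on the symmetrically
+# normalized graph Laplacian L; `p_steps` and `gamma` arrive through the
+# `adaptive_weighting` tuple.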
+
+
+class OGBMOLDataset(Dataset):
+    def __init__(self, name, features='full'):
+
+        start = time.time()
+        print("[I] Loading dataset %s..." % (name))
+        self.name = name.lower()
+
+        self.dataset = DglGraphPropPredDataset(name=self.name)
+
+        if features == 'full':
+            pass
+        elif features == 'simple':
+            print("[I] Retaining only simple features...")
+            # only retain the top two node/edge features
+            for g in self.dataset.graphs:
+                g.ndata['feat'] = g.ndata['feat'][:, :2]
+                g.edata['feat'] = g.edata['feat'][:, :2]
+
+        split_idx = self.dataset.get_idx_split()
+
+        self.train = OGBMOLDGL(self.dataset, split_idx['train'])
+        self.val = OGBMOLDGL(self.dataset, split_idx['valid'])
+        self.test = OGBMOLDGL(self.dataset, split_idx['test'])
+
+        self.evaluator = Evaluator(name=self.name)
+
+        print("[I] Finished loading.")
+        print("[I] Data load time: {:.4f}s".format(time.time()-start))
+
+    # form a mini batch from a given list of samples = [(graph, label) pairs]
+    def collate(self, samples):
+        # the input samples is a list of pairs (graph, label)
+        graphs, labels = map(list, zip(*samples))
+        batched_graph = dgl.batch(graphs)
+        labels = torch.stack(labels)
+        tab_sizes_n = [graphs[i].number_of_nodes() for i in range(len(graphs))]
+        tab_snorm_n = [torch.FloatTensor(size, 1).fill_(1./float(size)) for size in tab_sizes_n]
+        snorm_n = torch.cat(tab_snorm_n).sqrt()
+
+        return batched_graph, labels, snorm_n
+
+    def _add_lap_positional_encodings(self, pos_enc_dim):
+
+        # Graph positional encoding w/ Laplacian eigenvectors
+        self.train = [(lap_positional_encoding(g, pos_enc_dim), label) for g, label in self.train]
+        self.val = [(lap_positional_encoding(g, pos_enc_dim), label) for g, label in self.val]
+        self.test = [(lap_positional_encoding(g, pos_enc_dim), label) for g, label in self.test]
+
+    def _add_eig_vecs(self, pos_enc_dim):
+
+        # Laplacian eigenvectors, stored separately for visualization
+        self.train = [(add_eig_vec(g, pos_enc_dim), label) for g, label in self.train]
+        self.val = [(add_eig_vec(g, pos_enc_dim), label) for g, label in self.val]
+        self.test = [(add_eig_vec(g, pos_enc_dim), label) for g, label in self.test]
+
+    def _init_positional_encodings(self, pos_enc_dim, type_init):
+
+        # Initializing positional encodings with RWPE
+        self.train = [(init_positional_encoding(g, pos_enc_dim, type_init), label) for g, label in self.train]
+        self.val = [(init_positional_encoding(g, pos_enc_dim, type_init), label) for g, label in self.val]
+        self.test = [(init_positional_encoding(g, pos_enc_dim, type_init), label) for g, label in self.test]
+
+    def _make_full_graph(self, adaptive_weighting=None):
+        self.train = [make_full_graph(graph, adaptive_weighting) for graph in self.train]
+        self.val = [make_full_graph(graph, adaptive_weighting) for graph in self.val]
+        self.test = [make_full_graph(graph, adaptive_weighting) for graph in self.test]
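+
+# A minimal sketch of how this class and its collate fn might be consumed
+# (batch size and PE dimension are illustrative; the training entry points
+# wire these up from the JSON configs):
+#
+#   from torch.utils.data import DataLoader
+#   dataset = OGBMOLDataset('OGBG-MOLTOX21')
+#   dataset._init_positional_encodings(pos_enc_dim=16, type_init='rand_walk')
+#   loader = DataLoader(dataset.train, batch_size=128, shuffle=True,
+#                       collate_fn=dataset.collate)
+#   batched_graph, labels, snorm_n = next(iter(loader))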
+
+
\ No newline at end of file
diff --git a/data/script_download_ZINC.sh b/data/script_download_ZINC.sh
new file mode 100644
index 0000000..33ba6d2
--- /dev/null
+++ b/data/script_download_ZINC.sh
@@ -0,0 +1,21 @@
+
+
+# Command to download the dataset:
+#   bash script_download_ZINC.sh
+
+
+DIR=molecules/
+cd $DIR
+
+
+FILE=ZINC.pkl
+if test -f "$FILE"; then
+	echo -e "$FILE already downloaded."
+else
+	echo -e "\ndownloading $FILE..."
+	curl https://data.dgl.ai/dataset/benchmarking-gnns/ZINC.pkl -o ZINC.pkl -J -L -k
+fi
+
+
+
diff --git a/docs/01_repo_installation.md b/docs/01_repo_installation.md
new file mode 100644
index 0000000..2749acc
--- /dev/null
+++ b/docs/01_repo_installation.md
@@ -0,0 +1,84 @@
+# Repo installation
+
+
+
+## 1. Setup Conda
+
+```
+# Conda installation
+
+# For Linux
+curl -o ~/miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
+
+# For OSX
+curl -o ~/miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
+
+chmod +x ~/miniconda.sh
+~/miniconda.sh
+
+source ~/.bashrc        # For Linux
+source ~/.bash_profile  # For OSX
+```
+
+
+
+## 2. Setup Python environment for CPU
+
+```
+# Clone GitHub repo
+conda install git
+git clone https://github.com/vijaydwivedi75/gnn-lspe.git
+cd gnn-lspe
+
+# Install python environment
+conda env create -f environment_cpu.yml
+
+# Activate environment
+conda activate gnn_lspe
+```
+
+
+
+## 3. Setup Python environment for GPU
+
+DGL 0.6.1+ requires CUDA **10.2**.
+
+For Ubuntu **18.04**:
+
+```
+# Setup CUDA 10.2 on Ubuntu 18.04
+sudo apt-get --purge remove "*cublas*" "cuda*"
+sudo apt --purge remove "nvidia*"
+sudo apt autoremove
+wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-repo-ubuntu1804_10.2.89-1_amd64.deb
+sudo dpkg -i cuda-repo-ubuntu1804_10.2.89-1_amd64.deb
+sudo apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub
+sudo apt update
+sudo apt install -y cuda-10-2
+sudo reboot
+cat /usr/local/cuda/version.txt  # Check that the CUDA version is 10.2
+
+# Clone GitHub repo
+conda install git
+git clone https://github.com/vijaydwivedi75/gnn-lspe.git
+cd gnn-lspe
+
+# Install python environment
+conda env create -f environment_gpu.yml
+
+# Activate environment
+conda activate gnn_lspe
+```
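+
+To quickly verify that the GPU build is visible from the environment (an
+optional check; `True` is expected on a correctly set up CUDA machine):
+
+```
+conda activate gnn_lspe
+python -c "import torch; print(torch.cuda.is_available())"
+```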


+
diff --git a/docs/02_download_datasets.md b/docs/02_download_datasets.md
new file mode 100644
index 0000000..e120e4e
--- /dev/null
+++ b/docs/02_download_datasets.md
@@ -0,0 +1,18 @@
+# Download datasets
+
+The OGBG-MOL* datasets are downloaded automatically from OGB. For ZINC, use the following script.
+
+
+## 1. ZINC molecular dataset
+The ZINC dataset is 58.9MB.
+
+```
+# At the root of the project
+cd data/
+bash script_download_ZINC.sh
+```
+The script is located at [data/script_download_ZINC.sh](../data/script_download_ZINC.sh).
+
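+If the download succeeded, `data/molecules/ZINC.pkl` should now exist:
+
+```
+# still inside data/
+ls -lh molecules/ZINC.pkl  # expect a ~59MB file
+```
+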


diff --git a/docs/03_run_codes.md b/docs/03_run_codes.md
new file mode 100644
index 0000000..ea22d44
--- /dev/null
+++ b/docs/03_run_codes.md
@@ -0,0 +1,87 @@
+# Reproducibility
+
+
+
+## 1. Usage
+
+
+
+### In terminal
+
+```
+# Run the main file (at the root of the project)
+python main_ZINC_graph_regression.py --config 'configs/GatedGCN_ZINC_LSPE.json'             # for CPU
+python main_ZINC_graph_regression.py --gpu_id 0 --config 'configs/GatedGCN_ZINC_LSPE.json'  # for GPU
+```
+The training and network parameters for each experiment are stored in a JSON file in the [`configs/`](../configs) directory.
+
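+For orientation, each config roughly follows the shape sketched below; the field
+names and values here are illustrative placeholders, see the actual JSON files in
+[`configs/`](../configs) for the real settings.
+
+```
+{
+    "gpu": {"use": true, "id": 0},
+    "model": "GatedGCN",
+    "dataset": "ZINC",
+    "out_dir": "out/GatedGCN_ZINC_LSPE/",
+    "params": {"seed": 41, "epochs": 1000, "batch_size": 128, "init_lr": 0.001},
+    "net_params": {"L": 16, "hidden_dim": 60, "pos_enc_dim": 20, "pe_init": "rand_walk"}
+}
+```
+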
+
+## 2. Output, checkpoints and visualizations
+
+Output results are located in the folder defined by the variable `out_dir` in the corresponding config file (e.g. [`configs/GatedGCN_ZINC_LSPE.json`](../configs/GatedGCN_ZINC_LSPE.json)).
+
+If `out_dir = 'out/GatedGCN_ZINC_LSPE_noLapEigLoss/'`, then
+
+#### 2.1 To see checkpoints and results
+1. Go to `out/GatedGCN_ZINC_LSPE_noLapEigLoss/results` to view all result text files.
+2. Directory `out/GatedGCN_ZINC_LSPE_noLapEigLoss/checkpoints` contains the model checkpoints.
+
+#### 2.2 To see the training logs in TensorBoard on a local machine
+1. Go to the logs directory, i.e. `out/GatedGCN_ZINC_LSPE_noLapEigLoss/logs/`.
+2. Run the commands
+```
+source activate gnn_lspe
+tensorboard --logdir='./' --port 6006
+```
+3. Open `http://localhost:6006` in your browser. Note that the port (6006 here, but it may change) is printed to the terminal immediately after TensorBoard starts.
+
+
+#### 2.3 To see the training logs in TensorBoard on a remote machine
+1. Go to the logs directory, i.e. `out/GatedGCN_ZINC_LSPE_noLapEigLoss/logs/`.
+2. Run the [script](../scripts/TensorBoard/script_tensorboard.sh) with `bash script_tensorboard.sh`.
+3. On your local machine, run `ssh -N -f -L localhost:6006:localhost:6006 user@xx.xx.xx.xx`, where `user@xx.xx.xx.xx` is your user login and the IP of the remote machine.
+4. Open `http://localhost:6006` in your browser.
+
+
+
+## 3. Reproduce results
+
+
+```
+# At the root of the project
+
+bash scripts/ZINC/script_ZINC_all.sh
+bash scripts/OGBMOL/script_MOLTOX21_all.sh
+bash scripts/OGBMOL/script_MOLPCBA_all.sh
+
+```
+
+Scripts are located in the [`scripts/`](../scripts/) directory of the repository.
+
+


\ No newline at end of file
diff --git a/docs/gnn-lspe.png b/docs/gnn-lspe.png
new file mode 100644
index 0000000000000000000000000000000000000000..c962007de28593c73ad35f901957fe2794ebd995
Binary files /dev/null and b/docs/gnn-lspe.png differ

nEC7st**ato4bw|oG#+y=1W%6v{FI*OQ#uCQhc z|K#-4iKJfWT7p*i)(3(jdtN$X#6qe}-e|5yR!Hp>e(pUHIU{XtswXaM!5}_ftaLn+ z-A?e1x*vu1n26@kO0^lQzyK*vFd>;Em{$DDkIP(9!XA+g^wU5lj`5zLwLiGrrh$a0 zVZi*uo*+j#{?#9h${*GTamPDaLiLTIE` zjM#vqd)1OtzUj<75gn-^x$$MAdiZ?A7u>jgJz0v;?8HN3fjYLf6E)SXx1cqegJ-?D zT0DMd4T2Vi7FF7mxGht(S|L(HzbrvuV?_{J^i4vT8PA>)a7gN>W0 zVOGNEhn}8W)TQQK$f~su;sCe|O`U(Jq3uN@@#TuXXt`AG0@0&4qf|`^!rpol79?yS zqMXmN+b|dqBDq&4M;FtL=gUn56_R6qo#!BD%NH`E5WP}$JDqmK2Xoj*@}odx&*Oyl zB}Jv~2kNcM-8b zg_~A3geLn|-QMml6i5c|2$4pKfY^TsstHEv?BFYLzca3}(G|!s(d=_<-1skfQBYJ$! zi?tW1^X3FeavL-{ex@<@Va)gix<_+^UbHJ!+;6_^Cys_vn6WTlrF2Rao4fohD$v@n z+eP8-K(+?U>A^?~nV;A#?*1lE>V`EiiRNe-+Zcb*K?_zCkdH-qesC;SpQ9$=f<4$8cP+41 z>JDv#C@;#f2D^#7osOst(+wwAS5byWZ-oy{0Bp?1TW{Bh))B~B@4iCpQLn{b{)AI} z-)%x~O|AKA`^%$~B*$*`;F5e=w>c6|Zp{jTV`4)}F$2wzEx|L$v|cIMn1?=?rL85w zGP{pQDgFg0G43wc?~$$m)Z*eMDskuM-g=bUu*WC>(Pk6ui;ya^o!46x(7l%I5hX+@ z&fhFXs81d}><|o?9uI`cXB6(P)&MS-g|I*;ANV0{i82fb@K0k0j+6=hHsE7aSaC|E;0#$<<%mu=~$p#YuI!+M}8T zT17n}1Vc~@-7sgBD|@eooap$ZLLYF*F6Ub6N~jx1%QEvJiuckQ+7x~sA!x=#|5{`T zS!(lt;>$_QuQtEj{JYZU*mW9Xz%e-YZSB8sVQ@QunDW)!0?eoISwimEe3>I-*_&o;0Jad2M48u@G>-vtL0AcQwx5JrC(-k$z z(NScN5@sW}e0zNfuEUt_lADV?UcVxq1dVPMlfn;ATN=xcXJ(^LAlc>izm0&30u?3M4hYaC>Kl zQtZ;P#c{EIHEL6M)@}XkIz)c~R-!Ig!rp^GJm8&MaO@G!B2eEW2Y)&iHeltuDpieL z?qZk0=Uq$2LV-dKTV-%>exy^G4A31@&$BUeaXdM6 zwRM?kx_L2R$vxUiQ#0nw<0L**AO|wyyy8L@W?rq3qF10NQq~S$c641v}(O%WVoKx!AW^^OLiadZy8m>XH)p6l!#=#g`}bc>$znNL8g{yM@^cFUpau_i zmSjf|Vlb)SW&f=il+Xn0ZS5#pGG1RA?+HVD&}v3(ebUjAzzL+h5b98AucN(e8X{??8%u~7jvi*JDJYnG80s_dqsG}< zE3NL?Bi-YUTF+zrkZ3T^FLumQ}ytxhMgl0bC?N^OJ>m+VtR(jHQA~ z<9k;2Fm$k@V0LPRRu0BAPJlf-Q@%J?=x#LOYZms+-(8rH`b13k2!hjQi7&^>47Kzr zH+4xmC+;ZqV`rJ{ub%s9?)iF(I{}TD?Ke&yr}HeX+bFFfx6_CdcFT@s^|=LvU#m*} zm|m4VNtRk-OZK>is^eM0-h}SN8vY0SMEe15)~4wnzO7T}e1K_bQ7U>jcq1blHHa=r z^EP`QPdNQq%J`ld zwYKgR>FeBmzD?VkQ6`+P%~ zwTG>Pb&!zt+?6^Pi2$0nzX)?xbrxHVPRG^2mFmz)B=j3=Sw_<6{MmYzSK~L5I*o_A zsG*Nj$SpVl7h!`HY7*nQVsJ;3Rd@a&UZJ9G$yU5)vsik9T>+brNaaIjMk4{13`>B7 z@0iRxmKaA2wGY_8zGAzsXuGxP0U=(-?GK(wEwC(s`VAE(aqpczEq`!B%5otGDAF@Dg=vsU=FL5BqGHCIf0)}zSgqi;)l6yl zyAnPB5J!@}GQ+?PP$wW9tU~N#?vw%}TAH1m-M~)20M9MZ(~+Eg?v;+Ywn z!*py*azbpr<11*MzG)KsgDT z>5!CQ5l$ZEd$15@VPL1I4Z<91_oXQ1HGgF| zJ%OReCe#z1I`?or?Z(_T(o3z>8#JS`t?9R^Y!|)Nc@bDvrDq)!Lu}zh%IvDpVh64dm9Na7H_nwJjrt!)jfpJ{w}fQOT!asd1jD^Zpml zZ`*2>nyZ|Ojl3OU>0IrhIyRO7QmH3t3(n+!vh6AsaCUF-VwqrivRoE4CeflHZC%0v zpVQ8Sk>9mCoSS6vUShkk^8{(e9V)WS#c-GcuN55GrBL8M^*A*msyX0QGIPKZjec{l zFI3hEl?T(eLcl$-I$Osne@;C1zKN$NCe=M(2jHYRfT=iT!`I8lYc)=q_q>GYx#lrt zdw)0q$-UQ7Z}fvM3e!hZIpOHMDZUPsEBN@D)3925SN3V@Yl_Omp@K}5l^g-%WyDGr z^K|?5cpq7Vh3V+$32-`(gGk!%1u|lHPo&# zTNp(3Vun9xFqgn#0bo5pbU~vo_atIuQnF?&-)9FdC5-#$5b6K-ddfwgoMkPciHmov z%;1G2R`5arB@jnO18lN}XntX3Q)#-?o${oThLcS5b>PJ^y3Yi*sVH7kq;6+xCsO3o zL~aTO-p8j16zIM#Uu<%N6%wNUu{S7Je!Gi`PTE1?r#xHpAz}5ciSBA*Ho>afL>a`* z*L`jQcXu1_9-We9!?Dh{mXxBX)bSYXck)k%*L>(&54G1~8@!E!$dmq*Y-&QIUy`{+ z0SSxlDHB#NSKB;%r#;^YT!&-I)tSw$DzuYv0+%Hf=HERC#P{jh zM9V6wIkVIAV?+!xxFHVva)+p0rQ$wbJpGZY1T%+Us>Wr2RY>>i)VI-3)c6w8$@4eOwFL2`5QVhy^Jh_83ACO&Fn!3Dpzlx;GRDz& z>%f(|r>o`xX`y&ebZR7D^!Z;eq8XDzd+HSp_7D}6P)KOvhh%IMPRodfGC49dYuvP% zv+z=KC}~6=(}q*y(DfelsSh$5Q7HR%+IiuuLD8smXU5S!g$x z8c1}Zz9YQwwC2BZc00xkq^mxh9hAlOltz7Cc{8IWWJuaNSS(y-qdul?wMtLrNc647m9Gksqa9bT(VuM62)n zIhE!FUk>UGO7*rM1JlNqdn3Wx{A@FM)I`lp721fwr+)l0?94;Y=@*6uF3H<;M*> zEHT=(0P|aStQ6kM>6nZ06f)z*xE0fp>oxh0IY`J(KI$GB7YaVs5otRGLoSr+LOnkk zqog7U2#duJ#>M24gmCWZKlqQX<=#3J_Y2&1ca+HUn3s}_`np0L6-GmAw@OW;s&WYn z!L~~)DkstHH(M!I%Q|2MXJ*-qtIzjV$CevVL#UxJ>CS2{zjloWjv>z!mC};cjn%a^ zbKnXREVPsy_eKh82KDCr2SPn)Dvi*^`L3KNEPow;av&PM7Ntz4Yz?ODOvov;3eN0X 
z+YLN(Z17T@?zCb3?nZ2FS0SDpxs^~T6ll^erzl@bF>L56I@Ta8w8u+(=a>NR;c?8} zB#y%H&7N!g9-j8^83I9pC*$#0g~M!%J2U%!bD({9vQ@&Jo>&5Noqguvj~CovLr|5! zYaF7GSrx3M(VF+0>`e|a@uH` z!iQwY2Lv>s0~T=JUGe|kmfC6VC>><{zw6NxAbtTQX-2?F*=pp%(VI4-MUBWQe6G$j zQAQdaxN3SA^L19CB(|M3eTo%(YPd|`mL;!H$B$_{a^wP;71^W>?mk2ge_)N3D zm?50pm`pKmF75lYmxy&7_Pgh{g~c-uhnIZw!Mlf@8)D-&ENgxrI)yEKu?_5En#x;M zT{e;=ug1;CyHnc53>y?hG+DM1Mj+#qlPC9Z&5F;sOB$zl^A_gHQ^@9vpK&{!FW>|k z@Szrn_uion`w4d|#E-9q4tQ{j8PI-oKv&xpt#*y^k=cuJ)6x|tZ)!VEzBvA5^xWZX zO)_6tLSq>!`vEf4}*$wfVv%) z8h&-R!Jh!FF`iMnfJ5~hEPQrGpkf5bTx?aq?ZRyCc**Tl-=Je>_N2}PE zkJkR0;j7_&&B(90wayhyN{ZyRDKP)SI&O3?< z<(H_Ggn6za8%^8*{;F0DAtPaJv{vw5RtY@tPX$&4+_*yrUeb9l+SM_< zgCZeVk<*|J6H+W(To(QurcVIE|9%~~CzMuoT3Pmz@4ma!*``FKss4~_RU}G&cAF!$Me)F z%p_JrSLGPPO~nVJUe6^sdF3%r_m672){YartokiX3M|}!2nE1QmFl#;d%Fy`d~>yC z^=%Sd%zRC0!ty|zP`|3MSYku8qu*ezmKvQoH(iC|EpUCOxn3+Vi6f_Rvn4#WKRuqE{ic=-yri%R)+T2svpdclkSabNn6rw2=_^^lc1aM*rz!)K1gT4n zgBXKbIT(ZIF|MCq{foIX;fr(}e-|Ie=W)qASVx`p<5$)1o9{MK=gl*cA2B~59)W)O z8YEAk>$mxfI_=115@;ieKQbAiJNPnl`VjP#En>k@~nk4Q~eYGWODL)~~ zZ&`b)=YA%i$L}y)YR~$le*p5vW0xyO;P;13>JUbOq2~3^9@U3aT8s|D5BtiS`uFNw zU?j&9vEI@g*ffjnPY0XJJ)4I^5-GOFB-(C|Nknkm3|3?I{ijo~ljF>>&8bqmnZA%9 z5{hL%wtO$?kM-2$+v16D2`R(BSl(Vj*ac<#&4jrvJ<6;+yr0<8-S}G-_3RY;>ZKU+ zjw27h zhX@c(Z3sr8Y20hQvUw^Be(AGDCDFf!5-Rcdj(0-D@OCFXNpEQ#-WZ@3l#X1Yn{LLj z&rYCi>z5Ca{QJLOKXyO26gwIBJIIjKczlK!TNtr;b~thVS+UgYhME{&f<%CTcx|JS zn%GB|+QOg(8{O1yt-_g2?4(qq%nS0w-2bDrmW=S_D;&$8B}Y4+dp-uWuq z|D9S*+%HE$;OTP6{hwdfv|Ou^BJi^!(LuTN2eSvV%9f{>+9zPdc4rfpPAOKTjklh> zcF*NY$`vQT&Nyy`PG6oeKMl$1s0%HCOv|@T_}U`qWak4zrw@Bxr74v9{(fvjXROg! zoQd0a^}+5YM$WcR1i|WRLvRL%WCG|4DAt*1b9Fg1Wh<4-mY-;ai z8WUQ@cIDY)jVsvN!KIU z44i^PnYp`Q;w^o}317!x<>jTAf|OFskuQ5DBy;H_9&A@10WZV@o`dag&r!Zc6^@<& zo_NYej1Omi%3?pMKfk#IN;Fu*oD^LF$5)?sqkj4Kh~<9E_Q#gQ*nOUvHB6gq_q>5p zdFrv_khgfo%Rb@2VURJd95Ptpb|*UNM}JMY*;2R*angn%!tq*2vwj4+nBS|>#B4$` ziz^BZ`F$cQGOoWU7d(!<8b11FqG?Bu$!;w`Ey-0~v;aD62`QK~=nu7;-n?o`L-m1& z?#)%h-LZEcNp8w32Ff+H)MJik*Qu(wFSGGIHn^?F@TY2Cgm;0SdtA79e-VoMnaAu# zX@QDoU>OxM{{#{v^PKDf3!LmCI9cn+1TBUbM=njc%?f9Z6^=6pGR=8-sx+`qle+X3 zxV1%d=QXVWEuWx>n_LADZ`So_J14XN@Uj>hZH}-APyVIXK ztz)y$xofC~{pfXRM0=P2{L4di5ixxeeBu^UM!S$M@FiaAsu&_XAA_@MV3+IgiC4CnwZclha}nS(_l)qecGV zpWFJL=Mql$m#WU)3#Z?|F_g_kq{z2-$#}@zXfWZoKN^SX38Q@u!hUJ@FWis%woG28 zQ+U7eF2qa;D4GuWhFqFzkyF5_$-dgXOktwIC28RE(60ZaOTub zac0(LEWH6I;yE}Gm%xc&QF>k6hA~K6z!=Q>Fe3XzFYpLd*4)DBd|lHN*Y(>Te7eb@ zlL-f9dKbN#l&M+MLvrg_4z8gGj~A6|`}M85Xoy##<7; zS?s?)qV2qB^9R)dhn=umhc@(#@1~qd>2_tCXuL8 z{us7UkMFvY5&A5x@AW}qXOm^T^ZzvHG^mZnF$HyCno!`9Hbp72#?Lri+}XhEm?omx z$RqlYs?8xeDv401d_Cv@Wdd`ZVfImiG~9FW9%51px~p5##C2iYf7lf}mnrIK`>;zo zk)?gA`dZNK1lOgA@lNdzvTCu7$G<8VXNKV?d&5^;vqu!YneGOyzMLzNL&vsHXS772 z+q8eq=|p88;2q^}Hl|G7_V$ju6H*n-FI*VmdWh`XO+b%m|9otB{x)x7`t`YmYJm*4 zCueFCCn1?H_>I?kODu1q;yU{L0H6FE2yTc*qtQP%%l$TFP}w*9`J3kPywwhFhK`?8 zJoz-YaHmd{x2>yYmi_sr7%kYTaU<^6&E4hQ?;lJ8 zS~c&NJV$QRd_bM5m9Ev5)}9|;+VnT*rQwOn4KBQ9(=v@AGd0D{bdJES*ET5He;tbH z=HW|{&M$n?9CZfKR?98%9L=YPJbuROZNW7u`(S5h?&h+-!R2z#nNK)~Yc5#!C%FH$ ziN}JuKa{|Yhb3PYB2T>M)KYd@EHCPGPvkC}yubE)LAxmF_@3(~Z{hA|_HQL`!~Z5X z{^@RfX6x%qhkb8?E#CePJD?K_)W3dZXQjflhu$&7Xx#|AViFQ}$KO+KyF}sv&-I7ek}Qacu`NVFHKZ@HAze@Lm1Nm1qtT(qr@748 zZVE!oj9jW}nc+J%ri%ew@)fkJSA-&`<7mW-TiZI73Mk5dJT{Jwg)%?nQB1K6Q8d*} zi-^n7X3AuzEf_VGv2_Ra+YdknIN|YY?gcd+pN!H$%63d`-))ahqH3KzmB)7lNlGa= zn;Lv#ICW9K!sCUV*2owrgc_I0Ii`Hp$4oj;=CuX$g#fqv_Su_Rlp6tlhAI7P+bcDC zn?^M{rIeZ6`Y3Y#v+wJ6h=%(@r*}dZk=tb5GF)=VE2V!R*X z6gRT(Qlo#!J&v3;d0ECp1q6m>I`;}(b3H_Aj^0QpgT~gfm6x2BCk(V68+h<#5xkDU zf~HO{r;U0IGy5^FLSaHBmnifh7Ew;xZiyX;!417O(kce@tI$v 
zLrQlm<0|#rwHbrB%{rwt%97=%e|lAz_g}^azm6c;<}s@ZcN;udzH8q8qdKN@B*VgA z#L>lQgXX*;J~38O)?B~+>8geJi9yFZ9noz*exB2=`1%8!S*FC1rQvo->4UeA`gk(d zA7gjo7yKZ7?9nImu&35Pc^!2TV&9XL(c_-kO-1sJjmW~O zAX$M;+mWxEw)?C8%Hn?8IxIzI){Y+A3;eF>#!xZcd40H*;dR7%9Rbf{{g93HzVC-L zP3wm5Cdwrmt$M@)1LrwD_s$}omAl{S{B=LO(6xZf)wW&5t`XilHSrpvm;tedTK+|aw?ns!&NrTN45@(r&MJ!ld;WFt5hf39hjZ(o~+xFYS!Z6 z-gEDKes3;+b=?cc$85CnyPoD|QjKp@is|f$zjmM(+77XzKk6^1(JEKMI&CWBhj6Za z98InTf(F_C_~f)Lkv(-jcDljM3sa^E5#&Bs>j6G-!3ukR5zt4WE!&Lb6+)zxUq3Q7 zk@}^otgLO2s^2u6+3L;s1tY^xVmoiTa}yU}RA#e7C4rm7F~Qs!g5qmlDOnV2nj6@v zvO4yf1o#K`-szTEVD60gaVPvxBs|vt&W+-SVm$SSBD55+um_AOZZ{v38Z*&g6PO+d z^T-j9X5v~me|(@JW>1h-{=Db;!hdg9o%q_dWsw(iRCgliR*&D+tVp=C9f!~kl}bps zNnAIY46j+~>Pc#J8|f%K(W&El)?=&We80WJ)=S*Alm5(at3=~CX%jbe<&xV-GNpvL z96<@A#L@mS&Fohpp0RDmMI7VCNcpYo?6!aH8NIUvG-VhGb>Qs#OE+%V$AAf}q9{kV z*+K_FQM_W7gE*5L{a*XB%uV#{2g0gM3sWnH8u9@sNe{ip42>U1UMr8u9w&P8y`b@O zo4W*GEg@%7-}b`liN${GcU@gqvA-SSAZlaCVyqGFI2bU@L|J;9(wU|n#wMKT@>G@0 z{T%1O&ZR2aJ$qyBRFd9bINpCJSVrdBbw<7bT$+vBG?{w&K3VcgNrj5zvE4}$@?3iO4yh@qR#Pd=(=;p2MS-PLyktwW8Vb-i5IBj$TRleMIN`|5PbKXfsw#6bK9 znlRYI7dhGEvjY{)bxla)bI4jjJOA#*npgmAV`0>kj?<>P&z9@nX)>%1 zYV|nxl-VHr;4j+K9G*eH%;_>RI{Xh)(7$`PDjtA-%Nib`yFnff_o9CQbjcr-h8}8k zMkuls>p_{@z-M|qK=DBT7OdPy6juH%7gLa*8+R0(qb4FVzye|`2M}A`RjMfp3j<)u z-+6HXr21w;pO@U4AuolvZa)mhm_th#m(T$7q43#lL!>XP1D| zui_FWH9{r_lOD;&90PugAt#Rx<6vh$)cTb6>SJo^tL4;tC2GnII7z5aq2cW92FFW=^#F9S>M&j7Gmg+EyB%7~-Cc>KFa zl49K%d}hV3;w&7Llst+9>7W_+`ae8oQaG?-YFs3bAzVH48T^ABdGsSBMYJ~`s5C|) zI}q^8U5OY|eTP(1pCNTP4xWG+EyG|63}L*xM2JZT|D1J^pd7W_RCkp2jM=|gx}sI~ z(;qtghcAlE|7>E10GQT0xC7J~+1V|DHDz>+W3(09K&T$RU<;oq5QYUb=7O)t3Jm*O z6+V=Yi97Pn=^+cuSHv8rI$RVjLvjsks*&Sr4 zpb05_YKalIrk-GP&w!7JVAW8n4`9Wck?BjKwe+D5ORNXi3Adx)E_^ zW)7)eG}oBwBrr#cDVU=K_8bU+4TgM~1^-Qa0Wt{xJm=05r6?I(HjwwI~j3{AE zx=U^nPus|a9@29puG&DHtvYSs;~-xV<=w?Y82&6l&T_8Ov>12yI*cCb;oFJ2x)PMA zmk&WxAef~_V4yn}2Fm=u3*;h_Ic@csr$cN{26Wsv0M7^J_BE!%wTG8NMCG{R}P z*4Zkh^B(_@!aQ)7KXLgO(+u_;EJY$O|8?WGqxj(>&zpir++3}l%qC9N@!>i8%Tl2l zs5qjV^jcm!Re(o?K;o*&X6E{K9^=!>rMR+J9Ce0MAIg-kMc^j4X>+$ z^3tcjox=ZkJ3rioZ8(5)Ky_f|LfL&~kSwosfHmjT6(8Z+6oahQhRdNnv>A zYneB)Z}dE|N%r68>JR((V) zp?@g-cU(bv4iA~)Yu>rcoWdtT<>#UStzGIsQ{@i{l=2^1D(e?FjZR*1>%CZ*!UGFl zf=)m*)r^VDh*b|H4{@2xBq@Jd^T{N19BxRkZwx2^kY}uJr&#go?4`7B= zVuSa>z%#CcXH?`^6)GE?$3J>=n(tO;K-{l z|GN(Tr!V@S?%xf#$2qW{(!)yiBA!9zn2X7HJ04|lb`Z=j{04G*(@{`u$Q!(v9*53@ zOB(hsaH_Z3Rjlv{S~wzWX737oqgyEa6pTRQj0a-e@Zw8eT=h8BiY-EF{MuW?zUD#< z>E6Y9HMVz&bYP9ZJipPyPPW!y=CmTqV}|svTtfM$;|k*x7j-Y4t$%h~{A0h>aDdvy z>*5v$`@mpDVv(S(_C3Y2N; z<8Ds?^%R9k5+ydVApX9EDlHnZyIZj`SA%wA8%aPf3Gx^y4?S~2MYD}t==D^$87%@% zpw+o^I0yXSfXx5i{RPCpUQ8h!n5*HrWHIX9X%V_$Z^cO^RSV%V!8lZ^=%!LecF9Fm z_%V{Ur?snLRqsT*AaQAwJ#C3Yu%nBE^*{L@zkP7JNgoZ)bx=q5!(^EQ^q^c>{jfGJ zXkKb3)6YO2ArvNkqCB}H7QGjplcrr0TtA#dEh%P zxRU?hl@SW$ElFGMMRq!Hu~-Zsxf4D6U5t!H}=cC*$>C<@&n*x8VIU&*Q7 z(sZvR6&zq2MbbV2b$#h9*e*)x=lsHG_NW`zAYNAZVeZ2(^k5N3par0T_rCy&|L+h2 zhry|y)rv_EFU8o9yKFIphQ&`lG) z@pX^S)!2nF;m7^Pti9$9%h+B=@6v%@gbl1Wt^l<(DuT*U6E>gghfqDLEp;~dR=&Nc z5e7*cHQ8;^;nq~r+BIWwcy7&?Gn8b(@HYhfr?uf9p>Gjr4P?J-qEg!lQ=m12&$J@? zBih~Iy&Ve5q$qGYLw{0@z2$pI6mP(KDoTj0y)QZ*hF*x9pDd3K?>kyTd{P?f`QA&! 
zPr&E3cH5dqn@iuRJN6FNDz(O3HEoH&hu>@PX{VkTzS5vdU$-AFCYFuB*JyUohnpRk zY*-<9=;KzoF=|NwSN$Kh{z^0ml-jjTB1COLU;*|+PJ$eBT>_I~x>*L)c;%gfHm?P< zAg4|!k&=+GX?sz;t)^{GDuut6V+)xGr~cQqc1JEtFyl--$7=p@4&-xpQ}xH7E+Yw1P8 zpv^78VY|q_-I%k)^!4P;;q^*JL)%xOKhsFvG8nTJrsWj~5f8=ZuOdfm1F<44FIhA= zSJwHMOy>u5B#F5OwP8W zmnV18#U|3z*7~KFjIhKEqWW*TrFvDF_X=>@V{$M_Mr|QEv6UK4~UFPm#k*9p% z%^$WwKh`w9CDq*dPZ24-(B1NWwggtwEa=o%I|z1v((`??*U7yON;Nt6h+LoN@GWqL zOk+i1Z304Rr0(wUt-{B$PHVbwn|WvWP-`OQ$Q!(bB#VQzHJ^xX^7?2sdCutx5#$iV zE=#l=ls;@S>X|bX@gKLlJ1BZkhz^=&$xa3JK;HGVWC$Z)aLlBJRPk1(C`wh|iQYj9 zvHx5!hksBDRlE`7i|IxL<=BjeEPbk?Mi|<&OJCc0 zDzyV!g$yjWivCly<)6VxE^>R!4$>U(!?XE$5tJHc($m^0u ziiEO=id???Q_>x&?nXGe%C4Zz%HH-;4vr=L*q?6SBzs?arOl*O`Da1-yqFR#;yESD z((<~&))Bd1h(*gahS@n7PzC8(E+jtXmWZXlh%f#nR`@^OX(9s1F-+ee+QFuLLx+*l zX@7`LL`1Og#Lg{DoQ!d9wXR}h1{Bg+{ERwU$f$!TJA(Q z?b;I;>rkIiEE6POYO!7AEm_pPz=taLX0wvt*1NbrKYN$# zmF+v2uWP)NUVH;mK{;Y+t+eXU9GGJhgjO1=bq$b`?c|lo)A9c(ePk?$HXj}=^O_Lo ztzBFBLFy-WtD(&%H$0Il)$VQeMbxYF<^NdwX`<@*Bv+bWqLmF_LEG|kyP6L`oAPq~ zB`?}L2|mER$HX+ZG3!L5X?;-3F`r>+zL@hjHo;31SFBn>8dDAl>^&CMuX5@VCbwzu zemnoG%yyir?o9WJMpiJmmmTKJ0Sii$jdLX;WVPjey=QS6LW5>)h6^mCm44jfw?gj)_lJFSi|DK2@v$!BzrJrPhb3Npwp}^-qfT0m^FA-> z8}lB?9}yWh^h&OJ^;sC#hZ+5z$RWx(Rlk4b`VQ3FnO0f(mg7~z|*&fhnGHaZ}?IhhLdLed2|!izj^GFYfK7EgaHF4b3BS?Z-=smzm6MPjmh zD98t_fPA{TiVAf|Z0MD{ zARE=ZW?>2cbUV2IBd8|T4K(FlxW%s)83zxeH zsjg4v;`)>Hk8EmqycQW~min%E*`OKSkH08M59&kd8F)BiPBm1!69w=DUY+=o=zp1R zZGg&;SKS<`trfO=)loXSEM|4=++;WWS*gdCOE<-K^~uJsFBI{rJ<NHgxUN_jA z%3Rkz$_MC}6d2MG!~J(Q^Wob+T$`(|?Zf7)QyG-WjlW!ez5m&GJQH$$cD?eKGe?EB zc5q(Qqj{6SkhB8_ZlSwCK9YwFE&jztcH5n(=*^&Yy4^2df;W4=NxKoM>*E3b(YZe= zRYV`>8PZ{i*Ex%)<~H_zFqcjZeYvZujLr*ED*f@A!Zxzli9iY)S|37zBAG9}C&H#H z%G&&SU%FVtvf*WafewKc79}^6se&ifhv>E}JgnNKe)P@oZ9nFW!$UA%%DmnUGnQux zBT#aQM0$i8__PJ;YQ9u|ky8*PFyd9~Z}^4kt7ut$%JgYC36$4ztrCzguEa7ROJDy> zbK;+waUasLamlS}xm$Fjm!N0*`x+{a&w)yl3mq^wY{EA_QBaVU5Tu8eZjcV8I|ONI7$hYWk?uwYkd#h|K|<;72FZb;Yv_CydOq88zxQ>W z?-xJd26N0h*4o$F_x->AzjX)8RL2uoxjEtBxd#RVA5qW>f-Aoy|Mkl|3P7k={Y-LJ z{lL2|V4CUm-}ip!{r|{4cD_$<%zneYJSf&x%9uO~5OTOr@9?qx6(;;)sRbhH1_LmP z3j_8g+(iaH2Kkz8GklJ(gL*#pd@@QLHI?a~qh5yaN`{JW&rkz8a5a`QpAFB6P*2Uk*evVg*j97 zBqa2Z47?DC>xrhLe91=5Z@g}HT=CWi*NT9OPTPv(v+uq}^)sbD4V!&AjY|K#JA=~EZV@2Yjsd_n$u1{h)F@8*Q{HlBw+@U`~o$HEBQss&Br{vqQYh)C5k~x}70h4my_`c*4Q&mr z=H%%b;!7LdXTQzMReXrjsCC((zQWUP$O5AFvTnGA!fBlRxK0&8RM@H2W-n45Lsrpj zA`1RiqgWV++g7DAIFhsQDem_}K?C0#ri~f}%{&HPpB8Wa_zi0to*o5BzgJWe!N;eC z#8j4t4?QK{sV9*<(5rvgG*#6$JX05e5c5@er5blnfjqdN`bD94-dPO;CC^L3zop7w zV3W8uEd0~-x;7crdMvqkK7qTl4QM*X;X)z51x5jxAY8v9FCTKF?{ndw-0 zE=~^{6Sjn22Q7T>NjiXYH+Z1r@381;AsE&QZd@Grrzn%lw~?>P`#4d z(YLnJLdzu?g~Pa?nH(^@vr{5ZqLpuE1hx_&1~fAFb! zYIVhpvpX{}2=CmW)GSc4HSg8FdhL3gw&%K%hNcWxaU;zAYQ9k{GJia-+`fCHivFC| zW0Uw@jw*>LWBfXbSNLM5>Hh0FIe1~DcwNonF|4CqfoWzzibj5jHnh|l0fFtIR4Xw! 
zzPZ?0KNTTk6+V128cw>Z!a!m%Tf%16m*A_mL@Cd}r#;AvF7Ie`K5A$_MbNI)ooeAL z1~<=;0`~@S8pjGn6W?#QG?V?_#~)Ik;?UD-FP1dUBt%vG{|p3|g%SqO%(3D-%UL~x zS27jJbShA|1vUEaK-af^QSe=dJu=jHgna%3jtqnE!D!E4`onsM98tbaR%lEjvsy zPWb4XI=Z{CljT^e-veBS$c0(R3kFaIHzsJ=7?bJ>W?}#0o{m0|JJCW@kk?gO|O+jpFxW#l4a0QG84tZTY;5Ym&5~bTC^T?jfX}rgmmm5HL1oj zW=LoF$?O6D^3nEr5qB#4Q&$suMlzw)(}Oj`o^c0Y=00U-cy)Qae=vq!ZWan90q&$$ zA9JJIC|V3Z(ke=3mwij4B%=yr_Q*)ZuYxZCd#?NZt+wG zBfhg|9i(d41^NF^nwAokw5u$=A3Jz;dDLLI-CNNvtGdm|!FNX*>v*nzMqWlyuxL6v zCkIrE2@>_r%?cbQo!~Lb$2`bQ|J0GZQ5Tw zkd8YPEP~oM-?)QG?qf~p$U!_IU{|D%T@4+UiN`=^d(U4fIbO!st}s~iNleO}KbgO> zE#&fc#O8XwHKE@^)rq1=nsl8zZ~a^4$oxEx1FPZYJsQV|6ufMqa5<$oYRkY?rU(8W z2l$(CrRcoEp0v61<+{8aU=0~CQL79Oja=a?FUdD1$xP+VJdkx1aen$Qxhfb0uPKeW zUV+PCe@4w0?WM_|HKe~q=#|-{TaFxK1JIL{zq9g zjhmk$ei-|!6tGdejGN01!gqKx$`oQVOO~ix#ZbjjW`1aWe(K02@tNF5r-0Az7Sv!2 zvz;LwCz@!if`iO0hct{?cdd*v0_6g&*dPC$6q%HroPRwU(R5)_;cD++TrP2&uCcOQZa2p3Y-GFlZ5W}tc{8X zVRTfwoWAqe+3R{T{+*sXj4X7Bg`nHe@7;#4m!~@=8~1(7U&~L5n(HcnS7D*{s*2WL z{VX7@&DI{VDA?*11C#si+KC;j2|NpI{|cm1my9B^zux9}{s;NvN+TK;UZQ1hDXSlC zov}>?DMC)lFlAqeK995D+E~$ICB*q@MfE#I9fJvxuXH|A51Gpvox2dS)knPoB~EL2 z@hF?+twVoKeyni42{Rp zf%$lu7dE$*#ZVc=WQC<~b^F4j4$~l@)X;hR#gYB|nb+|pnsqAwBlC-kSRZDqDeisS z{u|LX??m@nLwk|PVVo*2A+O^#UF#$5EG$0Fm)t$ijT_*b!o5HZK?T0Y(q zdtscavlW!hq3z(44wgJYZSCqRRRLY(lnJ{kWq*MM?yIwmhM}}oy%%ra{cwIVnSQr1 zp1H}55HPU@iln1;oSr)kCUn8=o=dXh^Q46{QaHWGKNM)2s{TnKfyg8wCz@SR2&iIID&;d`G=&6d2I=QN@A=baWY zlMtD9UT@gxQk?y&8SeaqtAJI5B{8i3XL3yrr`@^&=+=hlSQ2;03VeKE?4KdK$seHG zAWF~Trn}(xNalC##M0VKfyTyckr2PjTmOW1s`j4ehy=N6K3cCn0&6=xtlgIcd=KCE z?i@9(_X!8nid=;V_k3Asp-cU_#aA9#-geTz(oUpWnjvWn9N)xj`BM#;sp_y(dL z5wuyflE%;twiwo{XIphhThG#GRx~>CVa)bZHdVmP_wIeH$v#wI9op)KHYEmpq5^kqx6A=jGL6pD>tm}GASatHBUa8dME?O-mOD6@MmeR zF-Qd&Gp)6Bm3G?cZp>1$ZET!C{HDCu!p^LkyZniuE`lUoFm zirGY&bm9>>5)Its<^4_$cFIqPihOhiUMik~rrp>skUl;tdnMNM#dqmtDKJC@Sw+U? 
zdV;g<8gkb;of0!c)xYxX&{yn6%_6AwW?tu(cDeF8axH<^i+Z5~)t>)BU1TgPVd{`Q zBjU?lPZ}=0A7n2}GT_d((XCWdB8r@j^Uw6D{P2@{{W8iIP%;fO<0GZ`IM){JjEU2P z|Gi$Qj02Y?nkdtLeU1c|aN*`D!^nwRRPUA1?s-SM43spg*B9+%F*ZI$Je;Mlw8{K# zTz6`^h}V9ANQBlcQB`(u)0jZxm9r%;=Eg+GvOIBy$etVUdKF_i%8fGXe7hB#G3J+5&DtVTijrHkWkw0P)b}E*=;kmf5LBdzK@apiuBV8Zfn2S0SeJF z9T!hTs4@iGXnrhVs~419m^5!ct1mNxoO@U2!*6PSKG-r2_-KdwD*?xK0Od)Vx2ZTH z9US)G!*Y03XV0?y+FUk?-7s>%W35AaAk$D@;`64ehT(GNXXEf$V$ZFfj~6wDoxW*1 zQ#E1+>d8miRjdqv%p!$1$iPJkngWJ^%9pdN%Jt52`W2xgokk&xmURGrvTPDhlXvT@ zhpXd64VOE(H7=_Guf0ejzC6}@RcaxhQqsjs^dW!?V}7N|#q#IGJl}AdSuqU~dMt3XA z>`1`F-@3PCb+mMrvQu9G#NWojY(Su~^jGmr;@PW^)P{?jq3fe9 zbC;ea7ESCDz8hTzM8B!KSU4sE(_}EGLraz%?d{*#)kO_e844Y9+4w${CvL0~eZ65l zf(I-!C7ajo$~>6-v|&;yL|%;JqeE7S-uraw(lmGT zH-LUz-(vpXsup0;s6tz&!om9!%OTQVPVJ-ippq@Ol^$9s$*{ywYlHFcJb35)j-)hm z5p?q9NZ{D2nK^XstAaIZ&Ow&vn=Jdx;kacJ~NL!P}+WN3P%B9ud{DAwd&FXd) zSsUiryu`}QObg_^R-uI@hN6F8hP3C!@9;x@nFo?m!-ko6HpLoml1td>e$HX;!e0DA z9Oz$3#pQ~{p7D}}D|F|8%j69fr=1R3!`S%)j&ETSa(Z8(x^|$d`rou3z~#@4rip>O zsD|uav!rn=drrW%axHkQKyA1k>DQ&(0hFv^sml|b{c!zw3491wI29hoD z?m|V3^`;nI60DsR!7V{;q1WUzvncDvu4^4ptW10ZQ(NpZ&Z(sYSF0k(Y z>tKqXFn})Wq_u zJK`-E2FzZnvG2-BIfZabWM)hMOE3B{97vaKW2sM|@-#C=T55X(z-(*`U*=;!u;|FF zRz3({=lJhUWUR=fqV%ZOG=Z|!xxO&Op0Ta*5*I{P&2!=OUjz)GGa{fISRB<#J+Bs} zu;A5glWV;mB{hOZ{jZ;>sai(=`b1{38Br!F0g{MZiP?k4RsxWzHE2dn5e{&D{(n4-|IvQ{ zg3W2rr23W~;lY5dMGQ>YOp{zG^VT8spfGjqKk&bM?O>QvLgX^t*wJt109~$_8Zuqb zTv5uYhkEN@UOqtPkSGUo*-6qosL1hMhz(d9-_Pl70nQIpvJU_H8drOAB$A#gcSuO8 zv6t~GZ>kcFuuWo*a7D&&Cd$Sn<^H{S@V}ofJeaU|-Z~oyPNZpOi-5}MT7GdRCpB`2 zqe!i2n;kg4|NOrc+gS8#1Io1{t6dqucVJe2z}K-PJu-@;NW&xaFi}>ty`%RreAVzj z156#PC$Q}VCWFZIr*H9V)>W3}4?I(@$ZaO+kbNX&CdUhfmH{;sxRCw7(|cBApUHxr z^Bw45V?+{JS_}z}oixoh_2&#rfxxQhWRHj1tEKHd9R&aVWAy~RG0n^O{8E-PJNozY zsTfLKgrr8eaRf53WEC-o32{4ndVoIb1cgL-zrP>p@IPl#Ejvsk`g2I;3q+?3e1xUK4Eid}ii3=g&rKy-J;}T;1s}dD`X9r;jK8oT3F}mlk`lWC9TJOe z6o(c{(~t>ak2uN}jUal;WlM=vpOXsF(G^2g{W9MZ{Kso>TdH=Q7c3iGj7WJBuo3eN z))_&uhSPBnLy9S>h~ABxG?7>{Zc-i5pU7dJX*tyN9}S!y#x8idc|>ALKnOy~gpPd3 z)L%v+F!LQ953|JkIy6eq%Frfya2G$R)&8AVY@U4IA3G}1$JYkJ*99MdS1V>J=;n7^ z=42mK-Dl|S#=}%LrJ+ME3WmYHKW0wD2}FDeJLF$p*M9#Rb&woO>a(%0Wn!L%-(6}E zAde|4;(h6HX?Oou^nZCY#gsY0PX*l%nJwBoWyCbxnSh~UWL8)2W0Pzd>L@FAj|*CV zN{$0x;d4PoDr~-q)pjmfdF-Ua|CF#6V?uc0?CeWY8fLU*<>mW)$Ivp4y=az5nWBju zu7bK0zH`0zp-EY^gqZ2$f=+8O0rIT><(ra0c=`HfR)@Y}`1&jNn8s0nZ#NlssrYbc zJ?UyZf{(lI+JxlV9rpjt)Y{YM)O7xNs=f)5-y|sEhfQOgj9ia$JH~3~Jpadxu%@D* zxOsVbsa5A*LFUyrRTRfF=`lo(jr|e0(i;MUE3F^S%!k{v;oaS@PM-g{+p#A9KX^kEZ|6+nIAxQBisBvZc4bI)n!3Q*AIl$Nkh3&ugMeYIJmT@e0Xy zNy+mI*VCiz@Q!FYfZjkD2vG*!Tb<%$ke zW$>A=3>D^?uD3}a5Si&+V{>vK4~aSNt7gCY1q?;)fwgdRbMwmlQLPQYIh=BP+;*jz z+1Z>ZNAo`4MI&I*3;6Ki#zlg$vGIYi3C*|ERC2#ZB-!NV?(Q{eBKO4}{OvHtNF9ft ze&Mn*0tVQpoaDfiIJe^{lFM?qf7ZD3Oy+u|cLc$*F8+0;fJMst7=`SUHZjS(N0Pxl@H%e9!R``6F# zSTGRWJG5fbt$am3YI8m|$?&KrO)GO6v16y;Zifa74r+|TPYMhS>|A7TTomy-=dE#C z_sftDUkk^-ljiz&=F3r$x(!!YX}-2E6GIJvidjci(^OsoV(&IBVOVM2_cfzgy`SP5q>Q{e|va%fNTg{dhB* zl>)`okF$%xEIbvAckZt1U{GeEY(}a-4QBwB@M7^SudQ)Qe3ZWYRj0&g9M4pMcy|m2 zY9G#Z5{{Bj;+_YNrNcDeLQb*&{Atif-$9RgLTaW@>d>wf0QE`-mS(F914;9b9edF_ z-GtTK($NWtEeFy+Up^6i90V>NfhS7iY#MmMTDx;Aj!8Sct0&d#RF=GhTK&()H1`^; zBz?o9t=~17Oo7vg{%ycQdjm*p`j3{Z9F}M`mFz|f66@Q6H&P2Y^w`&B=sZ`{P$2Lf z`~-J*JlvRYA6ZTjdY@_eNBUlSeT$TFM`R3Uki*+F2=_7RiettmAjrPzb`z*0!hDIW zm1)5)IXl`OL*8BTUc4&`Ap=b?x)?{80=1x^-qr6uH{@}wh@&WATz2keky@3xpev>7kJ2C1Y%`m@jo zCl?n_v}W2`jzV%3j{CS-^vwmy8<5;)uEN#^j=GNbtt~C{nv73KM^DmD1OZ~9ZL%U_ zb_CO#M|&!6UQJ*FmoojX%bQuZEjUuC(puZIh9-CN`o`3q?Q0(#^z-LZ4_4OK*AIQf z3Pv}^ic@VCBGeP`O2A67Wrj4{0JmTtxFd1_gpiZ>*mr(`VUANcJ}YrUEgvrgv+it^ 
z14!I4Fj#Nt6S*qD5E4K_vL)UQ=U@w;AaDgQ_J>^0ciRT)-n*EXm>}$w6TmLCuoQ1F zTIC5)PzQ#YXjo*!`DZ7d;1cXkGp^zsu+i?%FLl*FKLt3+g(SQBUp{v+_8YJ@AdTZl z;8d+yyFd8yZU5Js0Pqt6j0iKlE>533fAirIrD@>#`Fc^afe;E-tp2ZElpEidkGDoY zI3A4@XsgE4S+7^kxW5!>yY@LeTk7m?g)72%2j=Yx#dU{YX8f$bT_e#zd>%UU zEqKRVefvxc%)8xU?+XksFE5pfKjL-n>(xXIlc)nwkGs)%x~6G-AUM@LT=M7mAuPD# zrX|{RA_QV`Y?~=&KDU6&R)HoJ-JlzRw;s(57VsRsy z{#fLrcbDaPe*3nDlB}Nzx$ozA*}`UgqoR=l&Kp{2f$d10T35Cd?@KqeLLK~<@hl_K zG$oVKhNd)(HJ!jz@}=D6BkF=qlNOpR>Z2~>^h|`qlT8`rF5|gbO}T3Ie2vwsOk)5@ zN%rY=wZ3LZSQv#g>4oVl}{CZO}G+MHEj;iA_v7!1)PVa4R`D~=WEAdvk^X1 zZ#}&^N`8b4Cyn`q8bLbr!V(EoH*sR*`y$sCJ7E z@ZP^5_bjO8tU_x*Dk05N+*(C#ab~Ivo9+$gDBmXcI!@ESXxZj-3cgu2@1>33=}qwD zY*`ci7r5Y#Y2bZPajE51VcWE(D5o3t=DF?B9YJq7O_m(7fs1Pg(c+V(u6V|&RBsos z9ZT0GLvR`Z-9-)9T3#iJC8CVVHvwBIOH<_%DRMPn%9X`7bp&?3k-J+ZhE4lc;%UuR zy;CRW*18%!#Pq7!54WlpF<;-y?@g*=6GS^S?X)tdRC}!VYwXtS5OD+Ub-WSXxjudu zH)7o--D|@_KeumMBh!#&FCfq^HTrz{F>)$F#KR5O%N{I0=F8(ntcxa!d@U_t6C04k zV>c?v#Q4T_qo8-*-^i>P~pI^5G%j-k2dVUO|(FS==YzzKKFzo(mTNQ=%nW|IbN`@+UCA zX~gWq^~di!zXL`F7SFW(C;j9OvMCR6Y3EYjZw)Z=xYY@tHlNsM#sm#TR&$D&o8=W>&}t!2QJJD0`j| z^vy3{!`w=}F5JI0l|I<^2J{(Nr~IYO!mNS@MOk_zethdLKCrKsct79xxyA(1NIO0A z#0y;MIFaaqM%(mO&XJ~jD9Ck;PmmhkCw+$01&k0fo z5SY;E$RrP}CaVtN&P9Y1yJp@VDdg=*t+k`V*w40|1lLWsRELJ$KJuA1?~=XLhM1OC zN;Th+?OH!;^FynxPuqqNht~%@f8Wbs5I9p19GfMnbHJKH$nOz>BmRavRWFuJ%on`8 z^Pk4a#v%A@1!PN}Fx}`8K|v|6h&VY8@ViJQg$qC=9YU&Qy>*}ogZ*k#)`#{=T-rQ8 z6z$-!7^L)|l2nj_oU=|Q_A!z-V`9EE~Fi~eH<_qsR)_s}VwrY7i zE!9GoMIp(&lnwWZ9gE!i;$3y3SL+3ylXT)daWHZ$T=k|s11$YC0!W2iV=5NL(5F&u zIwZepfrIig%CY8l202Q{9#iQMS3z>8S73X^rh9#;%=Bg5U`*3SXiM@qIWIn@e~ao& z8CH4to}laO#zgrp*z)EgrO8AO)F|-o9q2o}%vw z@&sw3L|wPRCEvxWzceMYuf{kr>=R|xZr;S4)V0LhruY?8Xf zHP5vqf~6JOoeKYoKNaffgH(|1^pdu)RQvI#jQvcH733RoT<1!^4BgIuwtl9ZPUeV) z4%PA(g7N+8XVjZu9EB3wJ1WAzWT`+G#P^fnAb7v1WUNu^Fi-9R{2$psL`nB&v{OB| zUwIDny%KIR*pj~xVbdmAr1`5LZ7sf%PxH+{WPWGY>{M>ugKG^~#3r4m^tBn!X*zcI zy@YqDw}OE<^J_TR4@xXGn4rJN(Phb~vKaFDv|HZ30o{C) zBG=xX_mvzCdtad5){LF-Sac@o!WL;qnyLr<{d331>2Rjs~MEW=a^dmIL&bZ=~hy!NuzkJPiVQ{8VYB$(Af z;LvF+xUK8rPvZSdPw{$$nF~(eaAEtu+im)0nWbU=C&{zNk|WS)xd5_5wce_ZRKU5) zfBDiS6XP?MWZ$<`FP#O$N<2rHRdl2OnXxHJ%bRPoMyEqxUpJ+Mx=EmfED{3TfnPLFXAFqrwrI(_g zRBG#$EpdJhiITp`GpH+n3%N8~0wd2!EnZ|yaEzU`3ueXgD zi!PeBIN~d%g<7Q@ZftHLF`r2Ndbnx9D%%VfKF!gmmwnLA(0*}tMA&Xu$wsdDpqpa7 zHrv`jQNwmmCzO~=PK0MGe6xK`U8A+kJ<+CZU4qSy;JY>XOrPO?%n;so%<)F5lt6id z>W)e)RFqcV5m*y8j1`9}!@Js4Y&8mX)VIS0F(beHIVI{9Uk~e3k57Gu`UORXGCIAym`0EUG`})w{cl2U<>TBlYx!-B((Rd?iAVHb}pgQ~R1& zrL8R{xCsic=e zqneWGv9-CnxED%M`Q-`4;T-UeN-jTJlpZ|?JOXbDn6O_Icx6(XOD+Jws;WH*!E8Ij238TRLO&F;h^Q)H8=6%IAk%6ddYa!b@7p>0jXc_jvl=O{g_+W zhUP}!=sM)j#I+X!-V+e@o!gj%w-(TFH4=8jB^`txll(JvHA@Z+2!M9Pn(cvEzySiFNDN;`^~9!$QRwW#n<;>@?Ubi zR2GiQ|4uShVrjq9pLR-`fai0LMnUM1?z=cUv`~})<~4K{ui%`dYf8JN(^3Lm&GCoo zv~rxIZs$uQeWidltM3{3TWt|Zg5<^me&Tc5Y+b@q1FH`K9CEYWQu*-xp+7(itc`uT zQd&Zlb|!4LIcHC2{>-Z7WO(T|BY3aKYc@j~>)q;Li|7Xx*Eo7rDe`Q)eGDo=Gjw&b zPs(r*rwjp1`(q@OJfBr=&+aIK@=hvY2>F{3ED;)Dn+oVL14?B|nGRm!8gt4F zIG0;l9%(m>@V~0-=gwW|S$x?Wcw*LlQKAys!bHE4t*-JTZj_b(XD3G>hGiu8)2G|s15PiVD2%9fhOCcz9Q zYjJ9b^vTh=0E@<)8$o-QEc}zva86^UZ^kpTsUPoWd^bD2+4rWklVwDE8u}hi#fukB zWP2#2EHYP3ryo(UWH$*nt@lzKNjZTd+tfq@Z^aFof^u&?;Q`>5Mk zNaRB8n`JEYza#782yx=wL^GvE-aEgBNNc`@vOQ&CXlAGcX_rFri^YV?-g}lK?BS{7E4T}6fFNb$6g6JZZfPR=?9V>t>|*S zw@>!ESl4kzBa+T_Thr$8%&avl>HXej4Ur-(k4Mn+kWD``x&dY;+<>Uc4H zy@~`nq!kcU78^eAhN|XOOAXJ+G1YaHyTS5M;${z=lg}m3vYI!a434$6-3n9``68i6 zvuZ5i27qb$qO7E%165E(f1e_D2c^ypVmV#Yc1+A60NrW12ZWMAkSH03CfB#eOO8rr>@lBX zlJF7>(ieB$2FYvC?`=O=!ZpPq?+TKiX+y1pB3k{IY5|2`kaXm$sHEH{lH`kMN&JMBZM3*wWz!6<(vDneEF59X8$D@(}9@a>z{KSX_zS 
zp3wcN&%qB$8%kKAdH00l(qSU1IV!_@eCm|C|GW$kjqH0Gv4qa;+p)3>wjjM|CFy_a z#B-$RGMMkpTGF@*Bsl!{T9apc{MwW5hi#~GG7&~Rb?;!;kY(ffzR|Wa zO4S1`boP`~2l58!u?Xl&wPaQsA266|J6sh($*sk$8s;uTm8zi)g2H4ReV;r(_jT=X zmjip<>2JHP=3^hm01;6no}u@YIz0JEH~ckzhI{-Oz&>00LQ{~U4xRO_+?|sYBf;mu2%9P!Y*P3C&c08{9)CUvb`Af#-eO3@L&7o?w1}L zN2n$XEblN-Pp>8egmNEpUP$>9ly`&M*~tE^y`9JNg%cvA7nk?WLa8!;nNBnYW{FrM z$t@K`_6xsvt3Dq$r>JyGp9Eaj#Hw#Is`L3nko^c)Uc=>Se*I3Gav=Tv#sk@S_k-sy zjdx5s&R&7aLKo;P-<#F5ovM5UIuWgaH1k!lzwe}ow^Yzy(!rKifpmMz_jKmXSkdcK z-@_PpSYIcma9K!o?6hERep> zv(I_t!SbVr)?`}1&TL{mTd&n7x%It|3rvZ|C5P8t@(TTzAW6*dIN3`ObSu;6YYUQ8 zz)`#@2|u_vnq9mOq>{t2fEoE-I-INpltjad>reBVV`K4r^ghN=M(3BDo}E!KF}*U_ z_;6ONly+;Y?nu9mxt1O)_;z@BEM~1#OqsJN%PPWzI4Lt?49Vajg$vo~()WFl~Eyc-Xf+b@{58 zD(r5Gi1iyY2JO6+sVdeuR)eDAhn8dH-gfQs`W$jP8fdCyj!x=D5>}XJ6vf!z&=a_A zzUZ$~{7rZM{+k20U0NP3)OF|+I+_EuCMi(;R<)4oXnQcnr>8~3^N3=tpe(rR2Nj|_ z+@+FLBL;}d%5Bt379zzbwUav-s5C%Yvvl{%O`blejZ`y zcmU%;ESH&fy|a+b?e;RoPm*#aU!n{Dh;r!MsbG|rGop;b!Lg2B`@5(c4{pvHbiM~m zrQ#OGP9m9t4%$#AeVd%4nyix96F+CYJPHJ2$fHV zgod_E!yN~HxY)f;paJ+$`YWxw9TZ4GGAmPXu5IL?rEO*OHgRLQ<~HYJYuB~EO6FHL zXzyTP;d}RfW4!cdpFf>3AXI4`gBGo5n5&r2#HVKAqA^Uem>_D1ooSO1ZP?1#I9H7|9lC-V@5*%gAOHiHy z9)gV;JOhA9D8~|L;AIZTBS=39vlU-(Vi@SRngtz=h;tPfmUU?SY(_y51>3vNJ2?;rx?t8mG+69b=FvXbTWp*ff9#SKNQW{bHelG@C2Cta4B zm3Qwl5q287rr_n)hA)KMbmonKCM*RF%~BA$kYblS@B!3IiI+3UYxEkd_k8c`>+72J zMFT?oS6u@31fIWpG+)tFXHV6v7s<68zaLco#XwykQ`=CM!fj_x)U%qqg^DJd0t=qU zMM#{dU7pQCu5H>jQlyP7i!GU31 z0=pHF4k)qVLWc=n4HQ=pv1?X#cdE$js@y{D#dYV(;OcZt3}u5;`2aZ$1j|>smLh$`gGp98$3+n#x+yoMw62|uIYBVJajd=WmhZ;bGhpSQ7Apx6E9&y|ZO z(*XE-%AKO4^cE;461c45W(XME%rU2YW0N?{x~muR2XVnHy^V&sqMnNf&qHZ`j;#Z2 z?st5@Kg_-EwJH|C{?iKw@9*2ZUedRUKJPtY1F!?_!NFy(i4wz4EckGv8uevmCWpID z8dhFAztB#VY%vXx3*3Gi>?- zVj0Z9e5@uiZtH|Hort|~5GmXT8OS|7Jl@gsz4`e;EPUxwD@#_n&5Z_5Zmh+NJVn^M zU{W9#VIA_Ap~%B&th*FP$}!V04)ZWoxU6=2P!9mJ%g@H0%?ZZ;{^2s#P^^|4C5*NT zLJn*zgIDRHmm3y76-9>vt)VFk%SWXwY-B-w9iV6GyK)KaR+#5%^ek^XhS zf#Ln~t4rp89@_rojzub$5hayr3{V^Rpk42r^PSUrZnb)FXpQxEOjh$2Sn1B=r3?x- zdi2mvML>jPe?-)FHVJBUSZ?McwEj`Jy<@U$C&Q|WhF}aqQE6*7BFSlOm_8=~AzTga zBU~P^dMYT&iEO3zHF=hFMva8lh7t*TRI@Pg-O;15cm$CEZLn}neK$#ij%1Z`u=r2m zsDZC9}Y~|Le#4XJNYrJR%|@ zuL)E_RCuA)t{0~V2PZ=GbPeF5#%p~C?^Yi%)`oL-9SiTd>r(@kq!%JmdViA_mfQz_ zx2iH+sH+*D?eg1+&~mIO6f_Z6gLmw61PG-U^k6W;;gJ$(kV=q-M!3qBl$6w3TO0nk zo4&>`T@~Io>J~c(!v581c)9SqAKyFV(d7=YHL}anV^Nt~;D>M0X#9qVN+a;5^ZeZ&1lydLc^)j`BVh=8YmE_ygMH9j&rtoQP_vKm=+OvGJjFC5 z2_gcNh&jPZw&kw7M*eBTc)obyF)?_4q?ihUa>{(JJ8zh@%Or|>Gj_mYq~`|b=zL9$ zJ%?#JLT+{haXt|}?+FZajK}hs-9+p_S2RWcQM+N^bSj5vN2fd)D-xRdDl=qhIw>aI z!1Exse&NTrygTswnE7J#iu79rl&bheMZIk$dAo9&jP5G7dwSJI9;w2f`zM-iDKP0+ z&rf|#!oRF~4OcRxLI=+XCBFsqF!rf!63Y)4$rp|K|Fa4HQvH6h$phy$YUMV2O$NnMsUjQ8{ z?-koBb6_LV_<$}I8E@bZE-6{8H6AcYyA%nXf{4>9w#n4_4 zjFy}Ds^n@Fs7e(WH0Y;dlz*3fu90zidb)<67!wP#io#fZuE&+aScj?y@*2Wdw|_E7 z1!;EnihjChxX>Jki3*?mwbTxznOXic9A?0Zlb+Y7DAX~UDamhqJa%p_+E zZ<0L@s}{8l;f>;}COR`ujLglgP*?_4B+2uSkx=W{zXHqZPVHY#MBn>lzb%zt zkDnC)dg0H(Ne9psOCEiPI^h5v(!;J#c0D!f89r>zPI~(`vYzN3nvJQSf}} z)+-QWh6okd35@o*X=0NExmn1ljmqCW&vZ0@_;yS|<0YZB>2$5K*7^^esiU)>M?Xh> zx(x(K-lHSTUYcx?2Mo2*Exg*v^vOIql8~FT{Cj$`j~#;R3w4hO6Po_NwD}E2j2Ee2 zN=!^He9*cZ1v3>f6U_yC#&$T z9u(o;vW}3`X$*V8M*UYC$4l#?p>|kM)?b0CnDUO@FN>D<_m{g9hfdE1F_3)>v*OPQ zdo8zB>yGtxAYcA(jvVvn9Lqv-U4gi1ilv#1B!z*l&!B&q<4RQ(!BYmndnS<~3lHcS zY*~(FEOnS*hw*J{E5{^6eK@gNezUxnn_$|}ZK_&vhU>sUMX+0c@fbNIpEyL1t(%yj+~}dZq5ptaP^l2Z=UcVtc@3eqH=Zu?IPJLm^!+G8?fiUybu> zSh&wxubvdn$9Gb7y$Wc6sMucq7uLap2M4HUa_&=kixW`9N_xQ)hTUIy9urM8Dy1(3Om%ksi)}sPi z?;tr#gE*7#wKY09mRe3h{g1FQ#`;Ds!$Kr<%!miYJR>&7FcQ)3@z;jS%#OVDW_4f@ 
z5calfjD1EgOqRI1>W=LX)~UT4s3R81LLS4*_z8^`(#3%$OX}_0oVFR#YDOOVx@rhs z%e9VB!W!LH6wYU}N`1WX8}Y0-fvw!1u)U{R-o~E6zTi`>kpDk;{YN;y z@XJ!CSu^lOVv=zQLZoU~c&aFE`fhC2HS>ZWPWb!1-KHV_6Qdu?Jp!!)E8>xK@!j3s z_}3Q5uxU;PZn$d%c-gXI)=Ue=MuL~*PGANzO7)QXOfY^k>}b8KLqK41UaPz#Qhgf; zI)8PH$VaRUVXppgr}A`3^=U4&^UKWWvY&^Y3*&%i>H!xq4(Gp1BYRU0j_*@|g3&s- z6Vu3rHT&59V;ac#@tOF)15r_z&bu54yyHTy#cb7lt;8^a_w8e7c{$2%7vPldYK9~U zs9f2vmAi0Meqnbxcy-alv}D$OO3*$&rg-(!o&T+esLDkR)l$O4mxR89H}b!S`=c$q zZI5hxAY9Nl>#?jUEz4X@r2{#J)Ruy{~_wFqT=YfuI(E_ zaECx}4H_&+>vGigN8yQ#Hc`-yzW>SR=frOf zCzQb*02Z%vJJCS}SRq3fIzV~i@&Bf%hz_7PApmTh=l}Y$4`9g;R|h{WfFBRvBr3OY z>pXq22tS6$n+!R;K-->l`QQEh8&Cht<~jl=Z^$)5;XC9eq%qJXBa|HpQX+-1*tFu@p{}#J)C0bA8okhd;zgrqTq~>bv;DC3p zlk3_6LyiXe+%l^i_p9)$@GGGi#WIAJGpxK&R?g^O0TjC*O@`Rm8vw#v3s3v^@6XyP zrul=Pqs)fyZ+&j)7(EA#dIct(bCdDREfBgr_n_V;({t3P)<2}F56j@dk)a*d*4DUl zjZIDJoL{P{s&qL>3RIl|iGiV`qXXc`L+0kx3JVKMaoxLebJ@ALekFX8gR@1a!m?~p zcWEn*S9dTu`j|+7n(j0Kq4ytoGecPgH@ZsJL zV|5`_smZ6r^#sZD$)((fQT6eIr7?yZ--sRgoe2KvcVq4Cdg9qY8p`7RQqrWUu_GX~ zftdxy6zGI$-~8^*jfuG(gk3eEMqkW;7li9wi5d zS{sk{SmI=axB%8x(Hs=Fsc8Ih(YQO9&6fYO3x0Js+!}X&oleVD1l>(`VB{2;3RZI| z53=j3q*Z~79DYP-5O6_P5`jOA5U2dHkB)naiiQv@C8*Sfosh1d2odb*m~0heR;8QaFU5;G3Vwm zc!gT*H_*Mky~|?Ql#lxw z5B02JEP|NYXH`+D?%*1`D94btF+W?*jeyZZWVOQkJnl^d;yAtaZ^R<0{yCT2fth>S zGfeuxgXEl*FN&x zDdqxsUTt{EO*2ZXUfsRdv02wEWsMHkQ$ezBvJvlx@pGVy9NGVU^HO$BP)jf4Z+#Ms ztNsDwNfxVvQ&pAJRsscn(tU}t!XgZ-I^74ZO9fWm)}Mpi-x>~hZI@aOZChO2|3KuY zVm=i&ew3e#^W8LT?;dIicz($j+V?i8StK*O`~{ zw4$T7Rys}r?|}2cp$BnPOCrAQgT;oBB81mMBDqQ%w%E=4;3Cw4Hc!9g#GLA18L*t8 zsY`o-)7eKTeXMBV2Vs^Dhx^k%0bLXxsLemk-)SwF{qK>rb={w$_((+bv!I}le3%0l zFevl_4x&O6NU0QMM^Z*+L9=*k+x>qw$9C4$HA72A^@Lj&`P`2GqMZ}t`8Mo2$}@{4 zPV?F5l-#h>fJ+4ftkWjivNn2bG(j^{Q=^lWCZNcBFDolMA-L-K;lqcrNq>OEyaTjf zR{M2+ZXOq4*H7(HQ1NE?%p`1}QVr%CwjqG*SzA}k_mq@#d^`kE!(Cu+U!Kv(KvE@=ELLLCbm$mDw ztE>F!C!I*&H?PPSPni)sYxAwiPAR zG?53=>b`1ggEi*!l{+3H+S=((>!kc1m@qIfkrxVgFcrCZM=FE4N3@-o4^*SOU^|D(s!@^YD-F@RBxcG8=jc`?7eTdD>I#NYH) zL}cWz2-yq5swl<(*{(sZ?O~W1W&zPaKd%q)%*+2v-2w^_{WbQLHVHrHd3O$0M`=| zfEa{zcl);(Q*~c9uY$$I#l`Bx~pk6lt315rnR-B?sR^j;e#h!oo5_OD*13~T%GD+h}Zw$Zo z)+}387o%8=pu5g3${5&}s@|~-M*E&o9!E7ai1Z^0kNj|m%)xYr*EI9v&*62vqH18= zoV!RHGoY<1Z~qDBmt!wHgNEMymHV(Szux&yU1~OIRdvpJe(x`Z0?3kb3bpMrH~i(2 zEZ$@JyL8*OC~@Z@Wuf77#jfK=NCH%qoc7N6_JtQng#>@2}_ zNoPFIS#{$1i>7~gVm`Eb=5RhYOxD`Z(x_pmwvM>yiyXn~lLja(egf-PZ2z z0+>}_`w`s7)@Q_(moHZC=s;a6_K+|II?qcTx_`md)@{+=B5J=z95CYJNDfSW&6f~$ZSr{%nkm<%iSfp)( z4Wu`wUl!5P(P3XI)WSRaj&Bf9R8(~M*n2Agcs52!$}!X>))IRZrfcyr#9bVK<{Jj! 
zoKtn*eX}T@c1=$x)sDO)Q$+xdwk-FTA;o+!T-lwBi@N3I)uMV%SLxi?vy=sil*;!lUH_F>Qb)3-78;ms^+gezI=|26rbBB!)o`SsJ}UuEQ`X4eS9on z=Eseh{$Uv%!ApBc)vtm>9FEL_fxDzd>A7YdGM*FhGX>9bTUb}Iehu5$v>Kd;`{f&) zsC_G5MCvQ@fu?@QaRsyR={!F9Yg}k>&ENg?#C-3wtubctJ6BDL^Owi?E-y~SMm8#I?ujlt2=f8X{uMOSSnXM20QVX_%Fh86Ccrk>+>h{0T* zmd$js7tlne52Gc?b9rLpXq5^7wYlMPH9ay!e*Ot@ev?K>toGO0TrRD_&t}(bS!eX@ zoLrxO5l)oA4F^Vj*5HxYf%N=TR8-!~nDN=!Rm6#U>w5mE=9bv9*e*^aSmw?dP{Fp} zFK_9jfUP(^*D@3W?B!^E9$gca%CL?QXHE;+!GjAv1D^|t`NFhI0l=(%?fccf!Ts&6 zITPWo=5f9q7|O}v@pVfk+#T%k-yi+-K$rRt*KyXaN7>cm>jt+ain(9$Nn#Yx;Ki~F zUC*{8hRV1^o$YdF&h10Bvi%1s*aT5>HiCxJjmNjVY`kvZ00+$p=aRpou@rG$UELZA(vvv9c{A+U zcV4FidAyTxIluyc$?jkpqN<@V^giK)Iu4PPcGO|3nGALcr*&rY5olB;TTcPdGqtzOAzl6qt#Q# zy;o0JBP#rlC^x)$-_kv6^YWm{WiPjFlU$EMswp?T8Rb7|tjN(Lhr`C6+OD|>oD!@= z4`NBY`@7R2jt&847#1Jws6D~RUb)=C?S3Sgw04Gf8ex&~u9c*;N+S=LXyCTi)=l6x zRlv&1&;D&ngAbl)0oIKg@9jVY&T2?zh_3s(XGwXlw57S_OZq42TUrt?j&aMp1c!w9 z1c!=FX=!Q8E*i693jOMbYc3w&1&6}bsj2#({fQ2j0BJ*(TCENOr)+gz{xMw4>KT=>(|cRVxixsz0>ih3 zgdMS>9C!+sS`#(<4wGiO&U3&6M7cNqm6P*`st>hq^z@gbKUs1ysi2{|V^;d~UTp79 zGAG?I(+tyDpFv#0wiA$6ygnB(Dm&o!4=L!?<^4=`hcpihTJPwo%>Uxd$Ni52TJLt1s3_HBgzitCv6htjF^rSojg-cCNRPTFMx8C zdz|8CjVl`>d(nsbxi`~egJFg?F%vL`YGJuUcKd0_rEY!T-3|>5O|X6%S-4o&onF#X zXjR>+>8*s26nlEunevl*P!YCt;KniLDEt~!D%sLhD%qjbKfg%8Dra`54wA>$(~Vy&{Y*uP9T=p5a{Lgbr+s7c&F)-libY3e6gg*bcyONM|{x5$Wzv zPeW5I);dzm=6^qCi2IQW&IH0OLUY86?Qj9Y-h8vY!>}oaS&h3G{d0X$*k3T18XR%_ zobBiG{6=q#a|$0EQw#4n+1ijI&49a~-JSrW6_LZ~(nR=#Opvj&v-5TPeP){-rnwV= z!?KGx8KukIe>7V(TR)p)rjtj^q=)b4H4V#7|H;$uxmb*66phX$r>0iC;(yl4YgP@i zC{Db5M2bIHnX@Mym~>uG@2U>`Y98w3{q2CSPi5i3be17V=FF&Bxb_1Mp$o0c8o~2%%Wc<}ul2@l zdzi$(`8O?3Jr$3ajgX?&V)r`0?Hg!(i)p>50p3x2O`8_S@@rGd8}3%=#QVexXgcp| zIw49u#dvU8y{U$gX}2$7#r5?K8_d`dq2-5!hqZf~ipTZq@Kl*6Om77-v5ZjkuTVl< zuSwDTA$Pie8Bof9`dz;+jxm!nDi7w$a}T}YV@NY!8O9^OGrqJ zzJIu0G&JcCMO*6dY;d}~xOFDXMuLEriM9dHSuc=rT}*x$O~8H*5Pds=2&BqZa}U=f z*Ah~nw{} zVamFy`k`WE?`#SAVqfDTNoLj0(M2Mx28+$oqIB8dGe!~_`LN_V_705iJ(Hh~vz9t1 zV4G>c=Z?sB7ai?(|8cRbAt*iO;Z53z+*#HCB|o%&1h1oz5O3bbbyuieb-`xp6dHO` z_`G_&ujjJsD>GgR5T+(%{tAw-r^HSALlpfG%B$|*R>nZ2@ZkV{n@*AQaKg}nWs0GK z$oM2N^jgEQ_TQL@veSocXTo?L4{R%m5?<6U7QuFyUa4|iTzwlXl%YIS;+mp%3E%If zD6-?FgL|szJ7g^4J0K;p!S5^|HiH)B9D6fAv~E_7#s;5xH^Jd|riICoD*fs$?`sbjsx65fGH=h3avkz9@yuE*k2NMFzywrf~`#NPbc)YJEB|;DGZ++Cr2+NGRNw zr;a~4_PKPgHtZ%{UthP@$>5H&P5P5Z#Ah=LR+8tTb@=0c3upTkYJkYe*?ND!m%9U? 
zUcLB+K7rk@&u7TroVyU`uPKW1&rU`<&Kn+b&koM8pD6<~DSt@Mg~9{XKd;x-zfaNz z?Xs@CAUJAoKI9Wy%&RpIHDa052Cuk~Ot<1w#G6RTGB z9VQ`{Y|6Q`Gb52Y}-2vVM7Y{t#B!%@$kNEqd96Fv$OEnv9Vrmees zHbELKE7MC4{m4NIFZ!lV=fYrWC5ulny%96q>2Hf*S}6irXYA|=z#PE$W2^H$tj`Hb81;L?cVNP z>cj+r@dv8Pr(2Ps%p4fo-bX(vAy))U`{)XKU2rn1&6}oVZSN5np}$v}(O?!|kkU2v zEiomV$sm(YWkQ*}vMD z4p^}d<_M=L*Z)M{%HlGnOLaVwlzZ4ZJKNZ5OhZ;|9XW|?0%^krjs=>*X;lYIC_M9K z74!nFivKN=hs$pz?Arn4Kw(h`UI~EIVg{4fyJDI@d_4Je#nL^nXWtR4Z6et_>49P0 zCx+J5T}9{YEZNgyn9s}Dpp>zUD%t8;UWXMJ#~*#i==T$nK?M6_56*Zumszw|AvJ&d zNJ-^k;*zfQr6qJ@Rgv1zk;3Q2OW^Pedz*1qHWknivMwuMkvw*gRx@JfuPUCU-_J&a z2=T0qH_Na(@&)a1`f6^ZF|bsHBsPI6Z+VDN8?tX$=f7XCI~%eudtu^9#=;w#EFC+G zD7bDcEGf#=^k0$7E>n62V*gV)3341M3<9>VJvGp6i#(8ACM zD?}CSPKXwFRt0%=R`nu+!f+u{c*3f1c(MD4G}jbeeFdyIXxF z@Uiz6Njto3-`HL<*b%Jy&VU^OU!$1&LcrB!h0GsXtZ~b`G#6K!w4-H*g;FzYVrhBU zZX4Fu@r`&1943ah?$C6>M|)!@7~Yv_w`qFdJYiEgX=_*6?$H9i=kM?RjkBsrDs1Mt zQG~EKpW7#elX&hQN3$7}8s$Z1Yn`8YD#5b5t%MQQKTK)JTNVXh*yxpMOI_%CPIFGo z<3tT^-GW!Q`4Owwrx?U2Mo9;4vCl1cUtV4U81WLq*>hJ-P0gnhdw*5>L%6C3$|RFc z1zw5+hd+-5wjZyCgkG?Ui;Eema$+Tegd;cOVwDv&H(-_*zX>REanC?1k6zs%B6()G>Iea}JvZ z&h84xlrkR|U7Gjb|5`QP_XrlhxWveN%2aZM)WfQySQKoAZUFaR3hVwc3iELbfqUAz zhY34ETus!AUL!-IpSG)G@$2*|a**on{9xgfq&V&qL)4>WZtQP)McK(W83`lss9)Lqs z9hr>?W%|Q19wK$xw<<;0<_Y+X4Q*-3Jh#hn5FLtd(bU^aaY|iBsC;nPtb=YJ0T=b?=&P0F8hh! zn7o&U=yK{rnr?Vwn)gaVdfZGpm0D=*FSp;5$9rtuc4M@N*`E~&M*$8Av-`1#D$7$5 zU|nW^usd8!NV`wWCA%D`+9H@HXcE9R%giVU_1;wE+Ja6YMn*Z;q}d=Y#}j)u$k9GH zuItWVUE4&&EXQ=^@w4HMNVY4e@m-7Moz>Ay(bCmUTq*k$#f1-@@g;oWVL^2w{of`m z<5nfm$F{?NV_4?qk&seEwXpA-7mqJ7ukC-Z#kJDPc(XzUZRoW2X8oa-bM6IWrkcRL zYV5_9JYuS|2%hJudkD*3s{j2bYtg-$t^eP$Zk>0PMATlUp`ji8j>3X3EJ!TJ3;QDV z6>>>`5N+f4{Yl6$WP6Xa=b%1;w0E2;RsX;4OtA1zFe|WAh(B2KJ0fS=ZW|q>#tc9u zgN5r`sCa6S+Ot%R4}Sz@Jah$eB7;wP3c(=+PDiW9&guc|!|aDyi*7Y70qyz$LFrY7 zm(EChD4@sUSVs{RDpEfx>~4Y|)z&72a0#6xKzX$NI$(6T?UJO4F66=WvC-vF?X3&! 
z77)$;s0!!5sPC>Hl>`>-dcIlpcsMAz0E(Cj^gSKxt9#7sNL$+h+`|QjM5AbQRmc;E z1^r&)`Ehmo=~u-btJz}uTbe-PK!zH6B&WwmNW$fWnkU!f7Jnn2G1(%*7L9%~oOj1W zF;(Kh?(xe6VY>X$uKgeTy)VWfuRf(`pq*>G)%fX8k#pkWT*!A@Xg~U`ep@UHBcp0S zVzmKNRZiJ+mJm~G^lApF;m)2Qpk(pi|4A$tnSQfSj8eWS)jT#x3p}q+`g)3Riy_$= zM;aX5Z#{LR!FS|Q+_V2C+rKs5U!LMCQ6As?(jh1LfWCl_Vav}7d{*#bwY;~xpU2Er zHYe$w)=@Bm!u0v9@KFjWuQQ?3I7TV1qu6s0bcM)i?6O^EDABWxZ1HGASu8+5!3M)Mcf^`5+V_3oI;W^SQo@>QLICl++AfJ%P*c#t z(C%d(1)s_qgyC>pwmA7tO^DPe(BwF+5d+H*4yvYG^MqaK!ENZkMz3pcu^3!K7J7CD>WfB@LRZ*WsG=%F zzU{e=#!^!&c>f8ILo9nD5hQcS5kG3@hPZd#31L+7vtFDk=&l{1Mj$ZdPxI-+3>&z#pcR6^v_L;B*4vQEu5YgPs31y?kuSIZaC*7>Wu(bF{v z;p0do?0XbM)o(YRkpDtqHx%IoWOBq!g+ujdw`s z{o2a$am(Cf$BO@=+w#eLVEV<`&N~0GYhxWh!c}reS)XbzBCz=V&QQP8?J0~iPN&e| z5x;2B0<)wjOR+usCkV?gd;K54(dWtr4aBpNHMtNUdnvD(|6-4Vh!~KZob1FDODeEp zWbI)Xx!8Eps}OnqV>2Gj)uXeL$(#Az!VxuM;YfI1Qg>$d=|SEx;#Vig>)nvhIn;Tw z%<9P9=&byXryAi4Optnzbe}4GzP0!4U!Q-jV)@S+f`>wrlUZkW%5$7R>4Ii?Q7BCG zG(_fKDNOTfk#9n(<{2=TV?)A^$>yQF{hDznbVhnXEoM~qnn82o%wO7h-iK07-q)fC*}Ai@bQNZH9n!Az4sQOdi4A* zTWRU(vlDqR7C2N=bL)(%&vDBi1@+x9`ZmbK=KLkdTJI)sYmqN9Do1({hwK+cO|3oO zaulT6gn+<0Yvc%hx(8>?Pri&41lEfq{9QXoutij3rCF`F3~ssU!ZiNWKI0Ey3k>5M z@oBQvMZQOwVf(f|Zzar5Laf>eXP-oIQA0ZN%@|@vaI08wnhXTMc`Y_=491N+Cg1mgK@1xtb z(8idRm4h>2TUctazyLBrDNIy-F?)5e-6!t;0+8f`xTEjtWk7v zTAU3UmQKQ}#uTRT*Jc(^CVF#oWBb3?ql8AU&+8<-6M&P3qcJUR) z&s~W%Vb+>r@s(0#PXck|A@>y+7N#ca?7mhA=jB2$10xfRmYw|!v*YpH-{@qpXy81~ z$d7$nB$YY%-Ya4mJf3_7B z--(x=OMN@EOwDZ4D8VyoR4V5IyNt(TID!EwwRY*9lK(>@Z|%PxAkN{}ZX%|QS1lgG zQP6ovVXwl%zdtq|!2^Bs&CSh+!oU0aI{F9rZ265gyw5tt(82%y49>j(IBM^&U&Z~q zvSK1nzIuzt3qy&=MtZp=uB(vAv>$Yif^$Xad(q{!n7@fY=utG$QXN%j)|7olZC)Ro zItAMKtMW#0e(bwQy9cJ1oPNVt9FC0${0Yum+G)I!SPRSa)XbHUW0@s6qJD$0telW1 z21pkMBxm#Vc{De&=A*jD-<%;fr&|?qQ}-%!S{t8gjwlgv zC#(#`n`)}6@_IrxW0`Pt8FBgjHY`%iioU6%{I2P6)JUN{Pf0b0Ag`+YuG_VkTt&g? zSMS4c>vI!K!Dg&Qg_=POL0Zg|S9aO{qx0JzYayBTNjr<~aQ)*os0y?b{xp%&99jcM z$}B7_^U(cdO#_w1yw5QU>WOeQVoTYX^aruAu?*^&Q?)FEHU?En@5kDwJx7joeI1r{ zDp|^U<$Nz75dqED*Oae&*UoNk%^6(54r&|OGUT$-(DmC$@y<^`qY#D(4crA7QKjYO z)m(*u+|vT$%*q-O;Sn6bT>fO?T0nkm`i|VKY!-49LncJ}X3l`f{d&iExh4DtD7WHGT|zPhXn0M9g_`j(&;T8xNgt zY0CqcsvQ)3eu~0EK0Dqd=g=T>+v#T_SwP97J^vrc1aHwQ4gc#6s8HXp_@;>BZpq*Y z7G&X01U5Hxg@O9{Vp)GxoxcH+tHtL?_xfvZCPlu26GiSkz#M%;Uif}1L@`sp;S2w@ zq2fnB+}e5ANhF-MRfDKkPCekj#>=H=e<7JAirE_f)pu3rD~bAlp40WZI-z^A7MEN+ zwCnOFCe!6II#}nGzu}Fz>fwdX=(m#dYI(eQO&TLy*-k$pGp!6j zUef`17{5a@b;cEP{ypA0klV8y)W3lcl zt*(yPGaJL3&nkBnIlpN%Kh>*D z<8ki6^@=m>#DpwtjyRH=E0? 
znqL0Ta7X5D^5k_eDR?=2VeASE;vmng%~6j-<6iB5N#yWsm~w@S=gYLQ&XZzXGfr z8ra%Z1QArCl)Da{j&(@1+Rs9NwP1MI9*mlo*#nxtj6lbCZqH-84anP3d*JF0c0WvE z(k-sM^#RULXC~Q6fP2+Y;2$&bo>V3VGb69YJ)ikRR+)7iA+O_?HQ~o+IR7Uoz+Z&g zcRy>i83Wu`Dis5RY|)v}!~RcXm%~M+0}RKtTEI+meWb1p!=&XF@Ci{%X&%5Mc)sY0 z8ANWT3|44s1dIib#jXauoE=NQ;Y?0J7cj7(hAfxh0fRN#gWueTulK-tZ=3iup%Syec)LGcvl-&4ojGrpZl zs!_H-Po$AIu|Uizsq5(TH;#_%k^=d~tE_jj@q-rR??8-aH!S(rF);I;%&e=)xo;*{zRu=uJbulxQf1<@jX$TqKK>qrzR`^Wuju0W5EFlxv%P{LZV7|$qU2<&-4?w zK=UKwP1tYr^y_}n=LUy8RaLThJzpj7MgjqNj7-yd`gT4>_Pun=X$TUIny-o*($nh3 zOR+HRqO-+^KU*;VYh3el-zTq%tYiO@Z>~C)mnmD!jNIp#N_kT+tz_XGFi(_5MY7)U zi1k`YOy&lo5;4xV>BA-MH)G@;Rf}lgXCQxoI=zp4aYl(Gy?bQ2*sP*^T6EQS0@tq> zPZo8HOjKtxNEp1`x9PTnrB8di%XFWEcW!t*?(48TTn}NiBs4Z1L>BckqkX|rb$&IB zL1KN$s#e@XRdLJPeHQKe(7CAfz|1-=$S2*b%y;sSu8SluUi`kfc8BdLRbZ zqj0KSo03i$^rsS2lsL8HZE2Yr2cq4&0lT z*cBtr(l6=G;yqKs3b?V*%A00zWG7dW^znYG^u{J5t7*pKdf9sQ5fl;v7TGgE_aj@^ zO&}tq>fC}9sc31XL`1t%&*g%X;e32zXH8kN!#9-h$w~Y}>^hE$ff>JH+=d|uWIpGC z4}M|=s|igKK%n(T|H#OQ5feV_iqOkN95Sqp!zIwX=|f@Z$P=`e{wzFh1z2`(5D)*g z1JAaOOCxcc-*Ub^;F%w-8hgm&=W_6zY8vy6&%R%}&l&7OGNA{9;MQFloHqXOmN2Hq z5&m})eQ0g1ToLtRuwfUW@Vy}QDWuUZlP^1CU03DwG%~>cS3xAFReLRJSiRz5EE3G^ zM%Vyzaq+EWp`IIzPxU>HwaY35fB~foSxoj!wOq?~iF1gbLLl0pYX{WLt7eE+U`Rx?_(A-LOVH_bg3`Tp#8j$UMSO>Y(D zp-(`7!AeMG)*?VM8}fjbEk~w0XlQ9GCjLF3C2IvIlKjWXx}*aa?tj-$hAVuE4*hd` z9{Qew`t}l#fXcqWpiW3hf#4BM?|FbH%?XLF>3-AYV0V0cT>VtB&*Bv1Qn&806N9XG z3Hrj6U9Bss6l5)S18>sf-T$MZG}qYEQ&91C*CB*MiD0&A+>#&?}*^@l`8)&)S2Ns1k{VKTFmL5EiFzi1@xS%m|92n0Dw%xH8FrS>AxDz zft?t*QuWM-nK> z>vACdw*v<}^P_4Up_f*5hUZM)@b=cmHEG304@2AtM_q8DBVQ z|2Y2T#iO@ayicPBh>MR;wE!l6Cpo#a;;+|_4^(ffK)rh&T4 z&&h^m5F2aFI%7y#dWH!8^8MwoqDG@OF2?OnnofGN#7-*2;-4`(4td=n!=v@BUi{*lhb@GmeFox*GaWC8oGI3V z)Vh);e{LQ^#v9GrKN|}=a~G3wU`hix~A{bEBlj|l;iYH*~be<{(r~_lFts) zEGxA}Fj-ZV57!x@z`hh({(uJlg7y}`qQ)9u*6@1k|5`$ry#G!LHm&^>QZc^bucnz# zyG3Jj0w{&;Ule%PncI)+@g`;$4lTWWIy-M!1=0>qZLO&soHxs4Tm04+?8eT7Zaxw# zC_I`8i>9(}(Tp)RHjy+0Vz-Rb=6HxcE1XNU-UDTp6b~IIx%3dVNM2i8o7V2iz-50d)0?Uu!;VT>pN%F81EI;{&8=*SjnpP5pq)jc0f^GM@miHNlG2smY1& z0Jim9Fk9juT6bxj=4C<2)-p{Va9m*|`#`fwolMAb%{5utwZ)O+Cq&ro`C!>{1a{+F zuk5Kgi|7dz03<+0#n(WZgA{i(%j-%#`Bgy(ca1Zic7N!Uw4DYu^68z5)qez7jFbmANui^>_HNBwR)Xm$9*+ z=^o6*MZC3#_M;iOi$0;(h-sGYXtCYh@(X#x@QtCBB!E~q%haE0J7K-f0HH1Y%|2L% z=I##{&MvL2Ks<$Z_LhwzRzJYdZ5ev^#3^{*t!05NTEzP1eo#pj+RGGem% zGc&|e%RMx}Tzq^5l2EsSX(0i@RZOBnPyYpKKEn`b8Y~?JBpR{*x$PFYYnaTFbw_`0 z!*6&G>}*r2K(j7|EL?P8+PcuK&fHvylDiFA=p;(qTrjh67>!Lq_vY_5g!ZSy7UN(j zg{k2n`*Q0`ox=c0b;>o{fB%!Z0GHY`I_RLX;4wiU|Movp5O52Uwc@X22a z$S0bS2m=i8`)I+3eX8qr4&l+iq8``4G?wj_?&qouQX;lG7*+elE8J`OPv5_LypX;2 z2>D+#LSK#ycLrDO&s>K`uE(p-;QlHJTqmvgHQE(5rIk;dZYl}hQv>6%GB^59GQo=A z#0N!_F2x~&*?zv2>ek76lLx9e{va~{CzbiJKYJ@U`3$!Lrxq?g%^yO+UmNkZXjG4Z zg-Vn(9<=md1PB?C2S_+8jV17{^Z#RNO3o+H+3(43Xg&1|hMfQ^EH6%p&e2H(q%l9y zKYjOfa^aeaDk^f*Fykg1e0L4Yc=m*IqymvklY$aG$^OE!EXAd>XoOdf8|2&H_mr`) zDc=lFuBD;B5|^xrFr5Dh2U7AA^zXVc>)DmcqhI0A=~_I!iKCqCmaX~0YI<( zO@IKl;juB*2{tzxT2te-iSu)Z_oVxIp1_2giv?YK`mj$&htr&rt)hDwf>3!SrJ;8i z{CU>r(_6nhfee^I0F>UBzvAEI{Nq?U(N5ANE14rp1Se)3I_d^GDNXWfa zDPBtP;&(3d>Y8Qnaz3$nHyJk7D&rb=*-z+Q%}#%1uSuR_ve1c28xx%=b!Vo<9nhcz)<-{Y^os&$~DhXQ|}_FwTYq zyb7J5=UQv7)wLo51pPM<(Rub@?JOj1SxJoxtvm1_D+E zwes91O)_9t%R^H&@T`;GlrjR|`F+R`DwfWxukY`#w%*TL*R*aU|F!2t}@*(a@UvivY~EPe+Ijeys+=2H9@hbthy3tt;!>6c^~9Bq7w|8sU2dFG10sw#MI zy7bFpQXq`a|55insHyctv-@k_CDU3nDH+x@)eu>WdI~RjWiF7C==5BV8$|K3)qI@r zud+GF>BQA9r6+N(pSC*eHZoFPWi=88ygx9 zUA%--jvI^aVC)-be{CRm&q>5NH$%X?86xsUZ@!SPIayqa((i=fJxv^VHConQiNgik zss6~TtBB_tK9lfWCBd*$qm+U{bzwmZcHFY9;vkVWzvAT*7_l4#xV|?=k&WZ{;a 
zC7@?4;u`qYj->4mu+-06uy8M@AjGGkjUP>(?AD&aq6=X_1skN8#UCTqo33R~5_ir~ zTyjgYi3(tCMF<60n5ki*H1wIq(qV}W=QRjYJKOD*j$4dtHq3Je9;Y9fJ3**42q-F= zEPLocEsPDgoEf2}COE#WGQtIc9lGE)i8k768!+}SsOMhx-MGEqPiV#iW(cz^g#6wQ z!l8W`T?Uo^Fo(e60y*ecFbK!KIq&qL=J+-)2(P;0IbALkS0#~FfC1uBa*4(2!Tx%v zbOy*2Dpq|uYa%bqZn3-Gh>;#Y>kjs=1;mMZ}RcU(5r; zttRA{dERlcbSM{<3b`LuEj8N`P`-n8=m?8(;cz4bOgq+myUqQ+GKSFN<-#N26{3l5 ze!{0cnF9I`z)zFg)0kz^3Y>GdZVq>M%`sCBqnf~;?}Ds0v%jB&2tvCNS4dUEa3Cf{U@=}kGps?KY5w8{0{lsKmz|at|J)h7s3F&(h)O11%$I?m=g-|ySYFHa zANKj3meZ^EmNm1HUb4OJ-%H;;zYlMHYB_&Ckw9!aaw}wd!1@c!PWyi}opo4MTi3;< zOIkuo8bs+%1tgS^?v#=~ba#W4Al*neNOyyjba!`2*SC)E{rp?+bDw?oUTe-V$NY`< zb3bSUK7p=0#qlNGoULYthJvj*_wzFoXASfzwVuwNV|w5)-h>#vr=Xxc)03cZswl~ClaD&8@ zomzLuOdV|?ycyT2&KQ?|*Iiz%(JA9gJoR8eDw|t4E?@Xg5OG-|0ZJkg`1?n6D76@+ zcm^ivioXK1jt(fj)fb@)F&{Ch6^J z8yjYcwY0t1J&EFodV^0dDql4UfBma-{+*w>elbVG0QN?|ZD!q&TDx<(8;(YhRT@Nt zc&kW9O!NY9i9$qjOVGd9ScnW}Arw}*_HBe9-&hHV zel0HExv+F951Y^U@dGc-W$WeSOz6CZsvdW{Vf9gy!dH`^2^Tx;^JM~T_W|^ihX?D5 z2X2Bd>t)Q|y+9gU(Jd1L+&glYGpD}ERJcM)s?nePwP5=P+9=AqkY4hT_88Ao`(IT0 zv2XO*6jy%>%5fqJW!Qr>ByZnkGDIY+1*M1wg1cK-o526WkJbJwgHx}_uJsPp(C2Vq zk;bkUyaZG%9;fn#n_N3RpDKkAE93Th`jH&wd*54V0o1_t=6zgeAV9 zGGC!jr5c4xb9Jv>4m~J_UACkzcRw6}om1s-O|?rO?~ug;lt&lo)s~Ui3lIWWNR(bY zB9(8JZ)N4g5!IOEM4ijRy&LygTUaQ<#5jX(6Pe*VX7(E7yDRlzF>jn7;-V{W_qxg} z%SJ!-CY!dy^m$ULyc2Onr^rTd(j4|=QBIKX`W6Nw$08wq8QmF8I6G?Wb94~SIU-a@ zjr9*~64<i%G*1X16}WL@$Rg(q;!Ai(sw#X}^6 zHv@S|j`L8bImVK^ag3tn{m5;+Jj$fAkrJ%ulimv^_an6tb4ZI$u9=0-*)e2L`-jh< z!G*$=J(grcP8cXKjtg{GX!TK3O#pGS5tgh3CTgqg%ibIPZ;0Hlb31T~A#xe*f}Uv? zY$(rA9xqdj%V0=MM%l$> z|KYcJRBN1_P7z1SZO=|n+g>iHT=S8_lUU#iI3mL0Fxb8_{G_bfj&bD%_6ZRJ6BLGN zzdVQid_;_afiZOFU~Z!`%b0e@wMxGqe)e#mZYAp(F_6kFlt4Rpyz1F8?P}gyKEe2| z>TrJak5su5hjZ6=R46atCBnjybM)N&-OcBB4?p)l?Z?w#1Jip8R-}%RGWtmfU*P0c z#mr$!sQ1rLldB6@O%`tyP(ODtSCGt;MChd|lfbT>%Q-d)lO&gP5&v$Y7~0AqDcd*s zDi7Z-5izwERGoM|PKAAa(fP4%CC9f^RJP@5mgN5Sk??+h^DCkH3-Z={c^O@flZzOu z=XwSfZ--@|N;tv=@4`lZ%I;j#dunq1A-0Z&DF?XSD1Fp@sMw4TQEc4*2$A#_HK7$o zNlh#6%xhPGebph%1y6ZR9ZYe((fyh<6<)Mh)fwPz5U|uEqso7j;nL!ym8s{NoSW{+B=iAD-MSaVgzgUo zd1(rZ#k|kPQj(;FO-e?$)4g=GmCpH5Ajobzj#RNZ$vG)DJ39xNp5DK`nx1Okt`39uOuI|SoOWd>H7Gs4+>+<``$28kfO7<8L8o5( zA^^HIn)Rq+*@^O#tjNFmGm%PCk6w`n8%6*g`Px@>1yor=#3Ted>{7Qy)?d$(55K=4 zG4dGVShU|Go-IB(=n^x#Z)bfcgD22pSDc+bIjJF*BLg{O>UP8ZetLJ;VRI^aS_k<+ z%jBF>XZfdpTbH3t>@!(+*>PA;_2&=zx8LpI#ZKFJ3=m=?d);}VU#K1m@Rk`URfZF`B;uPgwtNV>D0G=>id`Y61M2D5j)HnL}BElAX+Kl`+9BU4T5 zZyDtN@ex_zMjkfKawyPC4FNTj*uj89{S~xTCTiKUtNTbOqWUs36aRi0E0${Kk5l4q z7(uG^4ec|Ui3tS-%`mJeD;8LilAc%lKTml@6doG$Qn>lv-RN-9k3GwxFg?$G;#WhG z_i|0K&aJm|wb!i){FP!d%&$A`35O(juU+EBtxI(Rb$(w%f36tC@u@I5&yoR#5 zW8|UoxJ=5@XBkwW@HYEBH%y*^WC~X;^U*7NtSWq_tL7v3ZN0b}k`JP~0=j<|Kbr`U z-V5mdy=Wl#_h9pod5iY#4aUbber_p|-$N*xC?nf>InaW9t5$Hnmpo#AW)hjJK)n!Z z(N#zr!RtNXP-p_X)rGP`y`QxOgHm{GG;(4k=u|qXGDyVZZq({1(2gT2dWM|5BFapH z3~p*j66WwaxPQThyym1Y(d(!9-$TQ>&=jQrb*u+=C9ATEd1>eH%6%&%&urSxtp|YL zD4X#NuoC*-!spv#wBBqbJA+B5GUGtYryn9T!q(kC zGV>8%71xbFM3pTULz4}Hp2xcy%Ve`Mj z!^8Iih3vMZl?mcXZyvw5L62pkrnT!3*{AI^H`yO2A0;JsrewAwx)ijE;fCI_kNz1; zVKvZe@ZdLdm9zZ>os^kCm!yddDgR7-BC?kNAg#h9#{K3R<&88rN6#RN6W$bRh}}+T zo9F3TH>yP?KF0vl->XZef1A~ad1|tu+#%|}{m~^*$nN$?D8XMfyERGIU(S`u$A`WR z9|YsT|JK)p5I^gwFw3%@;+=0IP0iRFyCJv_J6pIfr}}Z}Z~)RWWPSkN&mRajs&;w{MgchFyp{hM7lBTdY&PY18go~?ZRS>1EKdo&2 zQFi$l^6BIByS$LVH0J{V(ecg8qXQ^9D00c>PEtmu8-yq;8>k88=H*?PzowBN>9TC= z?UlHw(%T{o7tdM(PMd#Cw3@OqSx~D4JyM8}xJ+Jyv9s5AQQg7A0O)pkCHfz-x=2Hh(20f5gz7`LTqSJ3?$pibK zw0{CImxbH~C$LV2071l)KL{DT&WN=F0s`fh^ITB)Phpv`s#vh8kx>8?G*s4fh6*y) z5P-Wt`O8NRXC*l~Z>Z@*JF?uRAph!rQtg}^*pcWP({6iyHJ80DC3cf@0(ss>fyR7Y 
z?@!goP86|~wpU;Hd)SGCnUS#@V8)aze*wE)fzRf9$VcP_3*CAIUY5#P7=C7Rp4+*a zVDvkc>UCp0`{)mdU2eBq+n{n+>J8kf4zuX=NPyU2hu(>F2kWoTH~ z-NnCm?GqT$Ymz*fn3%ljrAaP!M53*<=mvLwDO*qr*fX>`>|GE#J3Bk+ql#R~juaA6 zQi>&-uW1olt>fY2)A0E$gfSgh-b};fWVyUyyRlpnPeb}Typ{d5$I5$ogMHgqXgSHIj&D1@Dc{O!~d3OLHn zxPHFy_AD{rwo`L7p`l3ZMO~zX$H&lrpV00`zhbjv3aBi5q3mWe3$~o7*EtU!|GtE3 z&vsZIO^rev$t)k5DFfX)NG5|QtJ|f8+TPUosTGGOKxg#=*mA+MU4YM)S5UCM{81Vb zbrHdTBsc!mT*%pi(qFs9ofk}+(UoH@H@gk1-7l4Bi5+p4;Vtu$Wl)tdlTl{O(WSn6=OuE*@wCY8e9e1q?@Cd$QqM z# z7ozpfzrEPBkWrQC?%&qkJ4}I-XSuSeg;w^bYWIWeBlJ84C9mR|v!vo2I85_1_+$RC z@$l4%X?wMJb@-!zFdH^-S4{ju;Ztv|=g&d?5cuD>`KQ67VUn(tvs)cinjZkA~_-y z8d_+ZA}|^}xAtP<1vODHJclwtYP?-Q+mO#|k&g{zAl6;ugSfckojA()kHkRf-enLr zqs+hyGY2YuXjpNvW^F0&^}GFBww+D)s=DnKl2edM#Sv{dHqIMQ0U41%|+VTPU{`RsfidmLkmRjU^3aH_<**E|7*=aSwM zJZ6Ra4fYu1JbI|FNd=+S+lLePbW!ln%H|q~)an!3r#ewFmXI{NbmkW&hor>~wDCfH zRkGgN)BL8IstJZYuFoGv>D-NZI35gtj{N>MnhG@{7%_t9=m|W*6EMG$spo2Rd#-ml z0F7w~zu38?n%Vf}Jf2!Ga2VhzBO}!EcTCF6z38~Z@HZzw?PzVznLsmWoQQ%klx&tz zp7Hi<+fr*8o;aC4t7AN^Lc@P67onQ#6$w&8+jWFSMr1*pdm=Z{v7Z8Yc7#^Zx;m zK=OqR(Q*@x0~tZRrhcNOAQ6pcvMoJH$)%2-JaIqDi2ppWIqS)g z77|1sa$W%LZR~8TBCTq$d-$(IZn)jc*6Xz}C|?6MrZ~0GgIV~^0Kv%|m4JYj zuv~8bKHPA#+ZBV{?<*|dv)U88l(HJL{Llt~3xu{Me9UVZn&4CJXXqHTEs^+|DX2Dv|i0M{+PExU!4& z4eXn45$iwK-DW}HiTR?42X86W^p%D~FMMmNdKwX+YipQ0WY+!o@dhmp&z4QBCt$6n zXe@<#H+A>k5%*TXj-V}&46hMJMTIc&pl^(pNUn(X+J83Nx>SPq1F9y4Rnw`sA0*}p zm&bOgnkcDcJGC{ONoWZ&{fs1UzyNLk#_aWuWq{Bx!|}vqtA>w@UDFXV7S3I_;Uwoo z8Y=fpOU3m-ERMzfbzx1ovvxm*{xOeAN*Wuh%2Q~%7Zih&$6Lp2@Q#{c7c8?%u~^X; zT$-ISeBA0qvHPFwOmW`7yO;n#1}x%!8`33U(gxLbL|%<7@2j_O+AhQt5vDQQX-8;M zR3AqGm%P%EZZXL{91@dGwE!?m^2h&=DS7YYbiFU@+*HN1r)~4cfkz14jZ35B(Yw(~ zU81Ir-sq2uK}{#j7C)(SXv2jNf&Wc!A)ndY`v}wRrm%rr%%(TLg*$IZu$;ue3r?^~ z%dD0PN3u$`Hb%*nP%PfAzHQvH&7ss=DsI^kh#$QzAe)j*If|vM+-*}gciUkR57$KE zhrMQ*@BcByHQbw>om0Xo+U2Oid;2*otjIH1qy{I%Io=N|#&<-65cY-TSEfOj>s`;x zooA*g4=QSFd+%?~CtD;;$gEk)jaQ4+t4@IN6%*h*hO8t+*F5JYSFRkzNwT5&I*6;Z zPeS%PSX)EMd3)TIakPu>DNbC)`XA9ig`_Ms3h0)?j8li3CJ97Cd{Ou57{b24YJ5+= z%ZkD(<|>TW_fMEkJ=^@HCgk^eQo#5nju%|2??1>D|Lp9tkX)D1hj^NO#j}|_;f|xP zU_fo=W`@pP|7~Z&BwV_|Vi(yc-afP~dRPutsU>pdJInWZ24ZAWIpsI38JOz4Wm?vN zEVI0DS(l3S$CeL1%ih)YF5^vXS(q!-rVXwDbW8OnLSp>4@3m!yW=%dcXZG~aeh8xN zSQ;d~t0n7d8R9x|3g_AGh!x%*n!>xQunt3Cqx>%K(NyCC=>#}|^ye7VykB2R`*DNM zO3DlfOO&DvgKvED44w@-&KYc*DLV7P6BUmPB10l4Qr}@v&T!*|qcg(b>}O$raDTkn z-s^}zO7U?9)#GdnBKv|Cn-MC;(IWo=k?Po@WM5YWF>&z%fRUWNVpaFeH=WyT@OD+|*B(@e5b0>iG+;*_%uTfluo{ZEmKZT{5T7vBhXdUhh zLhL38e|RgH<989Y_-n%(rQ%t)BqSyl?BW{YJj4B7LCig#v%6t2P~Uf~Zz;hi#LGNq zy+Rf5rFq^yeSOF2u$jmm9Ut->xBr$B@ZF1lC(#L6nMG+DNKL64ZOBS4IsQbXawMn=L_m^Eem7N?I6}K zfyqmk;=xF&Ya3yFK@(s!M3B*x@KY)#!i%p2c<>Sb{{I(ZSLY~o_4lvRV(V^(!If$5 z9S#N#qRl&+>!z(F^A`sk@LtG4*F@Hze&PA6KX2Ha*-R3S*K%VCS(Z`jn^;e_Zc)#{ zuBypu;UVR_=K(3)XTgQ0ALDbmj{WmfoP3pr>Bi%C$EbG>82TtbQ0c9=lp*+(;oB3K zofKDJHw6SBSk-UhZ?e%yuirz+)P9GQmsImSf#f^YfsikIf-nXqdm0MxUhu8?74Xy|ZFtV4S8>NW^XslI@@HM=GfOOS6rHFMx#S(=D{fQQbn==8&}uOUSy z+ys$?1gk;?{rK-W1{ac!HtAc87bu{&{P%I?#?Z;GL9`f17zLFRzi;&jA4TtN5e~$k zSfg9G^1-(oFDC|y+~!C-H$4EB6yD52jb$<{Jlq55DWQFZ)l4lB|6A_Pos|gu3is{L zl9D0H5cc_1a|a#0aN~!*${Tp)-Ii?xE2d;2Qb$_ad{X0=BJh^LfA8xbEd6e3tf>Co z*o|{PO*}b0eg4ox3`;aiVadvVNwy%NxO49>Yc3pDaDYvQV>WPIXn%X5Pc`0?NCtK2 zlLEEhBG4yAwc*)6dxXkO>b7Dr)yH_s-a)WQ4g_4Z%0L>$qWYA--S)e{3dI4ZUHU2s z**Rq*DEU6Q^~cHgbz8)E##ewSp#W#!Vr*#0;T4hb{rr8tq}p5>k1jM2&i451;Q+^) zK0#s0b?JFXsdR@7*;C^1$gfAhD|ikj6@{V=&=uIDFvM12i7-kjn2`9{o?r^JRzob`rmIx(cg|dx8U_&eU`xK z&-2Wvc_;#r15O=WD$|Kc$^0t^$Iq_xw(;B1)Jrs7TY8n0dW%IY?{D!l&8*@ihk#;K zUX3&n^GH8VJ+t2I0QP+3;=;Jfa 
[GIT binary patch for docs/gnn-lspe.png omitted: base85-encoded image bytes, not human-readable text]

diff --git a/environment_cpu.yml b/environment_cpu.yml
new file mode 100644
index 0000000..9986997
--- /dev/null
+++ b/environment_cpu.yml
@@ -0,0 +1,44 @@
+name: gnn_lspe
+channels:
+- pytorch
+- dglteam
+- conda-forge
+- anaconda
+- defaults
+dependencies:
+- python=3.7.4
+- python-dateutil=2.8.0
+- pip=19.2.3
+- pytorch=1.6.0
+- torchvision==0.7.0
+- pillow==6.1
+- dgl=0.6.1
+- numpy=1.19.2
+- matplotlib=3.1.0
+- tensorboard=1.14.0
+- tensorboardx=1.8
+- future=0.18.2
+- absl-py
+- networkx=2.3
+- scikit-learn=0.21.2
+- scipy=1.3.0
+- notebook=6.0.0
+- h5py=2.9.0
+- mkl=2019.4
+- ipykernel=5.1.2
+- ipython=7.7.0
+- ipython_genutils=0.2.0
+- ipywidgets=7.5.1
+- jupyter=1.0.0
+- jupyter_client=5.3.1
+- jupyter_console=6.0.0
+- jupyter_core=4.5.0
+- plotly=4.1.1
+- scikit-image=0.15.0
+- requests==2.22.0
+- tqdm==4.43.0
+- pip:
+  - tensorflow==2.1.0
+  - tensorflow-estimator==2.1.0
+  - tensorboard==2.1.1
+  - ogb==1.3.1
\ No newline at end of file
diff --git a/environment_gpu.yml b/environment_gpu.yml
new file mode 100644
index 0000000..5192a66
--- /dev/null
+++ b/environment_gpu.yml
@@ -0,0 +1,47 @@
+name: gnn_lspe
+channels:
+- pytorch
+- dglteam
+- conda-forge
+- fragcolor
+- anaconda
+- defaults
+dependencies:
+- cudatoolkit=10.2
+- cudnn=7.6.5
+- python=3.7.4
+- python-dateutil=2.8.0
+- pip=19.2.3
+- pytorch=1.6.0
+- torchvision==0.7.0
+- pillow==6.1
+- dgl-cuda10.2=0.6.1
+- numpy=1.19.2
+- matplotlib=3.1.0
+- tensorboard=1.14.0
+- tensorboardx=1.8
+- future=0.18.2
+- absl-py
+- networkx=2.3
+- scikit-learn=0.21.2
+- scipy=1.3.0
+- notebook=6.0.0
+- h5py=2.9.0
+- mkl=2019.4
+- ipykernel=5.1.2
+- ipython=7.7.0
+- ipython_genutils=0.2.0
+- ipywidgets=7.5.1
+- jupyter=1.0.0
+- jupyter_client=5.3.1
+- jupyter_console=6.0.0
+- jupyter_core=4.5.0
+- plotly=4.1.1
+- scikit-image=0.15.0
+- requests==2.22.0
+- tqdm==4.43.0
+- pip:
+  - tensorflow-gpu==2.1.0
+  - tensorflow-estimator==2.1.0
+  - tensorboard==2.1.1
+  - ogb==1.3.1
\ No newline at end of file
diff --git a/layers/gatedgcn_layer.py b/layers/gatedgcn_layer.py
new file mode 100644
index 0000000..258fea4
--- /dev/null
+++ b/layers/gatedgcn_layer.py
@@ -0,0 +1,84 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import dgl.function as fn
+
+"""
+    GatedGCN: Residual Gated Graph ConvNets
+    An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent)
+    https://arxiv.org/pdf/1711.07553v2.pdf
+"""
+
+class GatedGCNLayer(nn.Module):
+    """
+        Param: []
+    """
+    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False, graph_norm=True):
+        super().__init__()
+        self.in_channels = input_dim
+        self.out_channels = output_dim
+        self.dropout = dropout
+        self.batch_norm = batch_norm
+        self.graph_norm = graph_norm
+        self.residual = residual
+
+        if input_dim != output_dim:
+            self.residual = False
+
+        self.A = nn.Linear(input_dim, output_dim, bias=True)
+        self.B = nn.Linear(input_dim, output_dim, bias=True)
+        self.C = nn.Linear(input_dim, output_dim, bias=True)
+        self.D = nn.Linear(input_dim, output_dim, bias=True)
+        self.E = nn.Linear(input_dim, output_dim, bias=True)
+        self.bn_node_h = nn.BatchNorm1d(output_dim)
+        self.bn_node_e = nn.BatchNorm1d(output_dim)
+
+    def forward(self, g, h, p=None, e=None, snorm_n=None):
+
+        h_in = h  # for residual connection
+        e_in = e  # for residual connection
+
+        g.ndata['h'] = h
+        g.ndata['Ah'] = self.A(h)
+        g.ndata['Bh'] = self.B(h)
+        g.ndata['Dh'] = self.D(h)
+        g.ndata['Eh'] = self.E(h)
+        g.edata['e'] = e
+        g.edata['Ce'] = self.C(e)
+
+        g.apply_edges(fn.u_add_v('Dh', 'Eh', 'DEh'))
+        g.edata['e'] = g.edata['DEh'] + g.edata['Ce']
+        g.edata['sigma'] = torch.sigmoid(g.edata['e'])
+        g.update_all(fn.u_mul_e('Bh', 'sigma', 'm'), fn.sum('m', 'sum_sigma_h'))
+        g.update_all(fn.copy_e('sigma', 'm'), fn.sum('m', 'sum_sigma'))
+        g.ndata['h'] = g.ndata['Ah'] + g.ndata['sum_sigma_h'] / (g.ndata['sum_sigma'] + 1e-6)
+
+        h = g.ndata['h']  # result of graph convolution
+        e = g.edata['e']  # result of graph convolution
+
+        # GN from benchmarking-gnns-v1
+        if self.graph_norm:
+            h = h * snorm_n
+
+        if self.batch_norm:
+            h = self.bn_node_h(h)  # batch normalization
+            e = self.bn_node_e(e)  # batch normalization
+
+        h = F.relu(h)  # non-linear activation
+        e = F.relu(e)  # non-linear activation
+
+        if self.residual:
+            h = h_in + h  # residual connection
+            e = e_in + e  # residual connection
+
+        h = F.dropout(h, self.dropout, training=self.training)
+        e = F.dropout(e, self.dropout, training=self.training)
+
+        return h, None, e
+
+    def __repr__(self):
+        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
+                                                            self.in_channels,
+                                                            self.out_channels)
\ No newline at end of file
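A minimal smoke-test sketch for the layer above (illustrative, not part of the patch; it assumes the pinned DGL 0.6.1 / PyTorch 1.6 stack, and the random graph and feature sizes are arbitrary):

    import dgl
    import torch
    from layers.gatedgcn_layer import GatedGCNLayer

    g = dgl.rand_graph(6, 20)                         # 6 nodes, 20 directed edges
    h = torch.randn(g.num_nodes(), 16)                # node features
    e = torch.randn(g.num_edges(), 16)                # edge features
    snorm_n = torch.full((g.num_nodes(), 1), 1.0 / g.num_nodes() ** 0.5)  # graph-size normalizer

    layer = GatedGCNLayer(input_dim=16, output_dim=16, dropout=0.0,
                          batch_norm=True, residual=True, graph_norm=True)
    h_out, _, e_out = layer(g, h, e=e, snorm_n=snorm_n)
    print(h_out.shape, e_out.shape)                   # torch.Size([6, 16]) torch.Size([20, 16])

The second return value is None here: it is a placeholder for the positional stream p that the LSPE variant below updates, which keeps the two layers call-compatible.
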
diff --git a/layers/gatedgcn_lspe_layer.py b/layers/gatedgcn_lspe_layer.py
new file mode 100644
index 0000000..78fcfe0
--- /dev/null
+++ b/layers/gatedgcn_lspe_layer.py
@@ -0,0 +1,133 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import dgl.function as fn
+
+import dgl
+
+"""
+    GatedGCNLSPE: GatedGCN with LSPE
+"""
+
+class GatedGCNLSPELayer(nn.Module):
+    """
+        Param: []
+    """
+    def __init__(self, input_dim, output_dim, dropout, batch_norm, use_lapeig_loss=False, residual=False):
+        super().__init__()
+        self.in_channels = input_dim
+        self.out_channels = output_dim
+        self.dropout = dropout
+        self.batch_norm = batch_norm
+        self.residual = residual
+        self.use_lapeig_loss = use_lapeig_loss
+
+        if input_dim != output_dim:
+            self.residual = False
+
+        self.A1 = nn.Linear(input_dim*2, output_dim, bias=True)
+        self.A2 = nn.Linear(input_dim*2, output_dim, bias=True)
+        self.B1 = nn.Linear(input_dim, output_dim, bias=True)
+        self.B2 = nn.Linear(input_dim, output_dim, bias=True)
+        self.B3 = nn.Linear(input_dim, output_dim, bias=True)
+        self.C1 = nn.Linear(input_dim, output_dim, bias=True)
+        self.C2 = nn.Linear(input_dim, output_dim, bias=True)
+
+        self.bn_node_h = nn.BatchNorm1d(output_dim)
+        self.bn_node_e = nn.BatchNorm1d(output_dim)
+        # self.bn_node_p = nn.BatchNorm1d(output_dim)
+
+    def message_func_for_vij(self, edges):
+        hj = edges.src['h']  # h_j
+        pj = edges.src['p']  # p_j
+        vij = self.A2(torch.cat((hj, pj), -1))
+        return {'v_ij': vij}
+
+    def message_func_for_pj(self, edges):
+        pj = edges.src['p']  # p_j
+        return {'C2_pj': self.C2(pj)}
+
+    def compute_normalized_eta(self, edges):
+        return {'eta_ij': edges.data['sigma_hat_eta'] / (edges.dst['sum_sigma_hat_eta'] + 1e-6)}  # sigma_hat_eta_ij / sum_j' sigma_hat_eta_ij'
+
+    def forward(self, g, h, p, e, snorm_n):
+
+        with g.local_scope():
+
+            # for residual connection
+            h_in = h
+            p_in = p
+            e_in = e
+
+            # For the h's
+            g.ndata['h'] = h
+            g.ndata['A1_h'] = self.A1(torch.cat((h, p), -1))
+            # self.A2 being used in message_func_for_vij() function
+            g.ndata['B1_h'] = self.B1(h)
+            g.ndata['B2_h'] = self.B2(h)
+
+            # For the p's
+            g.ndata['p'] = p
+            g.ndata['C1_p'] = self.C1(p)
+            # self.C2 being used in message_func_for_pj() function
+
+            # For the e's
+            g.edata['e'] = e
+            g.edata['B3_e'] = self.B3(e)
+
+            #--------------------------------------------------------------------------------------#
+            # Calculation of h
+            g.apply_edges(fn.u_add_v('B1_h', 'B2_h', 'B1_B2_h'))
+            g.edata['hat_eta'] = g.edata['B1_B2_h'] + g.edata['B3_e']
+            g.edata['sigma_hat_eta'] = torch.sigmoid(g.edata['hat_eta'])
+            g.update_all(fn.copy_e('sigma_hat_eta', 'm'), fn.sum('m', 'sum_sigma_hat_eta'))  # sum_j' sigma_hat_eta_ij'
+            g.apply_edges(self.compute_normalized_eta)  # sigma_hat_eta_ij / sum_j' sigma_hat_eta_ij'
+            g.apply_edges(self.message_func_for_vij)  # v_ij
+            g.edata['eta_mul_v'] = g.edata['eta_ij'] * g.edata['v_ij']  # eta_ij * v_ij
+            g.update_all(fn.copy_e('eta_mul_v', 'm'), fn.sum('m', 'sum_eta_v'))  # sum_j eta_ij * v_ij
+            g.ndata['h'] = g.ndata['A1_h'] + g.ndata['sum_eta_v']
+
+            # Calculation of p
+            g.apply_edges(self.message_func_for_pj)  # p_j
+            g.edata['eta_mul_p'] = g.edata['eta_ij'] * g.edata['C2_pj']  # eta_ij * C2_pj
+            g.update_all(fn.copy_e('eta_mul_p', 'm'), fn.sum('m', 'sum_eta_p'))  # sum_j eta_ij * C2_pj
+            g.ndata['p'] = g.ndata['C1_p'] + g.ndata['sum_eta_p']
+
+            #--------------------------------------------------------------------------------------#
+
+            # passing towards output
+            h = g.ndata['h']
+            p = g.ndata['p']
+            e = g.edata['hat_eta']
+
+            # GN from benchmarking-gnns-v1
+            h = h * snorm_n
+
+            # batch normalization
+            if self.batch_norm:
+                h = self.bn_node_h(h)
+                e = self.bn_node_e(e)
+                # No BN for p
+
+            # non-linear activation
+            h = F.relu(h)
+            e = F.relu(e)
+            p = torch.tanh(p)
+
+            # residual connection
+            if self.residual:
+                h = h_in + h
+                p = p_in + p
+                e = e_in + e
+
+            # dropout
+            h = F.dropout(h, self.dropout, training=self.training)
+            p = F.dropout(p, self.dropout, training=self.training)
+            e = F.dropout(e, self.dropout, training=self.training)
+
+            return h, p, e
+
+    def __repr__(self):
+        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
+                                                            self.in_channels,
+                                                            self.out_channels)
\ No newline at end of file
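In the layer above, the edge gate hat_eta_ij = B1 h_i + B2 h_j + B3 e_ij is passed through a sigmoid and normalized over each node's incoming edges, and the resulting eta_ij weights both the feature aggregation of A2[h_j || p_j] and the positional aggregation of C2 p_j. A usage sketch under the same illustrative assumptions as the previous snippet; note that p must share h's dimension, since A1 and A2 act on the concatenation [h || p]:

    import dgl
    import torch
    from layers.gatedgcn_lspe_layer import GatedGCNLSPELayer

    g = dgl.rand_graph(6, 20)
    h = torch.randn(g.num_nodes(), 16)                # node features
    p = torch.randn(g.num_nodes(), 16)                # positional features (a PE init in practice)
    e = torch.randn(g.num_edges(), 16)                # edge features
    snorm_n = torch.full((g.num_nodes(), 1), 1.0 / g.num_nodes() ** 0.5)

    layer = GatedGCNLSPELayer(input_dim=16, output_dim=16, dropout=0.0,
                              batch_norm=True, residual=True)
    h, p, e = layer(g, h, p, e, snorm_n)              # all three streams are updated
    print(h.shape, p.shape, e.shape)
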
g.edata['B3_e'] + g.edata['sigma_hat_eta'] = torch.sigmoid(g.edata['hat_eta']) + g.update_all(fn.copy_e('sigma_hat_eta', 'm'), fn.sum('m', 'sum_sigma_hat_eta')) # sum_j' sigma_hat_eta_ij' + g.apply_edges(self.compute_normalized_eta) # sigma_hat_eta_ij/ sum_j' sigma_hat_eta_ij' + g.apply_edges(self.message_func_for_vij) # v_ij + g.edata['eta_mul_v'] = g.edata['eta_ij'] * g.edata['v_ij'] # eta_ij * v_ij + g.update_all(fn.copy_e('eta_mul_v', 'm'), fn.sum('m', 'sum_eta_v')) # sum_j eta_ij * v_ij + g.ndata['h'] = g.ndata['A1_h'] + g.ndata['sum_eta_v'] + + # Calculation of p + g.apply_edges(self.message_func_for_pj) # p_j + g.edata['eta_mul_p'] = g.edata['eta_ij'] * g.edata['C2_pj'] # eta_ij * C2_pj + g.update_all(fn.copy_e('eta_mul_p', 'm'), fn.sum('m', 'sum_eta_p')) # sum_j eta_ij * C2_pj + g.ndata['p'] = g.ndata['C1_p'] + g.ndata['sum_eta_p'] + + #--------------------------------------------------------------------------------------# + + # passing towards output + h = g.ndata['h'] + p = g.ndata['p'] + e = g.edata['hat_eta'] + + # GN from benchmarking-gnns-v1 + h = h * snorm_n + + # batch normalization + if self.batch_norm: + h = self.bn_node_h(h) + e = self.bn_node_e(e) + # No BN for p + + # non-linear activation + h = F.relu(h) + e = F.relu(e) + p = torch.tanh(p) + + # residual connection + if self.residual: + h = h_in + h + p = p_in + p + e = e_in + e + + # dropout + h = F.dropout(h, self.dropout, training=self.training) + p = F.dropout(p, self.dropout, training=self.training) + e = F.dropout(e, self.dropout, training=self.training) + + return h, p, e + + def __repr__(self): + return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__, + self.in_channels, + self.out_channels) \ No newline at end of file diff --git a/layers/graphit_gt_layer.py b/layers/graphit_gt_layer.py new file mode 100644 index 0000000..972e40e --- /dev/null +++ b/layers/graphit_gt_layer.py @@ -0,0 +1,273 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl +import dgl.function as fn +import numpy as np + +""" + GraphiT-GT + +""" + +""" + Util functions +""" +def src_dot_dst(src_field, dst_field, out_field): + def func(edges): + return {out_field: (edges.src[src_field] * edges.dst[dst_field])} + return func + + +def scaling(field, scale_constant): + def func(edges): + return {field: ((edges.data[field]) / scale_constant)} + return func + +# Improving implicit attention scores with explicit edge features, if available +def imp_exp_attn(implicit_attn, explicit_edge): + """ + implicit_attn: the output of K Q + explicit_edge: the explicit edge features + """ + def func(edges): + return {implicit_attn: (edges.data[implicit_attn] * edges.data[explicit_edge])} + return func + + +def exp(field): + def func(edges): + # clamp for softmax numerical stability + return {'score_soft': torch.exp((edges.data[field].sum(-1, keepdim=True)).clamp(-5, 5))} + return func + +def adaptive_edge_PE(field, adaptive_weight): + def func(edges): + # initial shape was: adaptive_weight: [edges,1]; data: [edges, num_heads, 1] + # repeating adaptive_weight to have: [edges, num_heads, 1] + edges.data['tmp'] = edges.data[adaptive_weight].repeat(1, edges.data[field].shape[1]).unsqueeze(-1) + return {'score_soft': edges.data['tmp'] * edges.data[field]} + return func + + +""" + Single Attention Head +""" + +class MultiHeadAttentionLayer(nn.Module): + def __init__(self, gamma, in_dim, out_dim, num_heads, full_graph, use_bias, adaptive_edge_PE, attention_for): + super().__init__() + + + self.out_dim = out_dim 
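+ # NOTE: out_dim here is the per-head feature dimension; GraphiT_GT_Layer passes out_dim//num_heads into this module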
+ self.num_heads = num_heads + self.gamma = gamma + self.full_graph=full_graph + self.attention_for = attention_for + self.adaptive_edge_PE = adaptive_edge_PE + + if self.attention_for == "h": + if use_bias: + self.Q = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.K = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.E = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + if self.full_graph: + self.Q_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.K_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.E_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + self.V = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + else: + self.Q = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.K = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.E = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + if self.full_graph: + self.Q_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.K_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.E_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + self.V = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + def propagate_attention(self, g): + + + if self.full_graph: + real_ids = torch.nonzero(g.edata['real']).squeeze() + fake_ids = torch.nonzero(g.edata['real']==0).squeeze() + + else: + real_ids = g.edges(form='eid') + + g.apply_edges(src_dot_dst('K_h', 'Q_h', 'score'), edges=real_ids) + + if self.full_graph: + g.apply_edges(src_dot_dst('K_2h', 'Q_2h', 'score'), edges=fake_ids) + + + # scale scores by sqrt(d) + g.apply_edges(scaling('score', np.sqrt(self.out_dim))) + + # Use available edge features to modify the scores for edges + g.apply_edges(imp_exp_attn('score', 'E'), edges=real_ids) + + if self.full_graph: + g.apply_edges(imp_exp_attn('score', 'E_2'), edges=fake_ids) + + g.apply_edges(exp('score')) + + # Adaptive weighting with k_RW_eij + # Only applicable to full graph, For NOW + if self.adaptive_edge_PE and self.full_graph: + g.apply_edges(adaptive_edge_PE('score_soft', 'k_RW')) + del g.edata['tmp'] + + # Send weighted values to target nodes + eids = g.edges() + g.send_and_recv(eids, fn.src_mul_edge('V_h', 'score_soft', 'V_h'), fn.sum('V_h', 'wV')) + g.send_and_recv(eids, fn.copy_edge('score_soft', 'score_soft'), fn.sum('score_soft', 'z')) + + + def forward(self, g, h, e): + + Q_h = self.Q(h) + K_h = self.K(h) + E = self.E(e) + + if self.full_graph: + Q_2h = self.Q_2(h) + K_2h = self.K_2(h) + E_2 = self.E_2(e) + + V_h = self.V(h) + + + # Reshaping into [num_nodes, num_heads, feat_dim] to + # get projections for multi-head attention + g.ndata['Q_h'] = Q_h.view(-1, self.num_heads, self.out_dim) + g.ndata['K_h'] = K_h.view(-1, self.num_heads, self.out_dim) + g.edata['E'] = E.view(-1, self.num_heads, self.out_dim) + + + if self.full_graph: + g.ndata['Q_2h'] = Q_2h.view(-1, self.num_heads, self.out_dim) + g.ndata['K_2h'] = K_2h.view(-1, self.num_heads, self.out_dim) + g.edata['E_2'] = E_2.view(-1, self.num_heads, self.out_dim) + + g.ndata['V_h'] = V_h.view(-1, self.num_heads, self.out_dim) + + self.propagate_attention(g) + + h_out = g.ndata['wV'] / (g.ndata['z'] + torch.full_like(g.ndata['z'], 1e-6)) + + del g.ndata['wV'] + del g.ndata['z'] + del g.ndata['Q_h'] + del g.ndata['K_h'] + del g.edata['E'] + + if self.full_graph: + del g.ndata['Q_2h'] + del g.ndata['K_2h'] + del g.edata['E_2'] + + return h_out + + +class GraphiT_GT_Layer(nn.Module): + """ + Param: + """ + def __init__(self, gamma, in_dim, out_dim, num_heads, full_graph, dropout=0.0, + layer_norm=False, 
batch_norm=True, residual=True, adaptive_edge_PE=False, use_bias=False): + super().__init__() + + self.in_channels = in_dim + self.out_channels = out_dim + self.num_heads = num_heads + self.dropout = dropout + self.residual = residual + self.layer_norm = layer_norm + self.batch_norm = batch_norm + + self.attention_h = MultiHeadAttentionLayer(gamma, in_dim, out_dim//num_heads, num_heads, + full_graph, use_bias, adaptive_edge_PE, attention_for="h") + + self.O_h = nn.Linear(out_dim, out_dim) + + if self.layer_norm: + self.layer_norm1_h = nn.LayerNorm(out_dim) + + if self.batch_norm: + self.batch_norm1_h = nn.BatchNorm1d(out_dim) + + # FFN for h + self.FFN_h_layer1 = nn.Linear(out_dim, out_dim*2) + self.FFN_h_layer2 = nn.Linear(out_dim*2, out_dim) + + if self.layer_norm: + self.layer_norm2_h = nn.LayerNorm(out_dim) + + if self.batch_norm: + self.batch_norm2_h = nn.BatchNorm1d(out_dim) + + + def forward(self, g, h, p, e, snorm_n): + h_in1 = h # for first residual connection + + # [START] For calculation of h ----------------------------------------------------------------- + + # multi-head attention out + h_attn_out = self.attention_h(g, h, e) + + #Concat multi-head outputs + h = h_attn_out.view(-1, self.out_channels) + + h = F.dropout(h, self.dropout, training=self.training) + + h = self.O_h(h) + + if self.residual: + h = h_in1 + h # residual connection + + # # GN from benchmarking-gnns-v1 + # h = h * snorm_n + + if self.layer_norm: + h = self.layer_norm1_h(h) + + if self.batch_norm: + h = self.batch_norm1_h(h) + + h_in2 = h # for second residual connection + + # FFN for h + h = self.FFN_h_layer1(h) + h = F.relu(h) + h = F.dropout(h, self.dropout, training=self.training) + h = self.FFN_h_layer2(h) + + if self.residual: + h = h_in2 + h # residual connection + + # # GN from benchmarking-gnns-v1 + # h = h * snorm_n + + if self.layer_norm: + h = self.layer_norm2_h(h) + + if self.batch_norm: + h = self.batch_norm2_h(h) + + # [END] For calculation of h ----------------------------------------------------------------- + + + return h, None + + def __repr__(self): + return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__, + self.in_channels, + self.out_channels, self.num_heads, self.residual) \ No newline at end of file diff --git a/layers/graphit_gt_lspe_layer.py b/layers/graphit_gt_lspe_layer.py new file mode 100644 index 0000000..9c81a75 --- /dev/null +++ b/layers/graphit_gt_lspe_layer.py @@ -0,0 +1,325 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl +import dgl.function as fn +import numpy as np + +""" + GraphiT-GT-LSPE: GraphiT-GT with LSPE + +""" + +""" + Util functions +""" +def src_dot_dst(src_field, dst_field, out_field): + def func(edges): + return {out_field: (edges.src[src_field] * edges.dst[dst_field])} + return func + + +def scaling(field, scale_constant): + def func(edges): + return {field: ((edges.data[field]) / scale_constant)} + return func + +# Improving implicit attention scores with explicit edge features, if available +def imp_exp_attn(implicit_attn, explicit_edge): + """ + implicit_attn: the output of K Q + explicit_edge: the explicit edge features + """ + def func(edges): + return {implicit_attn: (edges.data[implicit_attn] * edges.data[explicit_edge])} + return func + + +def exp(field): + def func(edges): + # clamp for softmax numerical stability + return {'score_soft': torch.exp((edges.data[field].sum(-1, keepdim=True)).clamp(-5, 5))} + return func + +def adaptive_edge_PE(field, adaptive_weight): 
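+ # scales the exponentiated attention scores by the random-walk kernel weights (k_RW) carried on the edges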
+ def func(edges): + # initial shape was: adaptive_weight: [edges,1]; data: [edges, num_heads, 1] + # repeating adaptive_weight to have: [edges, num_heads, 1] + edges.data['tmp'] = edges.data[adaptive_weight].repeat(1, edges.data[field].shape[1]).unsqueeze(-1) + return {'score_soft': edges.data['tmp'] * edges.data[field]} + return func + + +""" + Single Attention Head +""" + +class MultiHeadAttentionLayer(nn.Module): + def __init__(self, gamma, in_dim, out_dim, num_heads, full_graph, use_bias, adaptive_edge_PE, attention_for): + super().__init__() + + + self.out_dim = out_dim + self.num_heads = num_heads + self.gamma = gamma + self.full_graph=full_graph + self.attention_for = attention_for + self.adaptive_edge_PE = adaptive_edge_PE + + if self.attention_for == "h": # attention module for h has input h = [h,p], so 2*in_dim for Q,K,V + if use_bias: + self.Q = nn.Linear(in_dim*2, out_dim * num_heads, bias=True) + self.K = nn.Linear(in_dim*2, out_dim * num_heads, bias=True) + self.E = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + if self.full_graph: + self.Q_2 = nn.Linear(in_dim*2, out_dim * num_heads, bias=True) + self.K_2 = nn.Linear(in_dim*2, out_dim * num_heads, bias=True) + self.E_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + self.V = nn.Linear(in_dim*2, out_dim * num_heads, bias=True) + + else: + self.Q = nn.Linear(in_dim*2, out_dim * num_heads, bias=False) + self.K = nn.Linear(in_dim*2, out_dim * num_heads, bias=False) + self.E = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + if self.full_graph: + self.Q_2 = nn.Linear(in_dim*2, out_dim * num_heads, bias=False) + self.K_2 = nn.Linear(in_dim*2, out_dim * num_heads, bias=False) + self.E_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + self.V = nn.Linear(in_dim*2, out_dim * num_heads, bias=False) + + elif self.attention_for == "p": # attention module for p + if use_bias: + self.Q = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.K = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.E = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + if self.full_graph: + self.Q_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.K_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.E_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + self.V = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + else: + self.Q = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.K = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.E = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + if self.full_graph: + self.Q_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.K_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.E_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + self.V = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + def propagate_attention(self, g): + + + if self.full_graph: + real_ids = torch.nonzero(g.edata['real']).squeeze() + fake_ids = torch.nonzero(g.edata['real']==0).squeeze() + + else: + real_ids = g.edges(form='eid') + + g.apply_edges(src_dot_dst('K_h', 'Q_h', 'score'), edges=real_ids) + + if self.full_graph: + g.apply_edges(src_dot_dst('K_2h', 'Q_2h', 'score'), edges=fake_ids) + + + # scale scores by sqrt(d) + g.apply_edges(scaling('score', np.sqrt(self.out_dim))) + + # Use available edge features to modify the scores for edges + g.apply_edges(imp_exp_attn('score', 'E'), edges=real_ids) + + if self.full_graph: + g.apply_edges(imp_exp_attn('score', 'E_2'), edges=fake_ids) + + g.apply_edges(exp('score')) + + # Adaptive 
weighting with k_RW_eij + # Only applicable to full graph, For NOW + if self.adaptive_edge_PE and self.full_graph: + g.apply_edges(adaptive_edge_PE('score_soft', 'k_RW')) + del g.edata['tmp'] + + # Send weighted values to target nodes + eids = g.edges() + g.send_and_recv(eids, fn.src_mul_edge('V_h', 'score_soft', 'V_h'), fn.sum('V_h', 'wV')) + g.send_and_recv(eids, fn.copy_edge('score_soft', 'score_soft'), fn.sum('score_soft', 'z')) + + + def forward(self, g, h, p, e): + if self.attention_for == "h": + h = torch.cat((h, p), -1) + elif self.attention_for == "p": + h = p + + Q_h = self.Q(h) + K_h = self.K(h) + E = self.E(e) + + if self.full_graph: + Q_2h = self.Q_2(h) + K_2h = self.K_2(h) + E_2 = self.E_2(e) + + V_h = self.V(h) + + + # Reshaping into [num_nodes, num_heads, feat_dim] to + # get projections for multi-head attention + g.ndata['Q_h'] = Q_h.view(-1, self.num_heads, self.out_dim) + g.ndata['K_h'] = K_h.view(-1, self.num_heads, self.out_dim) + g.edata['E'] = E.view(-1, self.num_heads, self.out_dim) + + + if self.full_graph: + g.ndata['Q_2h'] = Q_2h.view(-1, self.num_heads, self.out_dim) + g.ndata['K_2h'] = K_2h.view(-1, self.num_heads, self.out_dim) + g.edata['E_2'] = E_2.view(-1, self.num_heads, self.out_dim) + + g.ndata['V_h'] = V_h.view(-1, self.num_heads, self.out_dim) + + self.propagate_attention(g) + + h_out = g.ndata['wV'] / (g.ndata['z'] + torch.full_like(g.ndata['z'], 1e-6)) + + del g.ndata['wV'] + del g.ndata['z'] + del g.ndata['Q_h'] + del g.ndata['K_h'] + del g.edata['E'] + + if self.full_graph: + del g.ndata['Q_2h'] + del g.ndata['K_2h'] + del g.edata['E_2'] + + return h_out + + +class GraphiT_GT_LSPE_Layer(nn.Module): + """ + Param: + """ + def __init__(self, gamma, in_dim, out_dim, num_heads, full_graph, dropout=0.0, + layer_norm=False, batch_norm=True, residual=True, adaptive_edge_PE=False, use_bias=False): + super().__init__() + + self.in_channels = in_dim + self.out_channels = out_dim + self.num_heads = num_heads + self.dropout = dropout + self.residual = residual + self.layer_norm = layer_norm + self.batch_norm = batch_norm + + self.attention_h = MultiHeadAttentionLayer(gamma, in_dim, out_dim//num_heads, num_heads, + full_graph, use_bias, adaptive_edge_PE, attention_for="h") + self.attention_p = MultiHeadAttentionLayer(gamma, in_dim, out_dim//num_heads, num_heads, + full_graph, use_bias, adaptive_edge_PE, attention_for="p") + + self.O_h = nn.Linear(out_dim, out_dim) + self.O_p = nn.Linear(out_dim, out_dim) + + if self.layer_norm: + self.layer_norm1_h = nn.LayerNorm(out_dim) + + if self.batch_norm: + self.batch_norm1_h = nn.BatchNorm1d(out_dim) + + # FFN for h + self.FFN_h_layer1 = nn.Linear(out_dim, out_dim*2) + self.FFN_h_layer2 = nn.Linear(out_dim*2, out_dim) + + if self.layer_norm: + self.layer_norm2_h = nn.LayerNorm(out_dim) + + if self.batch_norm: + self.batch_norm2_h = nn.BatchNorm1d(out_dim) + + + def forward(self, g, h, p, e, snorm_n): + h_in1 = h # for first residual connection + p_in1 = p # for first residual connection + + # [START] For calculation of h ----------------------------------------------------------------- + + # multi-head attention out + h_attn_out = self.attention_h(g, h, p, e) + + #Concat multi-head outputs + h = h_attn_out.view(-1, self.out_channels) + + h = F.dropout(h, self.dropout, training=self.training) + + h = self.O_h(h) + + if self.residual: + h = h_in1 + h # residual connection + + # # GN from benchmarking-gnns-v1 + # h = h * snorm_n + + if self.layer_norm: + h = self.layer_norm1_h(h) + + if self.batch_norm: + h = 
self.batch_norm1_h(h) + + h_in2 = h # for second residual connection + + # FFN for h + h = self.FFN_h_layer1(h) + h = F.relu(h) + h = F.dropout(h, self.dropout, training=self.training) + h = self.FFN_h_layer2(h) + + if self.residual: + h = h_in2 + h # residual connection + + # # GN from benchmarking-gnns-v1 + # h = h * snorm_n + + if self.layer_norm: + h = self.layer_norm2_h(h) + + if self.batch_norm: + h = self.batch_norm2_h(h) + + # [END] For calculation of h ----------------------------------------------------------------- + + + # [START] For calculation of p ----------------------------------------------------------------- + + # multi-head attention out + p_attn_out = self.attention_p(g, None, p, e) + + #Concat multi-head outputs + p = p_attn_out.view(-1, self.out_channels) + + p = F.dropout(p, self.dropout, training=self.training) + + p = self.O_p(p) + + p = torch.tanh(p) + + if self.residual: + p = p_in1 + p # residual connection + + # [END] For calculation of p ----------------------------------------------------------------- + + return h, p + + def __repr__(self): + return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__, + self.in_channels, + self.out_channels, self.num_heads, self.residual) \ No newline at end of file diff --git a/layers/mlp_readout_layer.py b/layers/mlp_readout_layer.py new file mode 100644 index 0000000..20a4463 --- /dev/null +++ b/layers/mlp_readout_layer.py @@ -0,0 +1,45 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +""" + MLP Layer used after graph vector representation +""" + +class MLPReadout(nn.Module): + + def __init__(self, input_dim, output_dim, L=2): #L=nb_hidden_layers + super().__init__() + list_FC_layers = [ nn.Linear( input_dim//2**l , input_dim//2**(l+1) , bias=True ) for l in range(L) ] + list_FC_layers.append(nn.Linear( input_dim//2**L , output_dim , bias=True )) + self.FC_layers = nn.ModuleList(list_FC_layers) + self.L = L + + def forward(self, x): + y = x + for l in range(self.L): + y = self.FC_layers[l](y) + y = F.relu(y) + y = self.FC_layers[self.L](y) + return y + + + +class MLPReadout2(nn.Module): + + def __init__(self, input_dim, output_dim, dropout_2=0.0, L=2): # L=nb_hidden_layers + super().__init__() + list_FC_layers = [nn.Linear(input_dim // 2 ** l, input_dim // 2 ** (l + 1), bias=True) for l in range(L)] + list_FC_layers.append(nn.Linear(input_dim // 2 ** L, output_dim, bias=True)) + self.FC_layers = nn.ModuleList(list_FC_layers) + self.L = L + self.dropout_2 = dropout_2 + + def forward(self, x): + y = x + for l in range(self.L): + y = F.dropout(y, self.dropout_2, training=self.training) + y = self.FC_layers[l](y) + y = F.relu(y) + y = self.FC_layers[self.L](y) + return y \ No newline at end of file diff --git a/layers/pna_layer.py b/layers/pna_layer.py new file mode 100644 index 0000000..1140e39 --- /dev/null +++ b/layers/pna_layer.py @@ -0,0 +1,270 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import dgl.function as fn +import dgl + +from .pna_utils import AGGREGATORS, SCALERS, MLP, FCLayer + +""" + PNA: Principal Neighbourhood Aggregation + Gabriele Corso, Luca Cavalleri, Dominique Beaini, Pietro Lio, Petar Velickovic + https://arxiv.org/abs/2004.05718 +""" + + +class PNATower(nn.Module): + def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d, + pretrans_layers, posttrans_layers, edge_features, edge_dim): + super().__init__() + self.dropout = dropout + self.graph_norm = graph_norm + 
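# graph_norm rescales node features by the graph-size factor snorm_n in forward() (benchmarking-gnns convention) + 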
self.batch_norm = batch_norm + self.edge_features = edge_features + + self.batchnorm_h = nn.BatchNorm1d(out_dim) + self.aggregators = aggregators + self.scalers = scalers + self.pretrans_h = MLP(in_size=2 * in_dim + (edge_dim if edge_features else 0), hidden_size=in_dim, + out_size=in_dim, layers=pretrans_layers, mid_activation='relu', last_activation='none') + + self.posttrans_h = MLP(in_size=(len(aggregators) * len(scalers) + 1) * in_dim, hidden_size=out_dim, + out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none') + + self.avg_d = avg_d + + def pretrans_edges(self, edges): + if self.edge_features: + z2_for_h = torch.cat([edges.src['h'], edges.dst['h'], edges.data['ef']], dim=1) + else: + z2_for_h = torch.cat([edges.src['h'], edges.dst['h']], dim=1) + + return {'e_for_h': self.pretrans_h(z2_for_h)} + + # Message func for h + def message_func_for_h(self, edges): + return {'e_for_h': edges.data['e_for_h']} + + # Reduce func for h + def reduce_func_for_h(self, nodes): + h = nodes.mailbox['e_for_h'] + D = h.shape[-2] + h = torch.cat([aggregate(h) for aggregate in self.aggregators], dim=1) + h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1) + return {'h': h} + + def forward(self, g, h, e, snorm_n): + g.ndata['h'] = h + + if self.edge_features: # add the edges information only if edge_features = True + g.edata['ef'] = e + + # pretransformation + g.apply_edges(self.pretrans_edges) + + # aggregation for h + g.update_all(self.message_func_for_h, self.reduce_func_for_h) + h = torch.cat([h, g.ndata['h']], dim=1) + + # posttransformation + h = self.posttrans_h(h) + + # graph and batch normalization + if self.graph_norm: + h = h * snorm_n + + if self.batch_norm: + h = self.batchnorm_h(h) + + h = F.dropout(h, self.dropout, training=self.training) + + return h + + +class PNALayer(nn.Module): + + def __init__(self, in_dim, out_dim, aggregators, scalers, avg_d, dropout, graph_norm, batch_norm, towers=1, + pretrans_layers=1, posttrans_layers=1, divide_input=True, residual=False, edge_features=False, + edge_dim=0): + """ + :param in_dim: size of the input per node + :param out_dim: size of the output per node + :param aggregators: set of aggregation function identifiers + :param scalers: set of scaling functions identifiers + :param avg_d: average degree of nodes in the training set, used by scalers to normalize + :param dropout: dropout used + :param graph_norm: whether to use graph normalisation + :param batch_norm: whether to use batch normalisation + :param towers: number of towers to use + :param pretrans_layers: number of layers in the transformation before the aggregation + :param posttrans_layers: number of layers in the transformation after the aggregation + :param divide_input: whether the input features should be split between towers or not + :param residual: whether to add a residual connection + :param edge_features: whether to use the edge features + :param edge_dim: size of the edge features + """ + super().__init__() + assert ((not divide_input) or in_dim % towers == 0), "if divide_input is set the number of towers has to divide in_dim" + assert (out_dim % towers == 0), "the number of towers has to divide the out_dim" + assert avg_d is not None + + # retrieve the aggregators and scalers functions + aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()] + scalers = [SCALERS[scale] for scale in scalers.split()] + + self.divide_input = divide_input + self.input_tower = in_dim // towers if divide_input else in_dim + 
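# when divide_input is set, each tower processes an in_dim//towers slice of h and the tower outputs are concatenated back to out_dim + 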
self.output_tower = out_dim // towers + self.in_dim = in_dim + self.out_dim = out_dim + self.edge_features = edge_features + self.residual = residual + if in_dim != out_dim: + self.residual = False + + # convolution + self.towers = nn.ModuleList() + for _ in range(towers): + self.towers.append(PNATower(in_dim=self.input_tower, out_dim=self.output_tower, aggregators=aggregators, + scalers=scalers, avg_d=avg_d, pretrans_layers=pretrans_layers, + posttrans_layers=posttrans_layers, batch_norm=batch_norm, dropout=dropout, + graph_norm=graph_norm, edge_features=edge_features, edge_dim=edge_dim)) + # mixing network + self.mixing_network_h = FCLayer(out_dim, out_dim, activation='LeakyReLU') + + def forward(self, g, h, p, e, snorm_n): + h_in = h # for residual connection + + if self.divide_input: + tower_outs = [tower(g, + h[:, n_tower * self.input_tower: (n_tower + 1) * self.input_tower], + e, + snorm_n) for n_tower, tower in enumerate(self.towers)] + h_tower_outs = tower_outs + h_cat = torch.cat(h_tower_outs, dim=1) + else: + # PNATower.forward takes (g, h, e, snorm_n); the vanilla PNA tower does not use p + tower_outs = [tower(g, h, e, snorm_n) for tower in self.towers] + h_tower_outs = tower_outs + h_cat = torch.cat(h_tower_outs, dim=1) + + h_out = self.mixing_network_h(h_cat) + + if self.residual: + h_out = h_in + h_out # residual connection + + return h_out, None + + def __repr__(self): + return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__, self.in_dim, self.out_dim) + + + + + +# This layer file below has no towers +# and is similar to DGNLayerComplex used for best PNA score on MOLPCBA +# implemented here https://github.com/Saro00/DGN/blob/master/models/dgl/dgn_layer.py + +class PNANoTowersLayer(nn.Module): + def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d, + pretrans_layers, posttrans_layers, residual, edge_features, edge_dim=0, use_lapeig_loss=False): + super().__init__() + self.dropout = dropout + self.graph_norm = graph_norm + self.batch_norm = batch_norm + self.edge_features = edge_features + self.in_dim = in_dim + self.out_dim = out_dim + self.residual = residual + if in_dim != out_dim: + self.residual = False + + self.batchnorm_h = nn.BatchNorm1d(out_dim) + + # retrieve the aggregators and scalers functions + aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()] + scalers = [SCALERS[scale] for scale in scalers.split()] + + self.aggregators = aggregators + self.scalers = scalers + + if self.edge_features: + self.pretrans_h = MLP(in_size=2 * in_dim + (edge_dim if edge_features else 0), hidden_size=in_dim, + out_size=in_dim, layers=pretrans_layers, mid_activation='relu', last_activation='none') + + self.posttrans_h = MLP(in_size=(len(aggregators) * len(scalers) + 1) * in_dim, hidden_size=out_dim, + out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none') + else: + self.posttrans_h = MLP(in_size=(len(aggregators) * len(scalers)) * in_dim, hidden_size=out_dim, + out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none') + + self.avg_d = avg_d + + def pretrans_edges(self, edges): + if self.edge_features: + z2_for_h = torch.cat([edges.src['h'], edges.dst['h'], edges.data['ef']], dim=1) + else: + z2_for_h = torch.cat([edges.src['h'], edges.dst['h']], dim=1) + + return {'e_for_h': self.pretrans_h(z2_for_h)} + + # Message func for h + def message_func_for_h(self, edges): + return {'e_for_h': edges.data['e_for_h']} + + # Reduce func for h + def reduce_func_for_h(self, nodes): + if self.edge_features: + h = 
nodes.mailbox['e_for_h'] + else: + h = nodes.mailbox['m_h'] + D = h.shape[-2] + h = torch.cat([aggregate(h) for aggregate in self.aggregators], dim=1) + if len(self.scalers) > 1: + h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1) + return {'h': h} + + def forward(self, g, h, p, e, snorm_n): + + h = F.dropout(h, self.dropout, training=self.training) + + h_in = h # for residual connection + + g.ndata['h'] = h + + if self.edge_features: # add the edges information only if edge_features = True + g.edata['ef'] = e + + if self.edge_features: + # pretransformation + g.apply_edges(self.pretrans_edges) + + if self.edge_features: + # aggregation for h + g.update_all(self.message_func_for_h, self.reduce_func_for_h) + h = torch.cat([h, g.ndata['h']], dim=1) + else: + # aggregation for h + g.update_all(fn.copy_u('h', 'm_h'), self.reduce_func_for_h) + h = g.ndata['h'] + + # posttransformation + h = self.posttrans_h(h) + + # graph and batch normalization + if self.graph_norm and self.edge_features: + h = h * snorm_n + + if self.batch_norm: + h = self.batchnorm_h(h) + + h = F.relu(h) + + if self.residual: + h = h_in + h # residual connection + + return h, None + + def __repr__(self): + return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__, self.in_dim, self.out_dim) + diff --git a/layers/pna_lspe_layer.py b/layers/pna_lspe_layer.py new file mode 100644 index 0000000..4fd35ba --- /dev/null +++ b/layers/pna_lspe_layer.py @@ -0,0 +1,350 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import dgl.function as fn +import dgl + +from .pna_utils import AGGREGATORS, SCALERS, MLP, FCLayer + +""" + PNALSPE: PNA with LSPE + + PNA: Principal Neighbourhood Aggregation + Gabriele Corso, Luca Cavalleri, Dominique Beaini, Pietro Lio, Petar Velickovic + https://arxiv.org/abs/2004.05718 +""" + + +class PNATower(nn.Module): + def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d, + pretrans_layers, posttrans_layers, edge_features, edge_dim): + super().__init__() + self.dropout = dropout + self.graph_norm = graph_norm + self.batch_norm = batch_norm + self.edge_features = edge_features + + self.batchnorm_h = nn.BatchNorm1d(out_dim) + self.aggregators = aggregators + self.scalers = scalers + self.pretrans_h = MLP(in_size=2 * 2 * in_dim + (edge_dim if edge_features else 0), hidden_size=in_dim, + out_size=in_dim, layers=pretrans_layers, mid_activation='relu', last_activation='none') + self.pretrans_p = MLP(in_size=2 * in_dim + (edge_dim if edge_features else 0), hidden_size=in_dim, + out_size=in_dim, layers=pretrans_layers, mid_activation='tanh', last_activation='none') + + self.posttrans_h = MLP(in_size=(len(aggregators) * len(scalers) + 2) * in_dim, hidden_size=out_dim, + out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none') + self.posttrans_p = MLP(in_size=(len(aggregators) * len(scalers) + 1) * in_dim, hidden_size=out_dim, + out_size=out_dim, layers=posttrans_layers, mid_activation='tanh', last_activation='none') + + self.avg_d = avg_d + + def pretrans_edges(self, edges): + if self.edge_features: + z2_for_h = torch.cat([edges.src['h'], edges.dst['h'], edges.data['ef']], dim=1) + z2_for_p = torch.cat([edges.src['p'], edges.dst['p'], edges.data['ef']], dim=1) + else: + z2_for_h = torch.cat([edges.src['h'], edges.dst['h']], dim=1) + z2_for_p = torch.cat([edges.src['p'], edges.dst['p']], dim=1) + + return {'e_for_h': self.pretrans_h(z2_for_h), 'e_for_p': 
self.pretrans_p(z2_for_p)} + + # Message func for h + def message_func_for_h(self, edges): + return {'e_for_h': edges.data['e_for_h']} + + # Reduce func for h + def reduce_func_for_h(self, nodes): + h = nodes.mailbox['e_for_h'] + D = h.shape[-2] + h = torch.cat([aggregate(h) for aggregate in self.aggregators], dim=1) + h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1) + return {'h': h} + + # Message func for p + def message_func_for_p(self, edges): + return {'e_for_p': edges.data['e_for_p']} + + # Reduce func for p + def reduce_func_for_p(self, nodes): + p = nodes.mailbox['e_for_p'] + D = p.shape[-2] + p = torch.cat([aggregate(p) for aggregate in self.aggregators], dim=1) + p = torch.cat([scale(p, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1) + return {'p': p} + + def forward(self, g, h, p, e, snorm_n): + g.ndata['h'] = h + g.ndata['p'] = p + + if self.edge_features: # add the edges information only if edge_features = True + g.edata['ef'] = e + + # pretransformation + g.apply_edges(self.pretrans_edges) + + # aggregation for h + g.update_all(self.message_func_for_h, self.reduce_func_for_h) + h = torch.cat([h, g.ndata['h']], dim=1) + + # aggregation for p + g.update_all(self.message_func_for_p, self.reduce_func_for_p) + p = torch.cat([p, g.ndata['p']], dim=1) + + # posttransformation + h = self.posttrans_h(h) + p = self.posttrans_p(p) + + # graph and batch normalization + if self.graph_norm: + h = h * snorm_n + + if self.batch_norm: + h = self.batchnorm_h(h) + + h = F.dropout(h, self.dropout, training=self.training) + p = F.dropout(p, self.dropout, training=self.training) + + return h, p + + +class PNALSPELayer(nn.Module): + + def __init__(self, in_dim, out_dim, aggregators, scalers, avg_d, dropout, graph_norm, batch_norm, towers=1, + pretrans_layers=1, posttrans_layers=1, divide_input=True, residual=False, edge_features=False, + edge_dim=0): + """ + :param in_dim: size of the input per node + :param out_dim: size of the output per node + :param aggregators: set of aggregation function identifiers + :param scalers: set of scaling functions identifiers + :param avg_d: average degree of nodes in the training set, used by scalers to normalize + :param dropout: dropout used + :param graph_norm: whether to use graph normalisation + :param batch_norm: whether to use batch normalisation + :param towers: number of towers to use + :param pretrans_layers: number of layers in the transformation before the aggregation + :param posttrans_layers: number of layers in the transformation after the aggregation + :param divide_input: whether the input features should be split between towers or not + :param residual: whether to add a residual connection + :param edge_features: whether to use the edge features + :param edge_dim: size of the edge features + """ + super().__init__() + assert ((not divide_input) or in_dim % towers == 0), "if divide_input is set the number of towers has to divide in_dim" + assert (out_dim % towers == 0), "the number of towers has to divide the out_dim" + assert avg_d is not None + + # retrieve the aggregators and scalers functions + aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()] + scalers = [SCALERS[scale] for scale in scalers.split()] + + self.divide_input = divide_input + self.input_tower = in_dim // towers if divide_input else in_dim + self.output_tower = out_dim // towers + self.in_dim = in_dim + self.out_dim = out_dim + self.edge_features = edge_features + self.residual = residual + if in_dim != out_dim: + 
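# residual connections require matching input/output dimensions + 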
self.residual = False + + # convolution + self.towers = nn.ModuleList() + for _ in range(towers): + self.towers.append(PNATower(in_dim=self.input_tower, out_dim=self.output_tower, aggregators=aggregators, + scalers=scalers, avg_d=avg_d, pretrans_layers=pretrans_layers, + posttrans_layers=posttrans_layers, batch_norm=batch_norm, dropout=dropout, + graph_norm=graph_norm, edge_features=edge_features, edge_dim=edge_dim)) + # mixing network + self.mixing_network_h = FCLayer(out_dim, out_dim, activation='LeakyReLU') + self.mixing_network_p = FCLayer(out_dim, out_dim, activation='tanh') + + def forward(self, g, h, p, e, snorm_n): + h_in = h # for residual connection + p_in = p # for residual connection + + # Concating p to h, as in PEGNN + h = torch.cat((h, p), -1) + + if self.divide_input: + tower_outs = [tower(g, + h[:, n_tower * 2 * self.input_tower: (n_tower + 1) * 2 * self.input_tower], + p[:, n_tower * self.input_tower: (n_tower + 1) * self.input_tower], + e, + snorm_n) for n_tower, tower in enumerate(self.towers)] + h_tower_outs, p_tower_outs = map(list,zip(*tower_outs)) + h_cat = torch.cat(h_tower_outs, dim=1) + p_cat = torch.cat(p_tower_outs, dim=1) + else: + tower_outs = [tower(g, h, p, e, snorm_n) for tower in self.towers] + h_tower_outs, p_tower_outs = map(list,zip(*tower_outs)) + h_cat = torch.cat(h_tower_outs, dim=1) + p_cat = torch.cat(p_tower_outs, dim=1) + + h_out = self.mixing_network_h(h_cat) + p_out = self.mixing_network_p(p_cat) + + + if self.residual: + h_out = h_in + h_out # residual connection + p_out = p_in + p_out # residual connection + + return h_out, p_out + + def __repr__(self): + return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__, self.in_dim, self.out_dim) + + + + + +# This layer file below has no towers +# and is similar to DGNLayerComplex used for best PNA score on MOLPCBA +# implemented here https://github.com/Saro00/DGN/blob/master/models/dgl/dgn_layer.py + +class PNANoTowersLSPELayer(nn.Module): + def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d, + pretrans_layers, posttrans_layers, residual, edge_features, edge_dim=0, use_lapeig_loss=False): + super().__init__() + self.dropout = dropout + self.graph_norm = graph_norm + self.batch_norm = batch_norm + self.edge_features = edge_features + self.in_dim = in_dim + self.out_dim = out_dim + self.residual = residual + if in_dim != out_dim: + self.residual = False + + self.use_lapeig_loss = use_lapeig_loss + + self.batchnorm_h = nn.BatchNorm1d(out_dim) + + # retrieve the aggregators and scalers functions + aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()] + scalers = [SCALERS[scale] for scale in scalers.split()] + + self.aggregators = aggregators + self.scalers = scalers + + if self.edge_features: + self.pretrans_h = MLP(in_size=2 * 2 * in_dim + (edge_dim if edge_features else 0), hidden_size=in_dim, + out_size=in_dim, layers=pretrans_layers, mid_activation='relu', last_activation='none') + self.pretrans_p = MLP(in_size=2 * in_dim + (edge_dim if edge_features else 0), hidden_size=in_dim, + out_size=in_dim, layers=pretrans_layers, mid_activation='tanh', last_activation='none') + + self.posttrans_h = MLP(in_size=(len(aggregators) * len(scalers) + 2) * in_dim, hidden_size=out_dim, + out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none') + self.posttrans_p = MLP(in_size=(len(aggregators) * len(scalers) + 1) * in_dim, hidden_size=out_dim, + out_size=out_dim, layers=posttrans_layers, 
mid_activation='tanh', last_activation='none') + else: + self.posttrans_h = MLP(in_size=(len(aggregators) * len(scalers)) * 2 * in_dim, hidden_size=out_dim, + out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none') + self.posttrans_p = MLP(in_size=(len(aggregators) * len(scalers)) * in_dim, hidden_size=out_dim, + out_size=out_dim, layers=posttrans_layers, mid_activation='tanh', last_activation='none') + + self.avg_d = avg_d + + def pretrans_edges(self, edges): + if self.edge_features: + z2_for_h = torch.cat([edges.src['h'], edges.dst['h'], edges.data['ef']], dim=1) + z2_for_p = torch.cat([edges.src['p'], edges.dst['p'], edges.data['ef']], dim=1) + else: + z2_for_h = torch.cat([edges.src['h'], edges.dst['h']], dim=1) + z2_for_p = torch.cat([edges.src['p'], edges.dst['p']], dim=1) + + return {'e_for_h': self.pretrans_h(z2_for_h), 'e_for_p': self.pretrans_p(z2_for_p)} + + # Message func for h + def message_func_for_h(self, edges): + return {'e_for_h': edges.data['e_for_h']} + + # Reduce func for h + def reduce_func_for_h(self, nodes): + if self.edge_features: + h = nodes.mailbox['e_for_h'] + else: + h = nodes.mailbox['m_h'] + D = h.shape[-2] + h = torch.cat([aggregate(h) for aggregate in self.aggregators], dim=1) + if len(self.scalers) > 1: + h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1) + return {'h': h} + + # Message func for p + def message_func_for_p(self, edges): + return {'e_for_p': edges.data['e_for_p']} + + # Reduce func for p + def reduce_func_for_p(self, nodes): + if self.edge_features: + p = nodes.mailbox['e_for_p'] + else: + p = nodes.mailbox['m_p'] + D = p.shape[-2] + p = torch.cat([aggregate(p) for aggregate in self.aggregators], dim=1) + if len(self.scalers) > 1: + p = torch.cat([scale(p, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1) + return {'p': p} + + def forward(self, g, h, p, e, snorm_n): + + h = F.dropout(h, self.dropout, training=self.training) + p = F.dropout(p, self.dropout, training=self.training) + + h_in = h # for residual connection + p_in = p # for residual connection + + # Concatenating p to h, as in PEGNN + h = torch.cat((h, p), -1) + + g.ndata['h'] = h + g.ndata['p'] = p + + if self.edge_features: # add the edges information only if edge_features = True + g.edata['ef'] = e + # pretransformation + g.apply_edges(self.pretrans_edges) + + if self.edge_features: + # aggregation for h + g.update_all(self.message_func_for_h, self.reduce_func_for_h) + h = torch.cat([h, g.ndata['h']], dim=1) + + # aggregation for p + g.update_all(self.message_func_for_p, self.reduce_func_for_p) + p = torch.cat([p, g.ndata['p']], dim=1) + else: + # aggregation for h + g.update_all(fn.copy_u('h', 'm_h'), self.reduce_func_for_h) + h = g.ndata['h'] + + # aggregation for p + g.update_all(fn.copy_u('p', 'm_p'), self.reduce_func_for_p) + p = g.ndata['p'] + + # posttransformation + h = self.posttrans_h(h) + p = self.posttrans_p(p) + + # graph and batch normalization + if self.graph_norm and self.edge_features: + h = h * snorm_n + + if self.batch_norm: + h = self.batchnorm_h(h) + + h = F.relu(h) + p = torch.tanh(p) + + if self.residual: + h = h_in + h # residual connection + p = p_in + p # residual connection + + return h, p + + def __repr__(self): + return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__, self.in_dim, self.out_dim) \ No newline at end of file diff --git a/layers/pna_utils.py b/layers/pna_utils.py new file mode 100644 index 0000000..61c9fd5 --- /dev/null
+++ b/layers/pna_utils.py @@ -0,0 +1,407 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np + + +# PNA Aggregators ------------------------------------------------------------------------------ + +EPS = 1e-5 + + +def aggregate_mean(h): + return torch.mean(h, dim=1) + + +def aggregate_max(h): + return torch.max(h, dim=1)[0] + + +def aggregate_min(h): + return torch.min(h, dim=1)[0] + + +def aggregate_std(h): + return torch.sqrt(aggregate_var(h) + EPS) + + +def aggregate_var(h): + h_mean_squares = torch.mean(h * h, dim=-2) + h_mean = torch.mean(h, dim=-2) + var = torch.relu(h_mean_squares - h_mean * h_mean) + return var + + +def aggregate_moment(h, n=3): + # for each node (E[(X-E[X])^n])^{1/n} + # EPS is added to the absolute value of expectation before taking the nth root for stability + h_mean = torch.mean(h, dim=1, keepdim=True) + h_n = torch.mean(torch.pow(h - h_mean, n), dim=1) + rooted_h_n = torch.sign(h_n) * torch.pow(torch.abs(h_n) + EPS, 1. / n) + return rooted_h_n + + +def aggregate_moment_3(h): + return aggregate_moment(h, n=3) + + +def aggregate_moment_4(h): + return aggregate_moment(h, n=4) + + +def aggregate_moment_5(h): + return aggregate_moment(h, n=5) + + +def aggregate_sum(h): + return torch.sum(h, dim=1) + + +AGGREGATORS = {'mean': aggregate_mean, 'sum': aggregate_sum, 'max': aggregate_max, 'min': aggregate_min, + 'std': aggregate_std, 'var': aggregate_var, 'moment3': aggregate_moment_3, 'moment4': aggregate_moment_4, + 'moment5': aggregate_moment_5} + + + + +# PNA Scalers --------------------------------------------------------------------------------- + + +# each scaler is a function that takes as input X (B x N x Din), adj (B x N x N) and +# avg_d (dictionary containing averages over training set) and returns X_scaled (B x N x Din) as output + +def scale_identity(h, D=None, avg_d=None): + return h + + +def scale_amplification(h, D, avg_d): + # log(D + 1) / d * h where d is the average of the ``log(D + 1)`` in the training set + return h * (np.log(D + 1) / avg_d["log"]) + + +def scale_attenuation(h, D, avg_d): + # (log(D + 1))^-1 / d * X where d is the average of the ``(log(D + 1))^-1`` in the training set + return h * (avg_d["log"] / np.log(D + 1)) + + +SCALERS = {'identity': scale_identity, 'amplification': scale_amplification, 'attenuation': scale_attenuation} + + + + + +SUPPORTED_ACTIVATION_MAP = {'ReLU', 'Sigmoid', 'Tanh', 'ELU', 'SELU', 'GLU', 'LeakyReLU', 'Softplus', 'None'} + + +def get_activation(activation): + """ returns the activation function represented by the input string """ + if activation and callable(activation): + # activation is already a function + return activation + # search in SUPPORTED_ACTIVATION_MAP a torch.nn.modules.activation + activation = [x for x in SUPPORTED_ACTIVATION_MAP if activation.lower() == x.lower()] + assert len(activation) == 1 and isinstance(activation[0], str), 'Unhandled activation function' + activation = activation[0] + if activation.lower() == 'none': + return None + return vars(torch.nn.modules.activation)[activation]() + + +class Set2Set(torch.nn.Module): + r""" + Set2Set global pooling operator from the `"Order Matters: Sequence to sequence for sets" + <https://arxiv.org/abs/1511.06391>`_ paper. This pooling layer performs the following operation + + .. 
math:: + \mathbf{q}_t &= \mathrm{LSTM}(\mathbf{q}^{*}_{t-1}) + + \alpha_{i,t} &= \mathrm{softmax}(\mathbf{x}_i \cdot \mathbf{q}_t) + + \mathbf{r}_t &= \sum_{i=1}^N \alpha_{i,t} \mathbf{x}_i + + \mathbf{q}^{*}_t &= \mathbf{q}_t \, \Vert \, \mathbf{r}_t, + + where :math:`\mathbf{q}^{*}_T` defines the output of the layer with twice + the dimensionality as the input. + + Arguments + --------- + input_dim: int + Size of each input sample. + hidden_dim: int, optional + the dim of set representation which corresponds to the input dim of the LSTM in Set2Set. + This is typically the sum of the input dim and the lstm output dim. If not provided, it will be set to :obj:`input_dim*2` + steps: int, optional + Number of iterations :math:`T`. If not provided, the number of nodes will be used. + num_layers : int, optional + Number of recurrent layers (e.g., :obj:`num_layers=2` would mean stacking two LSTMs together) + (Default, value = 1) + """ + + def __init__(self, nin, nhid=None, steps=None, num_layers=1, activation=None, device='cpu'): + super(Set2Set, self).__init__() + self.steps = steps + self.nin = nin + self.nhid = nin * 2 if nhid is None else nhid + if self.nhid <= self.nin: + raise ValueError('Set2Set hidden_dim should be larger than input_dim') + # the hidden is a concatenation of weighted sum of embedding and LSTM output + self.lstm_output_dim = self.nhid - self.nin + self.num_layers = num_layers + self.lstm = nn.LSTM(self.nhid, self.nin, num_layers=num_layers, batch_first=True).to(device) + self.softmax = nn.Softmax(dim=1) + + def forward(self, x): + r""" + Applies the pooling on input tensor x + + Arguments + ---------- + x: torch.FloatTensor + Input tensor of size (B, N, D) + + Returns + ------- + x: `torch.FloatTensor` + Tensor resulting from the set2set pooling operation. + """ + + batch_size = x.shape[0] + n = self.steps or x.shape[1] + + h = (x.new_zeros((self.num_layers, batch_size, self.nin)), + x.new_zeros((self.num_layers, batch_size, self.nin))) + + q_star = x.new_zeros(batch_size, 1, self.nhid) + + for i in range(n): + # q: batch_size x 1 x input_dim + q, h = self.lstm(q_star, h) + # e: batch_size x n x 1 + e = torch.matmul(x, torch.transpose(q, 1, 2)) + a = self.softmax(e) + r = torch.sum(a * x, dim=1, keepdim=True) + q_star = torch.cat([q, r], dim=-1) + + return torch.squeeze(q_star, dim=1) + + +class FCLayer(nn.Module): + r""" + A simple fully connected and customizable layer. This layer is centered around a torch.nn.Linear module. + The order in which transformations are applied is: + + #. Dense Layer + #. Activation + #. Dropout (if applicable) + #. Batch Normalization (if applicable) + + Arguments + ---------- + in_size: int + Input dimension of the layer (the torch.nn.Linear) + out_size: int + Output dimension of the layer. + dropout: float, optional + The ratio of units to dropout. No dropout by default. + (Default value = 0.) + activation: str or callable, optional + Activation function to use. + (Default value = relu) + b_norm: bool, optional + Whether to use batch normalization + (Default value = False) + bias: bool, optional + Whether to enable bias in for the linear layer. + (Default value = True) + init_fn: callable, optional + Initialization function to use for the weight of the layer. Default is + :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` with :math:`k=\frac{1}{ \text{in_size}}` + (Default value = None) + + Attributes + ---------- + dropout: int + The ratio of units to dropout. 
+ b_norm: int + Whether to use batch normalization + linear: torch.nn.Linear + The linear layer + activation: the torch.nn.Module + The activation layer + init_fn: function + Initialization function used for the weight of the layer + in_size: int + Input dimension of the linear layer + out_size: int + Output dimension of the linear layer + """ + + def __init__(self, in_size, out_size, activation='relu', dropout=0., b_norm=False, bias=True, init_fn=None, + device='cpu'): + super(FCLayer, self).__init__() + + self.__params = locals() + del self.__params['__class__'] + del self.__params['self'] + self.in_size = in_size + self.out_size = out_size + self.bias = bias + self.linear = nn.Linear(in_size, out_size, bias=bias).to(device) + self.dropout = None + self.b_norm = None + if dropout: + self.dropout = nn.Dropout(p=dropout) + if b_norm: + self.b_norm = nn.BatchNorm1d(out_size).to(device) + self.activation = get_activation(activation) + self.init_fn = nn.init.xavier_uniform_ + + self.reset_parameters() + + def reset_parameters(self, init_fn=None): + init_fn = init_fn or self.init_fn + if init_fn is not None: + init_fn(self.linear.weight, 1 / self.in_size) + if self.bias: + self.linear.bias.data.zero_() + + def forward(self, x): + h = self.linear(x) + if self.activation is not None: + h = self.activation(h) + if self.dropout is not None: + h = self.dropout(h) + if self.b_norm is not None: + if h.shape[1] != self.out_size: + h = self.b_norm(h.transpose(1, 2)).transpose(1, 2) + else: + h = self.b_norm(h) + return h + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + str(self.in_size) + ' -> ' \ + + str(self.out_size) + ')' + + +class MLP(nn.Module): + """ + Simple multi-layer perceptron, built of a series of FCLayers + """ + + def __init__(self, in_size, hidden_size, out_size, layers, mid_activation='relu', last_activation='none', + dropout=0., mid_b_norm=False, last_b_norm=False, device='cpu'): + super(MLP, self).__init__() + + self.in_size = in_size + self.hidden_size = hidden_size + self.out_size = out_size + + self.fully_connected = nn.ModuleList() + if layers <= 1: + self.fully_connected.append(FCLayer(in_size, out_size, activation=last_activation, b_norm=last_b_norm, + device=device, dropout=dropout)) + else: + self.fully_connected.append(FCLayer(in_size, hidden_size, activation=mid_activation, b_norm=mid_b_norm, + device=device, dropout=dropout)) + for _ in range(layers - 2): + self.fully_connected.append(FCLayer(hidden_size, hidden_size, activation=mid_activation, + b_norm=mid_b_norm, device=device, dropout=dropout)) + self.fully_connected.append(FCLayer(hidden_size, out_size, activation=last_activation, b_norm=last_b_norm, + device=device, dropout=dropout)) + + def forward(self, x): + for fc in self.fully_connected: + x = fc(x) + return x + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + str(self.in_size) + ' -> ' \ + + str(self.out_size) + ')' + + +class GRU(nn.Module): + """ + Wrapper class for the GRU used by the GNN framework, nn.GRU is used for the Gated Recurrent Unit itself + """ + + def __init__(self, input_size, hidden_size, device): + super(GRU, self).__init__() + self.input_size = input_size + self.hidden_size = hidden_size + self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size).to(device) + + def forward(self, x, y): + """ + :param x: shape: (B, N, Din) where Din <= input_size (difference is padded) + :param y: shape: (B, N, Dh) where Dh <= hidden_size (difference is padded) + :return: shape: (B, N, Dh) + """ + assert 
(x.shape[-1] <= self.input_size and y.shape[-1] <= self.hidden_size) + + (B, N, _) = x.shape + x = x.reshape(1, B * N, -1).contiguous() + y = y.reshape(1, B * N, -1).contiguous() + + # padding if necessary + if x.shape[-1] < self.input_size: + x = F.pad(input=x, pad=[0, self.input_size - x.shape[-1]], mode='constant', value=0) + if y.shape[-1] < self.hidden_size: + y = F.pad(input=y, pad=[0, self.hidden_size - y.shape[-1]], mode='constant', value=0) + + x = self.gru(x, y)[1] + x = x.reshape(B, N, -1) + return x + + +class S2SReadout(nn.Module): + """ + Performs a Set2Set aggregation of all the graph nodes' features followed by a series of fully connected layers + """ + + def __init__(self, in_size, hidden_size, out_size, fc_layers=3, device='cpu', final_activation='relu'): + super(S2SReadout, self).__init__() + + # set2set aggregation + self.set2set = Set2Set(in_size, device=device) + + # fully connected layers + self.mlp = MLP(in_size=2 * in_size, hidden_size=hidden_size, out_size=out_size, layers=fc_layers, + mid_activation="relu", last_activation=final_activation, mid_b_norm=True, last_b_norm=False, + device=device) + + def forward(self, x): + x = self.set2set(x) + return self.mlp(x) + + + +class GRU(nn.Module): + """ + Wrapper class for the GRU used by the GNN framework, nn.GRU is used for the Gated Recurrent Unit itself + """ + + def __init__(self, input_size, hidden_size, device): + super(GRU, self).__init__() + self.input_size = input_size + self.hidden_size = hidden_size + self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size).to(device) + + def forward(self, x, y): + """ + :param x: shape: (B, N, Din) where Din <= input_size (difference is padded) + :param y: shape: (B, N, Dh) where Dh <= hidden_size (difference is padded) + :return: shape: (B, N, Dh) + """ + assert (x.shape[-1] <= self.input_size and y.shape[-1] <= self.hidden_size) + x = x.unsqueeze(0) + y = y.unsqueeze(0) + x = self.gru(x, y)[1] + x = x.squeeze() + return x diff --git a/layers/san_gt_layer.py b/layers/san_gt_layer.py new file mode 100644 index 0000000..e71f1b5 --- /dev/null +++ b/layers/san_gt_layer.py @@ -0,0 +1,267 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl +import dgl.function as fn +import numpy as np + +""" + SAN-GT + +""" + +""" + Util functions +""" +def src_dot_dst(src_field, dst_field, out_field): + def func(edges): + return {out_field: (edges.src[src_field] * edges.dst[dst_field])} + return func + + +def scaling(field, scale_constant): + def func(edges): + return {field: ((edges.data[field]) / scale_constant)} + return func + +# Improving implicit attention scores with explicit edge features, if available +def imp_exp_attn(implicit_attn, explicit_edge): + """ + implicit_attn: the output of K Q + explicit_edge: the explicit edge features + """ + def func(edges): + return {implicit_attn: (edges.data[implicit_attn] * edges.data[explicit_edge])} + return func + +def exp_real(field, L): + def func(edges): + # clamp for softmax numerical stability + return {'score_soft': torch.exp((edges.data[field].sum(-1, keepdim=True)).clamp(-5, 5))/(L+1)} + return func + + +def exp_fake(field, L): + def func(edges): + # clamp for softmax numerical stability + return {'score_soft': L*torch.exp((edges.data[field].sum(-1, keepdim=True)).clamp(-5, 5))/(L+1)} + return func + +def exp(field): + def func(edges): + # clamp for softmax numerical stability + return {'score_soft': torch.exp((edges.data[field].sum(-1, keepdim=True)).clamp(-5, 5))} + return func + + +""" + 
Single Attention Head +""" + +class MultiHeadAttentionLayer(nn.Module): + def __init__(self, gamma, in_dim, out_dim, num_heads, full_graph, use_bias, attention_for): + super().__init__() + + + self.out_dim = out_dim + self.num_heads = num_heads + self.full_graph=full_graph + self.attention_for = attention_for + self.gamma = gamma + + if self.attention_for == "h": + if use_bias: + self.Q = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.K = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.E = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + if self.full_graph: + self.Q_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.K_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.E_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + self.V = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + else: + self.Q = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.K = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.E = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + if self.full_graph: + self.Q_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.K_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.E_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + self.V = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + + def propagate_attention(self, g): + + + if self.full_graph: + real_ids = torch.nonzero(g.edata['real']).squeeze() + fake_ids = torch.nonzero(g.edata['real']==0).squeeze() + + else: + real_ids = g.edges(form='eid') + + g.apply_edges(src_dot_dst('K_h', 'Q_h', 'score'), edges=real_ids) + + if self.full_graph: + g.apply_edges(src_dot_dst('K_2h', 'Q_2h', 'score'), edges=fake_ids) + + + # scale scores by sqrt(d) + g.apply_edges(scaling('score', np.sqrt(self.out_dim))) + + # Use available edge features to modify the scores for edges + g.apply_edges(imp_exp_attn('score', 'E'), edges=real_ids) + + if self.full_graph: + g.apply_edges(imp_exp_attn('score', 'E_2'), edges=fake_ids) + + + if self.full_graph: + # softmax and scaling by gamma + L = torch.clamp(self.gamma, min=0.0, max=1.0) # Gamma \in [0,1] + g.apply_edges(exp_real('score', L), edges=real_ids) + g.apply_edges(exp_fake('score', L), edges=fake_ids) + + else: + g.apply_edges(exp('score'), edges=real_ids) + + # Send weighted values to target nodes + eids = g.edges() + g.send_and_recv(eids, fn.src_mul_edge('V_h', 'score_soft', 'V_h'), fn.sum('V_h', 'wV')) + g.send_and_recv(eids, fn.copy_edge('score_soft', 'score_soft'), fn.sum('score_soft', 'z')) + + + def forward(self, g, h, e): + + Q_h = self.Q(h) + K_h = self.K(h) + E = self.E(e) + + if self.full_graph: + Q_2h = self.Q_2(h) + K_2h = self.K_2(h) + E_2 = self.E_2(e) + + V_h = self.V(h) + + + # Reshaping into [num_nodes, num_heads, feat_dim] to + # get projections for multi-head attention + g.ndata['Q_h'] = Q_h.view(-1, self.num_heads, self.out_dim) + g.ndata['K_h'] = K_h.view(-1, self.num_heads, self.out_dim) + g.edata['E'] = E.view(-1, self.num_heads, self.out_dim) + + + if self.full_graph: + g.ndata['Q_2h'] = Q_2h.view(-1, self.num_heads, self.out_dim) + g.ndata['K_2h'] = K_2h.view(-1, self.num_heads, self.out_dim) + g.edata['E_2'] = E_2.view(-1, self.num_heads, self.out_dim) + + g.ndata['V_h'] = V_h.view(-1, self.num_heads, self.out_dim) + + self.propagate_attention(g) + + h_out = g.ndata['wV'] / (g.ndata['z'] + torch.full_like(g.ndata['z'], 1e-6)) + + return h_out + + +class SAN_GT_Layer(nn.Module): + """ + Param: + """ + def __init__(self, gamma, in_dim, out_dim, num_heads, full_graph, 
dropout=0.0, + layer_norm=False, batch_norm=True, residual=True, use_bias=False): + super().__init__() + + self.in_channels = in_dim + self.out_channels = out_dim + self.num_heads = num_heads + self.dropout = dropout + self.residual = residual + self.layer_norm = layer_norm + self.batch_norm = batch_norm + + self.attention_h = MultiHeadAttentionLayer(gamma, in_dim, out_dim//num_heads, num_heads, + full_graph, use_bias, attention_for="h") + + self.O_h = nn.Linear(out_dim, out_dim) + + if self.layer_norm: + self.layer_norm1_h = nn.LayerNorm(out_dim) + + if self.batch_norm: + self.batch_norm1_h = nn.BatchNorm1d(out_dim) + + # FFN for h + self.FFN_h_layer1 = nn.Linear(out_dim, out_dim*2) + self.FFN_h_layer2 = nn.Linear(out_dim*2, out_dim) + + if self.layer_norm: + self.layer_norm2_h = nn.LayerNorm(out_dim) + + if self.batch_norm: + self.batch_norm2_h = nn.BatchNorm1d(out_dim) + + + def forward(self, g, h, p, e, snorm_n): + h_in1 = h # for first residual connection + + # [START] For calculation of h ----------------------------------------------------------------- + + # multi-head attention out + h_attn_out = self.attention_h(g, h, e) + + #Concat multi-head outputs + h = h_attn_out.view(-1, self.out_channels) + + h = F.dropout(h, self.dropout, training=self.training) + + h = self.O_h(h) + + if self.residual: + h = h_in1 + h # residual connection + + # GN from benchmarking-gnns-v1 + # h = h * snorm_n + + if self.layer_norm: + h = self.layer_norm1_h(h) + + if self.batch_norm: + h = self.batch_norm1_h(h) + + h_in2 = h # for second residual connection + + # FFN for h + h = self.FFN_h_layer1(h) + h = F.relu(h) + h = F.dropout(h, self.dropout, training=self.training) + h = self.FFN_h_layer2(h) + + if self.residual: + h = h_in2 + h # residual connection + + # GN from benchmarking-gnns-v1 + # h = h * snorm_n + + if self.layer_norm: + h = self.layer_norm2_h(h) + + if self.batch_norm: + h = self.batch_norm2_h(h) + + # [END] For calculation of h ----------------------------------------------------------------- + + return h, None + + def __repr__(self): + return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__, + self.in_channels, + self.out_channels, self.num_heads, self.residual) \ No newline at end of file diff --git a/layers/san_gt_lspe_layer.py b/layers/san_gt_lspe_layer.py new file mode 100644 index 0000000..e18361c --- /dev/null +++ b/layers/san_gt_lspe_layer.py @@ -0,0 +1,318 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl +import dgl.function as fn +import numpy as np + +""" + SAN-GT-LSPE: SAN-GT with LSPE + +""" + +""" + Util functions +""" +def src_dot_dst(src_field, dst_field, out_field): + def func(edges): + return {out_field: (edges.src[src_field] * edges.dst[dst_field])} + return func + + +def scaling(field, scale_constant): + def func(edges): + return {field: ((edges.data[field]) / scale_constant)} + return func + +# Improving implicit attention scores with explicit edge features, if available +def imp_exp_attn(implicit_attn, explicit_edge): + """ + implicit_attn: the output of K Q + explicit_edge: the explicit edge features + """ + def func(edges): + return {implicit_attn: (edges.data[implicit_attn] * edges.data[explicit_edge])} + return func + +def exp_real(field, L): + def func(edges): + # clamp for softmax numerical stability + return {'score_soft': torch.exp((edges.data[field].sum(-1, keepdim=True)).clamp(-5, 5))/(L+1)} + return func + + +def exp_fake(field, L): + def func(edges): + # clamp for softmax 
numerical stability + return {'score_soft': L*torch.exp((edges.data[field].sum(-1, keepdim=True)).clamp(-5, 5))/(L+1)} + return func + +def exp(field): + def func(edges): + # clamp for softmax numerical stability + return {'score_soft': torch.exp((edges.data[field].sum(-1, keepdim=True)).clamp(-5, 5))} + return func + + +""" + Single Attention Head +""" + +class MultiHeadAttentionLayer(nn.Module): + def __init__(self, gamma, in_dim, out_dim, num_heads, full_graph, use_bias, attention_for): + super().__init__() + + + self.out_dim = out_dim + self.num_heads = num_heads + self.full_graph=full_graph + self.attention_for = attention_for + self.gamma = gamma + + if self.attention_for == "h": # attention module for h has input h = [h,p], so 2*in_dim for Q,K,V + if use_bias: + self.Q = nn.Linear(in_dim*2, out_dim * num_heads, bias=True) + self.K = nn.Linear(in_dim*2, out_dim * num_heads, bias=True) + self.E = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + if self.full_graph: + self.Q_2 = nn.Linear(in_dim*2, out_dim * num_heads, bias=True) + self.K_2 = nn.Linear(in_dim*2, out_dim * num_heads, bias=True) + self.E_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + self.V = nn.Linear(in_dim*2, out_dim * num_heads, bias=True) + + else: + self.Q = nn.Linear(in_dim*2, out_dim * num_heads, bias=False) + self.K = nn.Linear(in_dim*2, out_dim * num_heads, bias=False) + self.E = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + if self.full_graph: + self.Q_2 = nn.Linear(in_dim*2, out_dim * num_heads, bias=False) + self.K_2 = nn.Linear(in_dim*2, out_dim * num_heads, bias=False) + self.E_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + self.V = nn.Linear(in_dim*2, out_dim * num_heads, bias=False) + + elif self.attention_for == "p": # attention module for p + if use_bias: + self.Q = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.K = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.E = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + if self.full_graph: + self.Q_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.K_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + self.E_2 = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + self.V = nn.Linear(in_dim, out_dim * num_heads, bias=True) + + else: + self.Q = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.K = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.E = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + if self.full_graph: + self.Q_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.K_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + self.E_2 = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + self.V = nn.Linear(in_dim, out_dim * num_heads, bias=False) + + def propagate_attention(self, g): + + if self.full_graph: + real_ids = torch.nonzero(g.edata['real']).squeeze() + fake_ids = torch.nonzero(g.edata['real']==0).squeeze() + + else: + real_ids = g.edges(form='eid') + + g.apply_edges(src_dot_dst('K_h', 'Q_h', 'score'), edges=real_ids) + + if self.full_graph: + g.apply_edges(src_dot_dst('K_2h', 'Q_2h', 'score'), edges=fake_ids) + + + # scale scores by sqrt(d) + g.apply_edges(scaling('score', np.sqrt(self.out_dim))) + + # Use available edge features to modify the scores for edges + g.apply_edges(imp_exp_attn('score', 'E'), edges=real_ids) + + if self.full_graph: + g.apply_edges(imp_exp_attn('score', 'E_2'), edges=fake_ids) + + + if self.full_graph: + # softmax and scaling by gamma + L = torch.clamp(self.gamma, min=0.0, max=1.0) # Gamma \in [0,1] + 
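Note on this weighting: before normalization, edges of the input graph receive exp(score)/(1+γ) while the added full-graph edges receive γ·exp(score)/(1+γ), so after dividing by the accumulated normalizer z the added edges are down-weighted by a factor γ, and γ → 0 recovers sparse attention. A minimal numerical sketch of the same arithmetic, using hypothetical scalar scores rather than repo tensors:

```python
import torch

gamma = torch.tensor(0.1)                 # hypothetical gamma value
L = torch.clamp(gamma, min=0.0, max=1.0)  # Gamma in [0,1], as in propagate_attention

real_scores = torch.tensor([1.2, -0.3])   # summed K.Q scores on original (real) edges
fake_scores = torch.tensor([0.4])         # score on an added full-graph (fake) edge

w_real = torch.exp(real_scores.clamp(-5, 5)) / (L + 1)       # exp_real
w_fake = L * torch.exp(fake_scores.clamp(-5, 5)) / (L + 1)   # exp_fake

z = w_real.sum() + w_fake.sum()           # per-node normalizer 'z'
attn = torch.cat([w_real, w_fake]) / z    # softmax over both edge sets
print(attn, attn.sum())                   # weights sum to 1; fake edge scaled by gamma
```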
g.apply_edges(exp_real('score', L), edges=real_ids) + g.apply_edges(exp_fake('score', L), edges=fake_ids) + + else: + g.apply_edges(exp('score'), edges=real_ids) + + # Send weighted values to target nodes + eids = g.edges() + g.send_and_recv(eids, fn.src_mul_edge('V_h', 'score_soft', 'V_h'), fn.sum('V_h', 'wV')) + g.send_and_recv(eids, fn.copy_edge('score_soft', 'score_soft'), fn.sum('score_soft', 'z')) + + + def forward(self, g, h, p, e): + if self.attention_for == "h": + h = torch.cat((h, p), -1) + elif self.attention_for == "p": + h = p + + Q_h = self.Q(h) + K_h = self.K(h) + E = self.E(e) + + if self.full_graph: + Q_2h = self.Q_2(h) + K_2h = self.K_2(h) + E_2 = self.E_2(e) + + V_h = self.V(h) + + + # Reshaping into [num_nodes, num_heads, feat_dim] to + # get projections for multi-head attention + g.ndata['Q_h'] = Q_h.view(-1, self.num_heads, self.out_dim) + g.ndata['K_h'] = K_h.view(-1, self.num_heads, self.out_dim) + g.edata['E'] = E.view(-1, self.num_heads, self.out_dim) + + + if self.full_graph: + g.ndata['Q_2h'] = Q_2h.view(-1, self.num_heads, self.out_dim) + g.ndata['K_2h'] = K_2h.view(-1, self.num_heads, self.out_dim) + g.edata['E_2'] = E_2.view(-1, self.num_heads, self.out_dim) + + g.ndata['V_h'] = V_h.view(-1, self.num_heads, self.out_dim) + + self.propagate_attention(g) + + h_out = g.ndata['wV'] / (g.ndata['z'] + torch.full_like(g.ndata['z'], 1e-6)) + + return h_out + + +class SAN_GT_LSPE_Layer(nn.Module): + """ + Param: + """ + def __init__(self, gamma, in_dim, out_dim, num_heads, full_graph, dropout=0.0, + layer_norm=False, batch_norm=True, residual=True, use_bias=False): + super().__init__() + + self.in_channels = in_dim + self.out_channels = out_dim + self.num_heads = num_heads + self.dropout = dropout + self.residual = residual + self.layer_norm = layer_norm + self.batch_norm = batch_norm + + self.attention_h = MultiHeadAttentionLayer(gamma, in_dim, out_dim//num_heads, num_heads, + full_graph, use_bias, attention_for="h") + self.attention_p = MultiHeadAttentionLayer(gamma, in_dim, out_dim//num_heads, num_heads, + full_graph, use_bias, attention_for="p") + + self.O_h = nn.Linear(out_dim, out_dim) + self.O_p = nn.Linear(out_dim, out_dim) + + if self.layer_norm: + self.layer_norm1_h = nn.LayerNorm(out_dim) + + if self.batch_norm: + self.batch_norm1_h = nn.BatchNorm1d(out_dim) + + # FFN for h + self.FFN_h_layer1 = nn.Linear(out_dim, out_dim*2) + self.FFN_h_layer2 = nn.Linear(out_dim*2, out_dim) + + if self.layer_norm: + self.layer_norm2_h = nn.LayerNorm(out_dim) + + if self.batch_norm: + self.batch_norm2_h = nn.BatchNorm1d(out_dim) + + + def forward(self, g, h, p, e, snorm_n): + h_in1 = h # for first residual connection + p_in1 = p # for first residual connection + + # [START] For calculation of h ----------------------------------------------------------------- + + # multi-head attention out + h_attn_out = self.attention_h(g, h, p, e) + + #Concat multi-head outputs + h = h_attn_out.view(-1, self.out_channels) + + h = F.dropout(h, self.dropout, training=self.training) + + h = self.O_h(h) + + if self.residual: + h = h_in1 + h # residual connection + + # GN from benchmarking-gnns-v1 + # h = h * snorm_n + + if self.layer_norm: + h = self.layer_norm1_h(h) + + if self.batch_norm: + h = self.batch_norm1_h(h) + + h_in2 = h # for second residual connection + + # FFN for h + h = self.FFN_h_layer1(h) + h = F.relu(h) + h = F.dropout(h, self.dropout, training=self.training) + h = self.FFN_h_layer2(h) + + if self.residual: + h = h_in2 + h # residual connection + + # GN from 
benchmarking-gnns-v1 + # h = h * snorm_n + + if self.layer_norm: + h = self.layer_norm2_h(h) + + if self.batch_norm: + h = self.batch_norm2_h(h) + + # [END] For calculation of h ----------------------------------------------------------------- + + + # [START] For calculation of p ----------------------------------------------------------------- + + # multi-head attention out + p_attn_out = self.attention_p(g, None, p, e) + + #Concat multi-head outputs + p = p_attn_out.view(-1, self.out_channels) + + p = F.dropout(p, self.dropout, training=self.training) + + p = self.O_p(p) + + p = torch.tanh(p) + + if self.residual: + p = p_in1 + p # residual connection + + # [END] For calculation of p ----------------------------------------------------------------- + + return h, p + + def __repr__(self): + return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__, + self.in_channels, + self.out_channels, self.num_heads, self.residual) \ No newline at end of file diff --git a/main_OGBMOL_graph_classification.py b/main_OGBMOL_graph_classification.py new file mode 100644 index 0000000..e12eedc --- /dev/null +++ b/main_OGBMOL_graph_classification.py @@ -0,0 +1,504 @@ + + + + + +""" + IMPORTING LIBS +""" +import dgl + +import numpy as np +import os +import socket +import time +import random +import glob +import argparse, json + +import torch +import torch.nn as nn +import torch.nn.functional as F + +import torch.optim as optim +from torch.utils.data import DataLoader + +from tensorboardX import SummaryWriter +from tqdm import tqdm + +import matplotlib +import matplotlib.pyplot as plt + +class DotDict(dict): + def __init__(self, **kwds): + self.update(kwds) + self.__dict__ = self + + + + + + +""" + IMPORTING CUSTOM MODULES/METHODS +""" + +from nets.OGBMOL_graph_classification.load_net import gnn_model # import GNNs +from data.data import LoadData # import dataset + + + + +""" + GPU Setup +""" +def gpu_setup(use_gpu, gpu_id): + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" + os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id) + + if torch.cuda.is_available() and use_gpu: + print('cuda available with GPU:',torch.cuda.get_device_name(0)) + device = torch.device("cuda") + else: + print('cuda not available') + device = torch.device("cpu") + return device + + + + + + + +""" + VIEWING MODEL CONFIG AND PARAMS +""" +def view_model_param(MODEL_NAME, net_params): + model = gnn_model(MODEL_NAME, net_params) + total_param = 0 + print("MODEL DETAILS:\n") + # print(model) + for param in model.parameters(): + # print(param.data.size()) + total_param += np.prod(list(param.data.size())) + print('MODEL/Total parameters:', MODEL_NAME, total_param) + return total_param + + +def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs): + t0 = time.time() + per_epoch_time = [] + + DATASET_NAME = dataset.name + + if net_params['pe_init'] == 'lap_pe': + tt = time.time() + print("[!] -LapPE: Initializing graph positional encoding with Laplacian PE.") + dataset._add_lap_positional_encodings(net_params['pos_enc_dim']) + print("[!] Time taken: ", time.time()-tt) + elif net_params['pe_init'] == 'rand_walk': + tt = time.time() + print("[!] -LSPE: Initializing graph positional encoding with rand walk features.") + dataset._init_positional_encodings(net_params['pos_enc_dim'], net_params['pe_init']) + print("[!] Time taken: ", time.time()-tt) + + tt = time.time() + print("[!] 
-LSPE (For viz later): Adding lapeigvecs to key 'eigvec' for every graph.") + dataset._add_eig_vecs(net_params['pos_enc_dim']) + print("[!] Time taken: ", time.time()-tt) + + if MODEL_NAME in ['SAN', 'GraphiT']: + if net_params['full_graph']: + st = time.time() + print("[!] Adding full graph connectivity..") + dataset._make_full_graph() if MODEL_NAME == 'SAN' else dataset._make_full_graph((net_params['p_steps'], net_params['gamma'])) + print('Time taken to add full graph connectivity: ',time.time()-st) + + trainset, valset, testset = dataset.train, dataset.val, dataset.test + + evaluator = dataset.evaluator + + root_log_dir, root_ckpt_dir, write_file_name, write_config_file, viz_dir = dirs + device = net_params['device'] + + # Write the network and optimization hyper-parameters in folder config/ + with open(write_config_file + '.txt', 'w') as f: + f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param'])) + + log_dir = os.path.join(root_log_dir, "RUN_" + str(0)) + writer = SummaryWriter(log_dir=log_dir) + + # setting seeds + random.seed(params['seed']) + np.random.seed(params['seed']) + torch.manual_seed(params['seed']) + if device.type == 'cuda': + torch.cuda.manual_seed(params['seed']) + torch.cuda.manual_seed_all(params['seed']) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + print("Training Graphs: ", len(trainset)) + print("Validation Graphs: ", len(valset)) + print("Test Graphs: ", len(testset)) + + model = gnn_model(MODEL_NAME, net_params) + model = model.to(device) + + optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay']) + scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', + factor=params['lr_reduce_factor'], + patience=params['lr_schedule_patience'], + verbose=True) + + epoch_train_losses, epoch_val_losses = [], [] + epoch_train_accs, epoch_val_accs, epoch_test_accs = [], [], [] + + # import train functions for all GNNs + from train.train_OGBMOL_graph_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network + + train_loader = DataLoader(trainset, num_workers=4, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate, pin_memory=True) + val_loader = DataLoader(valset, num_workers=4, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate, pin_memory=True) + test_loader = DataLoader(testset, num_workers=4, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate, pin_memory=True) + + # At any point you can hit Ctrl + C to break out of training early. 
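The training loop below pairs ReduceLROnPlateau with a hard stop once the learning rate decays past min_lr. A standalone sketch of that pattern, with a toy model and illustrative values rather than the repo's settings:

```python
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='min', factor=0.5, patience=2)
min_lr = 1e-5

for epoch in range(100):
    val_loss = 1.0  # stand-in for evaluate_network(...) on the val split
    scheduler.step(val_loss)  # halves the LR after `patience` epochs without improvement
    if optimizer.param_groups[0]['lr'] < min_lr:
        print("LR fell below min_lr; stopping")
        break
```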
+ try: + with tqdm(range(params['epochs'])) as t: + for epoch in t: + + t.set_description('Epoch %d' % epoch) + + start = time.time() + + epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, evaluator) + + epoch_val_loss, epoch_val_acc, __ = evaluate_network(model, device, val_loader, epoch, evaluator) + _, epoch_test_acc, __ = evaluate_network(model, device, test_loader, epoch, evaluator) + del __ + + epoch_train_losses.append(epoch_train_loss) + epoch_val_losses.append(epoch_val_loss) + epoch_train_accs.append(epoch_train_acc) + epoch_val_accs.append(epoch_val_acc) + epoch_test_accs.append(epoch_test_acc) + + writer.add_scalar('train/_loss', epoch_train_loss, epoch) + writer.add_scalar('val/_loss', epoch_val_loss, epoch) + writer.add_scalar('train/_avg_prec', epoch_train_acc, epoch) + writer.add_scalar('val/_avg_prec', epoch_val_acc, epoch) + writer.add_scalar('test/_avg_prec', epoch_test_acc, epoch) + writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch) + + if dataset.name == "ogbg-moltox21": + t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'], + train_loss=epoch_train_loss, val_loss=epoch_val_loss, + train_AUC=epoch_train_acc, val_AUC=epoch_val_acc, + test_AUC=epoch_test_acc) + elif dataset.name == "ogbg-molpcba": + t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'], + train_loss=epoch_train_loss, val_loss=epoch_val_loss, + train_AP=epoch_train_acc, val_AP=epoch_val_acc, + test_AP=epoch_test_acc) + + per_epoch_time.append(time.time()-start) + + # Saving checkpoint + ckpt_dir = os.path.join(root_ckpt_dir, "RUN_") + if not os.path.exists(ckpt_dir): + os.makedirs(ckpt_dir) + torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch))) + + files = glob.glob(ckpt_dir + '/*.pkl') + for file in files: + epoch_nb = file.split('_')[-1] + epoch_nb = int(epoch_nb.split('.')[0]) + if epoch_nb < epoch-1: + os.remove(file) + + scheduler.step(epoch_val_loss) + + if optimizer.param_groups[0]['lr'] < params['min_lr']: + print("\n!! 
LR EQUAL TO MIN LR SET.") + break + + # Stop training after params['max_time'] hours + if time.time()-t0 > params['max_time']*3600: + print('-' * 89) + print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time'])) + break + + except KeyboardInterrupt: + print('-' * 89) + print('Exiting from training early because of KeyboardInterrupt') + + # ___, __, g_outs_train = evaluate_network(model, device, train_loader, epoch, evaluator) + ___, __, g_outs_test = evaluate_network(model, device, test_loader, epoch, evaluator) + del ___ + del __ + + # OGB: Test scores at best val epoch + epoch_best = epoch_val_accs.index(max(epoch_val_accs)) + + test_acc = epoch_test_accs[epoch_best] + train_acc = epoch_train_accs[epoch_best] + val_acc = epoch_val_accs[epoch_best] + + if dataset.name == "ogbg-moltox21": + print("Test AUC: {:.4f}".format(test_acc)) + print("Train AUC: {:.4f}".format(train_acc)) + print("Val AUC: {:.4f}".format(val_acc)) + elif dataset.name == "ogbg-molpcba": + print("Test Avg Precision: {:.4f}".format(test_acc)) + print("Train Avg Precision: {:.4f}".format(train_acc)) + print("Val Avg Precision: {:.4f}".format(val_acc)) + print("Convergence Time (Epochs): {:.4f}".format(epoch)) + print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0)) + print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time))) + + if net_params['pe_init'] == 'rand_walk' and g_outs_test is not None: + # Visualize actual and predicted/learned eigenvecs + from utils.plot_util import plot_graph_eigvec + if not os.path.exists(viz_dir): + os.makedirs(viz_dir) + + sample_graph_ids = [153,103,123] + + for f_idx, graph_id in enumerate(sample_graph_ids): + + # Test graphs + g_dgl = g_outs_test[graph_id] + + f = plt.figure(f_idx, figsize=(12,6)) + + plt1 = f.add_subplot(121) + plot_graph_eigvec(plt1, graph_id, g_dgl, feature_key='eigvec', actual_eigvecs=True) + + plt2 = f.add_subplot(122) + plot_graph_eigvec(plt2, graph_id, g_dgl, feature_key='p', predicted_eigvecs=True) + + f.savefig(viz_dir+'/test'+str(graph_id)+'.jpg') + + writer.close() + + """ + Write the results in out_dir/results folder + """ + if dataset.name == "ogbg-moltox21": + with open(write_file_name + '.txt', 'w') as f: + f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n + FINAL RESULTS\nTEST AUC: {:.4f}\nTRAIN AUC: {:.4f}\nVAL AUC: {:.4f}\n\n + Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\ + .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'], + test_acc, train_acc, val_acc, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time))) + elif dataset.name == "ogbg-molpcba": + with open(write_file_name + '.txt', 'w') as f: + f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n + FINAL RESULTS\nTEST AVG PRECISION: {:.4f}\nTRAIN AVG PRECISION: {:.4f}\nVAL AVG PRECISION: {:.4f}\n\n + Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\ + .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'], + test_acc, train_acc, val_acc, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time))) + + + + +def main(): + """ + USER CONTROLS + """ + + + parser = argparse.ArgumentParser() + parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details") + parser.add_argument('--gpu_id', help="Please give a value for gpu id") + parser.add_argument('--model', help="Please give a value for 
model name") + parser.add_argument('--dataset', help="Please give a value for dataset name") + parser.add_argument('--out_dir', help="Please give a value for out_dir") + parser.add_argument('--seed', help="Please give a value for seed") + parser.add_argument('--epochs', help="Please give a value for epochs") + parser.add_argument('--batch_size', help="Please give a value for batch_size") + parser.add_argument('--init_lr', help="Please give a value for init_lr") + parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor") + parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience") + parser.add_argument('--min_lr', help="Please give a value for min_lr") + parser.add_argument('--weight_decay', help="Please give a value for weight_decay") + parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval") + parser.add_argument('--L', help="Please give a value for L") + parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim") + parser.add_argument('--out_dim', help="Please give a value for out_dim") + parser.add_argument('--residual', help="Please give a value for residual") + parser.add_argument('--edge_feat', help="Please give a value for edge_feat") + parser.add_argument('--readout', help="Please give a value for readout") + parser.add_argument('--kernel', help="Please give a value for kernel") + parser.add_argument('--n_heads', help="Please give a value for n_heads") + parser.add_argument('--gated', help="Please give a value for gated") + parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout") + parser.add_argument('--dropout', help="Please give a value for dropout") + parser.add_argument('--layer_norm', help="Please give a value for layer_norm") + parser.add_argument('--batch_norm', help="Please give a value for batch_norm") + parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator") + parser.add_argument('--data_mode', help="Please give a value for data_mode") + parser.add_argument('--num_pool', help="Please give a value for num_pool") + parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block") + parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim") + parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio") + parser.add_argument('--linkpred', help="Please give a value for linkpred") + parser.add_argument('--cat', help="Please give a value for cat") + parser.add_argument('--self_loop', help="Please give a value for self_loop") + parser.add_argument('--max_time', help="Please give a value for max_time") + parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim") + parser.add_argument('--alpha_loss', help="Please give a value for alpha_loss") + parser.add_argument('--lambda_loss', help="Please give a value for lambda_loss") + parser.add_argument('--pe_init', help="Please give a value for pe_init") + args = parser.parse_args() + with open(args.config) as f: + config = json.load(f) + # device + if args.gpu_id is not None: + config['gpu']['id'] = int(args.gpu_id) + config['gpu']['use'] = True + device = gpu_setup(config['gpu']['use'], config['gpu']['id']) + # model, dataset, out_dir + if args.model is not None: + MODEL_NAME = args.model + else: + MODEL_NAME = config['model'] + if args.dataset is not None: + DATASET_NAME = args.dataset + else: + DATASET_NAME = config['dataset'] + dataset = 
LoadData(DATASET_NAME) + if args.out_dir is not None: + out_dir = args.out_dir + else: + out_dir = config['out_dir'] + # parameters + params = config['params'] + if args.seed is not None: + params['seed'] = int(args.seed) + if args.epochs is not None: + params['epochs'] = int(args.epochs) + if args.batch_size is not None: + params['batch_size'] = int(args.batch_size) + if args.init_lr is not None: + params['init_lr'] = float(args.init_lr) + if args.lr_reduce_factor is not None: + params['lr_reduce_factor'] = float(args.lr_reduce_factor) + if args.lr_schedule_patience is not None: + params['lr_schedule_patience'] = int(args.lr_schedule_patience) + if args.min_lr is not None: + params['min_lr'] = float(args.min_lr) + if args.weight_decay is not None: + params['weight_decay'] = float(args.weight_decay) + if args.print_epoch_interval is not None: + params['print_epoch_interval'] = int(args.print_epoch_interval) + if args.max_time is not None: + params['max_time'] = float(args.max_time) + # network parameters + net_params = config['net_params'] + net_params['device'] = device + net_params['gpu_id'] = config['gpu']['id'] + net_params['batch_size'] = params['batch_size'] + if args.L is not None: + net_params['L'] = int(args.L) + if args.hidden_dim is not None: + net_params['hidden_dim'] = int(args.hidden_dim) + if args.out_dim is not None: + net_params['out_dim'] = int(args.out_dim) + if args.residual is not None: + net_params['residual'] = True if args.residual=='True' else False + if args.edge_feat is not None: + net_params['edge_feat'] = True if args.edge_feat=='True' else False + if args.readout is not None: + net_params['readout'] = args.readout + if args.kernel is not None: + net_params['kernel'] = int(args.kernel) + if args.n_heads is not None: + net_params['n_heads'] = int(args.n_heads) + if args.gated is not None: + net_params['gated'] = True if args.gated=='True' else False + if args.in_feat_dropout is not None: + net_params['in_feat_dropout'] = float(args.in_feat_dropout) + if args.dropout is not None: + net_params['dropout'] = float(args.dropout) + if args.layer_norm is not None: + net_params['layer_norm'] = True if args.layer_norm=='True' else False + if args.batch_norm is not None: + net_params['batch_norm'] = True if args.batch_norm=='True' else False + if args.sage_aggregator is not None: + net_params['sage_aggregator'] = args.sage_aggregator + if args.data_mode is not None: + net_params['data_mode'] = args.data_mode + if args.num_pool is not None: + net_params['num_pool'] = int(args.num_pool) + if args.gnn_per_block is not None: + net_params['gnn_per_block'] = int(args.gnn_per_block) + if args.embedding_dim is not None: + net_params['embedding_dim'] = int(args.embedding_dim) + if args.pool_ratio is not None: + net_params['pool_ratio'] = float(args.pool_ratio) + if args.linkpred is not None: + net_params['linkpred'] = True if args.linkpred=='True' else False + if args.cat is not None: + net_params['cat'] = True if args.cat=='True' else False + if args.self_loop is not None: + net_params['self_loop'] = True if args.self_loop=='True' else False + if args.pos_enc_dim is not None: + net_params['pos_enc_dim'] = int(args.pos_enc_dim) + if args.alpha_loss is not None: + net_params['alpha_loss'] = float(args.alpha_loss) + if args.lambda_loss is not None: + net_params['lambda_loss'] = float(args.lambda_loss) + if args.pe_init is not None: + net_params['pe_init'] = args.pe_init + + + + # OGBMOL* + num_classes = dataset.dataset.num_tasks # provided by OGB dataset class + 
net_params['n_classes'] = num_classes + + if MODEL_NAME == 'PNA': + D = torch.cat([torch.sparse.sum(g.adjacency_matrix(transpose=True), dim=-1).to_dense() for g, label in + dataset.train]) + net_params['avg_d'] = dict(lin=torch.mean(D), + exp=torch.mean(torch.exp(torch.div(1, D)) - 1), + log=torch.mean(torch.log(D + 1))) + + root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + viz_dir = out_dir + 'viz/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file, viz_dir + + if not os.path.exists(out_dir + 'results'): + os.makedirs(out_dir + 'results') + + if not os.path.exists(out_dir + 'configs'): + os.makedirs(out_dir + 'configs') + + net_params['total_param'] = view_model_param(MODEL_NAME, net_params) + train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs) + + + + + + + + +main() + + + + + + + + + + + + + + + diff --git a/main_ZINC_graph_regression.py b/main_ZINC_graph_regression.py new file mode 100644 index 0000000..91d028e --- /dev/null +++ b/main_ZINC_graph_regression.py @@ -0,0 +1,453 @@ + + + + + +""" + IMPORTING LIBS +""" +import dgl + +import numpy as np +import os +import socket +import time +import random +import glob +import argparse, json +import pickle + +import torch +import torch.nn as nn +import torch.nn.functional as F + +import torch.optim as optim +from torch.utils.data import DataLoader + +from tensorboardX import SummaryWriter +from tqdm import tqdm + +import matplotlib +import matplotlib.pyplot as plt + + +class DotDict(dict): + def __init__(self, **kwds): + self.update(kwds) + self.__dict__ = self + + + + + + +""" + IMPORTING CUSTOM MODULES/METHODS +""" +from nets.ZINC_graph_regression.load_net import gnn_model # import all GNNS +from data.data import LoadData # import dataset + + + + +""" + GPU Setup +""" +def gpu_setup(use_gpu, gpu_id): + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" + os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id) + + if torch.cuda.is_available() and use_gpu: + print('cuda available with GPU:',torch.cuda.get_device_name(0)) + device = torch.device("cuda") + else: + print('cuda not available') + device = torch.device("cpu") + return device + + + + + + + + +""" + VIEWING MODEL CONFIG AND PARAMS +""" +def view_model_param(MODEL_NAME, net_params): + model = gnn_model(MODEL_NAME, net_params) + total_param = 0 + print("MODEL DETAILS:\n") + #print(model) + for param in model.parameters(): + # print(param.data.size()) + total_param += np.prod(list(param.data.size())) + print('MODEL/Total parameters:', MODEL_NAME, total_param) + return total_param + + +""" + TRAINING CODE +""" + +def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs): + t0 = time.time() + per_epoch_time = [] + + DATASET_NAME = dataset.name + + if net_params['pe_init'] == 'lap_pe': + tt = time.time() + print("[!] 
-LapPE: Initializing graph positional encoding with Laplacian PE.") + dataset._add_lap_positional_encodings(net_params['pos_enc_dim']) + print("[!] Time taken: ", time.time()-tt) + elif net_params['pe_init'] == 'rand_walk': + tt = time.time() + print("[!] -LSPE: Initializing graph positional encoding with rand walk features.") + dataset._init_positional_encodings(net_params['pos_enc_dim'], net_params['pe_init']) + print("[!] Time taken: ", time.time()-tt) + + tt = time.time() + print("[!] -LSPE (For viz later): Adding lapeigvecs to key 'eigvec' for every graph.") + dataset._add_eig_vecs(net_params['pos_enc_dim']) + print("[!] Time taken: ", time.time()-tt) + + if MODEL_NAME in ['SAN', 'GraphiT']: + if net_params['full_graph']: + st = time.time() + print("[!] Adding full graph connectivity..") + dataset._make_full_graph() if MODEL_NAME == 'SAN' else dataset._make_full_graph((net_params['p_steps'], net_params['gamma'])) + print('Time taken to add full graph connectivity: ',time.time()-st) + + trainset, valset, testset = dataset.train, dataset.val, dataset.test + + root_log_dir, root_ckpt_dir, write_file_name, write_config_file, viz_dir = dirs + device = net_params['device'] + + # Write the network and optimization hyper-parameters in folder config/ + with open(write_config_file + '.txt', 'w') as f: + f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param'])) + + log_dir = os.path.join(root_log_dir, "RUN_" + str(0)) + writer = SummaryWriter(log_dir=log_dir) + + # setting seeds + random.seed(params['seed']) + np.random.seed(params['seed']) + torch.manual_seed(params['seed']) + if device.type == 'cuda': + torch.cuda.manual_seed(params['seed']) + torch.cuda.manual_seed_all(params['seed']) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + print("Training Graphs: ", len(trainset)) + print("Validation Graphs: ", len(valset)) + print("Test Graphs: ", len(testset)) + + model = gnn_model(MODEL_NAME, net_params) + model = model.to(device) + + optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay']) + scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', + factor=params['lr_reduce_factor'], + patience=params['lr_schedule_patience'], + verbose=True) + + epoch_train_losses, epoch_val_losses = [], [] + epoch_train_MAEs, epoch_val_MAEs = [], [] + + # import train functions for all GNNs + from train.train_ZINC_graph_regression import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network + + train_loader = DataLoader(trainset, num_workers=4, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate) + val_loader = DataLoader(valset, num_workers=4, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate) + test_loader = DataLoader(testset, num_workers=4, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate) + + # At any point you can hit Ctrl + C to break out of training early. 
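Inside the loop below, checkpoints are rotated so that only the two most recent epoch files survive on disk. A minimal sketch of that save-then-prune step (save_and_rotate is a hypothetical helper; paths are illustrative):

```python
import glob
import os
import torch

def save_and_rotate(model, ckpt_dir, epoch):
    # Save this epoch's weights, then delete everything older than epoch-1,
    # mirroring the cleanup done inside the training loop.
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(ckpt_dir, 'epoch_{}.pkl'.format(epoch)))
    for file in glob.glob(os.path.join(ckpt_dir, '*.pkl')):
        epoch_nb = int(file.split('_')[-1].split('.')[0])
        if epoch_nb < epoch - 1:
            os.remove(file)
```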
+ try: + with tqdm(range(params['epochs'])) as t: + for epoch in t: + + t.set_description('Epoch %d' % epoch) + + start = time.time() + + epoch_train_loss, epoch_train_mae, optimizer = train_epoch(model, optimizer, device, train_loader, epoch) + + epoch_val_loss, epoch_val_mae, __ = evaluate_network(model, device, val_loader, epoch) + epoch_test_loss, epoch_test_mae, __ = evaluate_network(model, device, test_loader, epoch) + del __ + + epoch_train_losses.append(epoch_train_loss) + epoch_val_losses.append(epoch_val_loss) + epoch_train_MAEs.append(epoch_train_mae) + epoch_val_MAEs.append(epoch_val_mae) + + writer.add_scalar('train/_loss', epoch_train_loss, epoch) + writer.add_scalar('val/_loss', epoch_val_loss, epoch) + writer.add_scalar('train/_mae', epoch_train_mae, epoch) + writer.add_scalar('val/_mae', epoch_val_mae, epoch) + writer.add_scalar('test/_mae', epoch_test_mae, epoch) + writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch) + + + t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'], + train_loss=epoch_train_loss, val_loss=epoch_val_loss, + train_MAE=epoch_train_mae, val_MAE=epoch_val_mae, + test_MAE=epoch_test_mae) + + + per_epoch_time.append(time.time()-start) + + # Saving checkpoint + ckpt_dir = os.path.join(root_ckpt_dir, "RUN_") + if not os.path.exists(ckpt_dir): + os.makedirs(ckpt_dir) + torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch))) + + files = glob.glob(ckpt_dir + '/*.pkl') + for file in files: + epoch_nb = file.split('_')[-1] + epoch_nb = int(epoch_nb.split('.')[0]) + if epoch_nb < epoch-1: + os.remove(file) + + scheduler.step(epoch_val_loss) + + if optimizer.param_groups[0]['lr'] < params['min_lr']: + print("\n!! LR EQUAL TO MIN LR SET.") + break + + # Stop training after params['max_time'] hours + if time.time()-t0 > params['max_time']*3600: + print('-' * 89) + print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time'])) + break + + except KeyboardInterrupt: + print('-' * 89) + print('Exiting from training early because of KeyboardInterrupt') + + test_loss_lapeig, test_mae, g_outs_test = evaluate_network(model, device, test_loader, epoch) + train_loss_lapeig, train_mae, g_outs_train = evaluate_network(model, device, train_loader, epoch) + + print("Test MAE: {:.4f}".format(test_mae)) + print("Train MAE: {:.4f}".format(train_mae)) + print("Convergence Time (Epochs): {:.4f}".format(epoch)) + print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0)) + print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time))) + + + if net_params['pe_init'] == 'rand_walk': + # Visualize actual and predicted/learned eigenvecs + from utils.plot_util import plot_graph_eigvec + if not os.path.exists(viz_dir): + os.makedirs(viz_dir) + + sample_graph_ids = [15,25,45] + + for f_idx, graph_id in enumerate(sample_graph_ids): + + # Test graphs + g_dgl = g_outs_test[graph_id] + + f = plt.figure(f_idx, figsize=(12,6)) + + plt1 = f.add_subplot(121) + plot_graph_eigvec(plt1, graph_id, g_dgl, feature_key='eigvec', actual_eigvecs=True) + + plt2 = f.add_subplot(122) + plot_graph_eigvec(plt2, graph_id, g_dgl, feature_key='p', predicted_eigvecs=True) + + f.savefig(viz_dir+'/test'+str(graph_id)+'.jpg') + + # Train graphs + g_dgl = g_outs_train[graph_id] + + f = plt.figure(f_idx + len(sample_graph_ids), figsize=(12,6)) # fresh figure id so the train plot does not draw over the saved test figure + + plt1 = f.add_subplot(121) + plot_graph_eigvec(plt1, graph_id, g_dgl, feature_key='eigvec', actual_eigvecs=True) + + plt2 = f.add_subplot(122) + plot_graph_eigvec(plt2, graph_id, g_dgl, 
feature_key='p', predicted_eigvecs=True) + + f.savefig(viz_dir+'/train'+str(graph_id)+'.jpg') + + writer.close() + + """ + Write the results in out_dir/results folder + """ + with open(write_file_name + '.txt', 'w') as f: + f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n + FINAL RESULTS\nTEST MAE: {:.4f}\nTRAIN MAE: {:.4f}\n\n + Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\ + .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'], + test_mae, train_mae, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time))) + + + + + +def main(): + """ + USER CONTROLS + """ + + + parser = argparse.ArgumentParser() + parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details") + parser.add_argument('--gpu_id', help="Please give a value for gpu id") + parser.add_argument('--model', help="Please give a value for model name") + parser.add_argument('--dataset', help="Please give a value for dataset name") + parser.add_argument('--out_dir', help="Please give a value for out_dir") + parser.add_argument('--seed', help="Please give a value for seed") + parser.add_argument('--epochs', help="Please give a value for epochs") + parser.add_argument('--batch_size', help="Please give a value for batch_size") + parser.add_argument('--init_lr', help="Please give a value for init_lr") + parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor") + parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience") + parser.add_argument('--min_lr', help="Please give a value for min_lr") + parser.add_argument('--weight_decay', help="Please give a value for weight_decay") + parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval") + parser.add_argument('--L', help="Please give a value for L") + parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim") + parser.add_argument('--out_dim', help="Please give a value for out_dim") + parser.add_argument('--residual', help="Please give a value for residual") + parser.add_argument('--edge_feat', help="Please give a value for edge_feat") + parser.add_argument('--readout', help="Please give a value for readout") + parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout") + parser.add_argument('--dropout', help="Please give a value for dropout") + parser.add_argument('--layer_norm', help="Please give a value for layer_norm") + parser.add_argument('--batch_norm', help="Please give a value for batch_norm") + parser.add_argument('--max_time', help="Please give a value for max_time") + parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim") + parser.add_argument('--pos_enc', help="Please give a value for pos_enc") + parser.add_argument('--alpha_loss', help="Please give a value for alpha_loss") + parser.add_argument('--lambda_loss', help="Please give a value for lambda_loss") + parser.add_argument('--pe_init', help="Please give a value for pe_init") + args = parser.parse_args() + with open(args.config) as f: + config = json.load(f) + + # device + if args.gpu_id is not None: + config['gpu']['id'] = int(args.gpu_id) + config['gpu']['use'] = True + device = gpu_setup(config['gpu']['use'], config['gpu']['id']) + # model, dataset, out_dir + if args.model is not None: + MODEL_NAME = args.model + else: + MODEL_NAME = config['model'] 
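Each flag in this block is optional and overrides the JSON config only when supplied on the command line. A self-contained sketch of that override pattern, with a toy key and simulated argv (not the repo's full flag list):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--init_lr')
args = parser.parse_args(['--init_lr', '0.0007'])  # simulated command line

config = {'params': {'init_lr': 0.001}}  # stands in for the loaded JSON file
params = config['params']
if args.init_lr is not None:
    params['init_lr'] = float(args.init_lr)  # CLI value wins over the JSON default
print(params)  # {'init_lr': 0.0007}
```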
+ if args.dataset is not None: + DATASET_NAME = args.dataset + else: + DATASET_NAME = config['dataset'] + dataset = LoadData(DATASET_NAME) + if args.out_dir is not None: + out_dir = args.out_dir + else: + out_dir = config['out_dir'] + # parameters + params = config['params'] + if args.seed is not None: + params['seed'] = int(args.seed) + if args.epochs is not None: + params['epochs'] = int(args.epochs) + if args.batch_size is not None: + params['batch_size'] = int(args.batch_size) + if args.init_lr is not None: + params['init_lr'] = float(args.init_lr) + if args.lr_reduce_factor is not None: + params['lr_reduce_factor'] = float(args.lr_reduce_factor) + if args.lr_schedule_patience is not None: + params['lr_schedule_patience'] = int(args.lr_schedule_patience) + if args.min_lr is not None: + params['min_lr'] = float(args.min_lr) + if args.weight_decay is not None: + params['weight_decay'] = float(args.weight_decay) + if args.print_epoch_interval is not None: + params['print_epoch_interval'] = int(args.print_epoch_interval) + if args.max_time is not None: + params['max_time'] = float(args.max_time) + # network parameters + net_params = config['net_params'] + net_params['device'] = device + net_params['gpu_id'] = config['gpu']['id'] + net_params['batch_size'] = params['batch_size'] + if args.L is not None: + net_params['L'] = int(args.L) + if args.hidden_dim is not None: + net_params['hidden_dim'] = int(args.hidden_dim) + if args.out_dim is not None: + net_params['out_dim'] = int(args.out_dim) + if args.residual is not None: + net_params['residual'] = True if args.residual=='True' else False + if args.edge_feat is not None: + net_params['edge_feat'] = True if args.edge_feat=='True' else False + if args.readout is not None: + net_params['readout'] = args.readout + if args.in_feat_dropout is not None: + net_params['in_feat_dropout'] = float(args.in_feat_dropout) + if args.dropout is not None: + net_params['dropout'] = float(args.dropout) + if args.layer_norm is not None: + net_params['layer_norm'] = True if args.layer_norm=='True' else False + if args.batch_norm is not None: + net_params['batch_norm'] = True if args.batch_norm=='True' else False + if args.pos_enc is not None: + net_params['pos_enc'] = True if args.pos_enc=='True' else False + if args.pos_enc_dim is not None: + net_params['pos_enc_dim'] = int(args.pos_enc_dim) + if args.alpha_loss is not None: + net_params['alpha_loss'] = float(args.alpha_loss) + if args.lambda_loss is not None: + net_params['lambda_loss'] = float(args.lambda_loss) + if args.pe_init is not None: + net_params['pe_init'] = args.pe_init + + + # ZINC + net_params['num_atom_type'] = dataset.num_atom_type + net_params['num_bond_type'] = dataset.num_bond_type + + if MODEL_NAME == 'PNA': + D = torch.cat([torch.sparse.sum(g.adjacency_matrix(transpose=True), dim=-1).to_dense() for g in + dataset.train.graph_lists]) + net_params['avg_d'] = dict(lin=torch.mean(D), + exp=torch.mean(torch.exp(torch.div(1, D)) - 1), + log=torch.mean(torch.log(D + 1))) + + root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + write_config_file = out_dir + 'configs/config_' + 
MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + viz_dir = out_dir + 'viz/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file, viz_dir + + if not os.path.exists(out_dir + 'results'): + os.makedirs(out_dir + 'results') + + if not os.path.exists(out_dir + 'configs'): + os.makedirs(out_dir + 'configs') + + net_params['total_param'] = view_model_param(MODEL_NAME, net_params) + train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs) + + + + + + + + +main() + + + + + diff --git a/nets/OGBMOL_graph_classification/gatedgcn_net.py b/nets/OGBMOL_graph_classification/gatedgcn_net.py new file mode 100644 index 0000000..3bb2c9c --- /dev/null +++ b/nets/OGBMOL_graph_classification/gatedgcn_net.py @@ -0,0 +1,137 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl + +from scipy import sparse as sp +from scipy.sparse.linalg import norm + +from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder + +""" + GatedGCN and GatedGCN-LSPE + +""" + +from layers.gatedgcn_layer import GatedGCNLayer +from layers.gatedgcn_lspe_layer import GatedGCNLSPELayer +from layers.mlp_readout_layer import MLPReadout + + +class GatedGCNNet(nn.Module): + def __init__(self, net_params): + super().__init__() + hidden_dim = net_params['hidden_dim'] + out_dim = net_params['out_dim'] + n_classes = net_params['n_classes'] + dropout = net_params['dropout'] + n_layers = net_params['L'] + self.readout = net_params['readout'] + self.batch_norm = net_params['batch_norm'] + self.residual = net_params['residual'] + self.device = net_params['device'] + self.pe_init = net_params['pe_init'] + self.n_classes = net_params['n_classes'] + + self.use_lapeig_loss = net_params['use_lapeig_loss'] + self.lambda_loss = net_params['lambda_loss'] + self.alpha_loss = net_params['alpha_loss'] + + self.pos_enc_dim = net_params['pos_enc_dim'] + + if self.pe_init in ['rand_walk', 'lap_pe']: + self.embedding_p = nn.Linear(self.pos_enc_dim, hidden_dim) + + self.atom_encoder = AtomEncoder(hidden_dim) + self.bond_encoder = BondEncoder(hidden_dim) + + if self.pe_init == 'rand_walk': + # LSPE + self.layers = nn.ModuleList([ GatedGCNLSPELayer(hidden_dim, hidden_dim, dropout, self.batch_norm, self.residual) + for _ in range(n_layers-1) ]) + self.layers.append(GatedGCNLSPELayer(hidden_dim, out_dim, dropout, self.batch_norm, self.residual)) + else: + # NoPE or LapPE + self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout, self.batch_norm, self.residual) + for _ in range(n_layers-1) ]) + self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm, self.residual)) + + self.MLP_layer = MLPReadout(out_dim, n_classes) + + if self.pe_init == 'rand_walk': + self.p_out = nn.Linear(out_dim, self.pos_enc_dim) + self.Whp = nn.Linear(out_dim+self.pos_enc_dim, out_dim) + + self.g = None # For util; To be accessed in loss() function + + def forward(self, g, h, p, e, snorm_n): + + h = self.atom_encoder(h) + e = self.bond_encoder(e) + + if self.pe_init in ['rand_walk', 'lap_pe']: + p = self.embedding_p(p) + + if self.pe_init == 'lap_pe': + h = h + p + p = None + + for conv in self.layers: + h, p, e = conv(g, h, p, e, snorm_n) + + g.ndata['h'] = h + + if self.pe_init == 'rand_walk': + p = self.p_out(p) + g.ndata['p'] = p + + if self.use_lapeig_loss: + # Implementing p_g = p_g - torch.mean(p_g, 
dim=0) + means = dgl.mean_nodes(g, 'p') + batch_wise_p_means = means.repeat_interleave(g.batch_num_nodes(), 0) + p = p - batch_wise_p_means + + # Implementing p_g = p_g / torch.norm(p_g, p=2, dim=0) + g.ndata['p'] = p + g.ndata['p2'] = g.ndata['p']**2 + norms = dgl.sum_nodes(g, 'p2') + norms = torch.sqrt(norms+1e-6) + batch_wise_p_l2_norms = norms.repeat_interleave(g.batch_num_nodes(), 0) + p = p / batch_wise_p_l2_norms + g.ndata['p'] = p + + if self.pe_init == 'rand_walk': + # Concat h and p + hp = self.Whp(torch.cat((g.ndata['h'],g.ndata['p']),dim=-1)) + g.ndata['h'] = hp + + if self.readout == "sum": + hg = dgl.sum_nodes(g, 'h') + elif self.readout == "max": + hg = dgl.max_nodes(g, 'h') + elif self.readout == "mean": + hg = dgl.mean_nodes(g, 'h') + else: + hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes + + self.g = g # For util; To be accessed in loss() function + + if self.n_classes == 128: + return_g = None # not passing PCBA graphs due to memory + else: + return_g = g + + return self.MLP_layer(hg), return_g + + def loss(self, pred, labels): + + # Loss A: Task loss ------------------------------------------------------------- + loss_a = torch.nn.BCEWithLogitsLoss()(pred, labels) + + if self.use_lapeig_loss: + raise NotImplementedError + else: + loss = loss_a + + return loss \ No newline at end of file diff --git a/nets/OGBMOL_graph_classification/graphit_net.py b/nets/OGBMOL_graph_classification/graphit_net.py new file mode 100644 index 0000000..ec94cd5 --- /dev/null +++ b/nets/OGBMOL_graph_classification/graphit_net.py @@ -0,0 +1,138 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl + +from scipy import sparse as sp +from scipy.sparse.linalg import norm + +from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder + +""" + GraphiT-GT and GraphiT-GT-LSPE + +""" + +from layers.graphit_gt_layer import GraphiT_GT_Layer +from layers.graphit_gt_lspe_layer import GraphiT_GT_LSPE_Layer +from layers.mlp_readout_layer import MLPReadout + +class GraphiTNet(nn.Module): + def __init__(self, net_params): + super().__init__() + + full_graph = net_params['full_graph'] + gamma = net_params['gamma'] + self.adaptive_edge_PE = net_params['adaptive_edge_PE'] + + GT_layers = net_params['L'] + GT_hidden_dim = net_params['hidden_dim'] + GT_out_dim = net_params['out_dim'] + GT_n_heads = net_params['n_heads'] + + self.residual = net_params['residual'] + self.readout = net_params['readout'] + in_feat_dropout = net_params['in_feat_dropout'] + dropout = net_params['dropout'] + + self.readout = net_params['readout'] + self.layer_norm = net_params['layer_norm'] + self.batch_norm = net_params['batch_norm'] + + n_classes = net_params['n_classes'] + self.device = net_params['device'] + self.in_feat_dropout = nn.Dropout(in_feat_dropout) + self.pe_init = net_params['pe_init'] + + self.use_lapeig_loss = net_params['use_lapeig_loss'] + self.lambda_loss = net_params['lambda_loss'] + self.alpha_loss = net_params['alpha_loss'] + + self.pos_enc_dim = net_params['pos_enc_dim'] + + if self.pe_init in ['rand_walk']: + self.embedding_p = nn.Linear(self.pos_enc_dim, GT_hidden_dim) + + self.embedding_h = AtomEncoder(GT_hidden_dim) + self.embedding_e = BondEncoder(GT_hidden_dim) + + if self.pe_init == 'rand_walk': + # LSPE + self.layers = nn.ModuleList([ GraphiT_GT_LSPE_Layer(gamma, GT_hidden_dim, GT_hidden_dim, GT_n_heads, full_graph, + dropout, self.layer_norm, self.batch_norm, self.residual, self.adaptive_edge_PE) for _ in range(GT_layers-1) ]) + 
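The forward passes of these nets (above and below) mean-center and then L2-normalize the learned positional encodings p within each graph of the batch before the Whp projection. A minimal standalone sketch of that per-graph normalization on a toy DGL batch (illustrative sizes, not repo code):

```python
import dgl
import torch

# Two toy graphs batched together; 'p' holds learned positional encodings.
g = dgl.batch([dgl.rand_graph(4, 8), dgl.rand_graph(3, 6)])
p = torch.randn(g.num_nodes(), 2)

# p_g = p_g - mean(p_g) per graph, via batch-wise broadcast.
g.ndata['p'] = p
means = dgl.mean_nodes(g, 'p')
p = p - means.repeat_interleave(g.batch_num_nodes(), 0)

# p_g = p_g / ||p_g||_2 per graph, with a small epsilon for stability.
g.ndata['p'] = p
g.ndata['p2'] = g.ndata['p'] ** 2
norms = torch.sqrt(dgl.sum_nodes(g, 'p2') + 1e-6)
p = p / norms.repeat_interleave(g.batch_num_nodes(), 0)
g.ndata['p'] = p
```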
self.layers.append(GraphiT_GT_LSPE_Layer(gamma, GT_hidden_dim, GT_out_dim, GT_n_heads, full_graph, + dropout, self.layer_norm, self.batch_norm, self.residual, self.adaptive_edge_PE)) + else: + # NoPE + self.layers = nn.ModuleList([ GraphiT_GT_Layer(gamma, GT_hidden_dim, GT_hidden_dim, GT_n_heads, full_graph, + dropout, self.layer_norm, self.batch_norm, self.residual, self.adaptive_edge_PE) for _ in range(GT_layers-1) ]) + self.layers.append(GraphiT_GT_Layer(gamma, GT_hidden_dim, GT_out_dim, GT_n_heads, full_graph, + dropout, self.layer_norm, self.batch_norm, self.residual, self.adaptive_edge_PE)) + + self.MLP_layer = MLPReadout(GT_out_dim, n_classes) + + if self.pe_init == 'rand_walk': + self.p_out = nn.Linear(GT_out_dim, self.pos_enc_dim) + self.Whp = nn.Linear(GT_out_dim+self.pos_enc_dim, GT_out_dim) + + self.g = None # For util; To be accessed in loss() function + + def forward(self, g, h, p, e, snorm_n): + + h = self.embedding_h(h) + e = self.embedding_e(e) + + if self.pe_init in ['rand_walk']: + p = self.embedding_p(p) + + for conv in self.layers: + h, p = conv(g, h, p, e, snorm_n) + + g.ndata['h'] = h + + if self.pe_init == 'rand_walk': + p = self.p_out(p) + g.ndata['p'] = p + # Implementing p_g = p_g - torch.mean(p_g, dim=0) + means = dgl.mean_nodes(g, 'p') + batch_wise_p_means = means.repeat_interleave(g.batch_num_nodes(), 0) + p = p - batch_wise_p_means + + # Implementing p_g = p_g / torch.norm(p_g, p=2, dim=0) + g.ndata['p'] = p + g.ndata['p2'] = g.ndata['p']**2 + norms = dgl.sum_nodes(g, 'p2') + norms = torch.sqrt(norms+1e-6) + batch_wise_p_l2_norms = norms.repeat_interleave(g.batch_num_nodes(), 0) + p = p / batch_wise_p_l2_norms + g.ndata['p'] = p + + # Concat h and p + hp = self.Whp(torch.cat((g.ndata['h'],g.ndata['p']),dim=-1)) + g.ndata['h'] = hp + + if self.readout == "sum": + hg = dgl.sum_nodes(g, 'h') + elif self.readout == "max": + hg = dgl.max_nodes(g, 'h') + elif self.readout == "mean": + hg = dgl.mean_nodes(g, 'h') + else: + hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes + + self.g = g # For util; To be accessed in loss() function + + return self.MLP_layer(hg), g + + def loss(self, pred, labels): + + # Loss A: Task loss ------------------------------------------------------------- + loss_a = torch.nn.BCEWithLogitsLoss()(pred, labels) + + if self.use_lapeig_loss: + raise NotImplementedError + else: + loss = loss_a + + return loss \ No newline at end of file diff --git a/nets/OGBMOL_graph_classification/load_net.py b/nets/OGBMOL_graph_classification/load_net.py new file mode 100644 index 0000000..9c8c3b5 --- /dev/null +++ b/nets/OGBMOL_graph_classification/load_net.py @@ -0,0 +1,31 @@ +""" + Utility file to select GraphNN model as + selected by the user +""" + +from nets.OGBMOL_graph_classification.gatedgcn_net import GatedGCNNet +from nets.OGBMOL_graph_classification.pna_net import PNANet +from nets.OGBMOL_graph_classification.san_net import SANNet +from nets.OGBMOL_graph_classification.graphit_net import GraphiTNet + +def GatedGCN(net_params): + return GatedGCNNet(net_params) + +def PNA(net_params): + return PNANet(net_params) + +def SAN(net_params): + return SANNet(net_params) + +def GraphiT(net_params): + return GraphiTNet(net_params) + +def gnn_model(MODEL_NAME, net_params): + models = { + 'GatedGCN': GatedGCN, + 'PNA': PNA, + 'SAN': SAN, + 'GraphiT': GraphiT + } + + return models[MODEL_NAME](net_params) \ No newline at end of file diff --git a/nets/OGBMOL_graph_classification/pna_net.py b/nets/OGBMOL_graph_classification/pna_net.py new file mode 
100644 index 0000000..00bf82c --- /dev/null +++ b/nets/OGBMOL_graph_classification/pna_net.py @@ -0,0 +1,199 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl + +from scipy import sparse as sp +from scipy.sparse.linalg import norm + +from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder + +""" + PNA-LSPE + +""" + +from layers.pna_layer import PNANoTowersLayer as PNALayer +from layers.pna_lspe_layer import PNANoTowersLSPELayer as PNALSPELayer +from layers.mlp_readout_layer import MLPReadout2 as MLPReadout + + +class PNANet(nn.Module): + def __init__(self, net_params): + super().__init__() + + hidden_dim = net_params['hidden_dim'] + out_dim = net_params['out_dim'] + n_classes = net_params['n_classes'] + dropout = net_params['dropout'] + self.dropout_2 = net_params['dropout_2'] + + n_layers = net_params['L'] + self.readout = net_params['readout'] + self.graph_norm = net_params['graph_norm'] + self.batch_norm = net_params['batch_norm'] + self.aggregators = net_params['aggregators'] + self.scalers = net_params['scalers'] + self.avg_d = net_params['avg_d'] + self.residual = net_params['residual'] + self.edge_feat = net_params['edge_feat'] + edge_dim = net_params['edge_dim'] + pretrans_layers = net_params['pretrans_layers'] + posttrans_layers = net_params['posttrans_layers'] + self.gru_enable = net_params['gru'] + device = net_params['device'] + self.device = device + self.pe_init = net_params['pe_init'] + self.n_classes = net_params['n_classes'] + + self.use_lapeig_loss = net_params['use_lapeig_loss'] + self.lambda_loss = net_params['lambda_loss'] + self.alpha_loss = net_params['alpha_loss'] + + self.pos_enc_dim = net_params['pos_enc_dim'] + + if self.pe_init in ['rand_walk']: + self.embedding_p = nn.Linear(self.pos_enc_dim, hidden_dim) + + self.embedding_h = AtomEncoder(emb_dim=hidden_dim) + + if self.edge_feat: + self.embedding_e = BondEncoder(emb_dim=edge_dim) + + + if self.pe_init == 'rand_walk': + # LSPE + self.layers = nn.ModuleList( + [PNALSPELayer(in_dim=hidden_dim, out_dim=hidden_dim, dropout=dropout, graph_norm=self.graph_norm, + batch_norm=self.batch_norm, aggregators=self.aggregators, scalers=self.scalers, avg_d=self.avg_d, + pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers, residual=self.residual, + edge_features=self.edge_feat, edge_dim=edge_dim, use_lapeig_loss=self.use_lapeig_loss) + for _ in range(n_layers - 1)]) + self.layers.append(PNALSPELayer(in_dim=hidden_dim, out_dim=out_dim, dropout=dropout, graph_norm=self.graph_norm, + batch_norm=self.batch_norm, aggregators=self.aggregators, scalers=self.scalers, avg_d=self.avg_d, + pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers, + residual=self.residual, edge_features=self.edge_feat, edge_dim=edge_dim, use_lapeig_loss=self.use_lapeig_loss)) + + else: + # NoPE + self.layers = nn.ModuleList( + [PNALayer(in_dim=hidden_dim, out_dim=hidden_dim, dropout=dropout, graph_norm=self.graph_norm, + batch_norm=self.batch_norm, aggregators=self.aggregators, scalers=self.scalers, avg_d=self.avg_d, + pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers, residual=self.residual, + edge_features=self.edge_feat, edge_dim=edge_dim, use_lapeig_loss=self.use_lapeig_loss) + for _ in range(n_layers - 1)]) + self.layers.append(PNALayer(in_dim=hidden_dim, out_dim=out_dim, dropout=dropout, graph_norm=self.graph_norm, + batch_norm=self.batch_norm, aggregators=self.aggregators, scalers=self.scalers, avg_d=self.avg_d, + pretrans_layers=pretrans_layers, 
posttrans_layers=posttrans_layers, + residual=self.residual, edge_features=self.edge_feat, edge_dim=edge_dim, use_lapeig_loss=self.use_lapeig_loss)) + + if self.gru_enable: + self.gru = GRU(hidden_dim, hidden_dim, device) + + self.MLP_layer = MLPReadout(out_dim, n_classes, self.dropout_2) + + if self.pe_init == 'rand_walk': + self.p_out = nn.Linear(out_dim, self.pos_enc_dim) + self.Whp = nn.Linear(out_dim+self.pos_enc_dim, out_dim) + + self.g = None # For util; To be accessed in loss() function + + def forward(self, g, h, p, e, snorm_n): + + h = self.embedding_h(h) + + if self.pe_init in ['rand_walk']: + p = self.embedding_p(p) + + if self.edge_feat: + e = self.embedding_e(e) + + for i, conv in enumerate(self.layers): + h_t, p_t = conv(g, h, p, e, snorm_n) + if self.gru_enable and i != len(self.layers) - 1: + h_t = self.gru(h, h_t) + h, p = h_t, p_t + + g.ndata['h'] = h + + if self.pe_init == 'rand_walk': + p = F.dropout(p, self.dropout_2, training=self.training) + p = self.p_out(p) + g.ndata['p'] = p + + if self.use_lapeig_loss: + # Implementing p_g = p_g - torch.mean(p_g, dim=0) + means = dgl.mean_nodes(g, 'p') + batch_wise_p_means = means.repeat_interleave(g.batch_num_nodes(), 0) + p = p - batch_wise_p_means + + # Implementing p_g = p_g / torch.norm(p_g, p=2, dim=0) + g.ndata['p'] = p + g.ndata['p2'] = g.ndata['p']**2 + norms = dgl.sum_nodes(g, 'p2') + norms = torch.sqrt(norms+1e-6) + batch_wise_p_l2_norms = norms.repeat_interleave(g.batch_num_nodes(), 0) + p = p / batch_wise_p_l2_norms + g.ndata['p'] = p + + if self.pe_init == 'rand_walk': + # Concat h and p + hp = torch.cat((g.ndata['h'],g.ndata['p']),dim=-1) + hp = F.dropout(hp, self.dropout_2, training=self.training) + hp = self.Whp(hp) + g.ndata['h'] = hp + + if self.readout == "sum": + hg = dgl.sum_nodes(g, 'h') + elif self.readout == "max": + hg = dgl.max_nodes(g, 'h') + elif self.readout == "mean": + hg = dgl.mean_nodes(g, 'h') + else: + hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes + + self.g = g # For util; To be accessed in loss() function + + if self.n_classes == 128: + return_g = None # not passing PCBA graphs due to memory + else: + return_g = g + + return self.MLP_layer(hg), return_g + + def loss(self, pred, labels): + + # Loss A: Task loss ------------------------------------------------------------- + loss_a = torch.nn.BCEWithLogitsLoss()(pred, labels) + + if self.use_lapeig_loss: + # Loss B: Laplacian Eigenvector Loss -------------------------------------------- + g = self.g + n = g.number_of_nodes() + + # Laplacian + A = g.adjacency_matrix(scipy_fmt="csr") + N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) + L = sp.eye(n) - N * A * N + + p = g.ndata['p'] + pT = torch.transpose(p, 1, 0) + loss_b_1 = torch.trace(torch.mm(torch.mm(pT, torch.Tensor(L.todense()).to(self.device)), p)) + + # Correct batch-graph wise loss_b_2 implementation; using a block diagonal matrix + bg = dgl.unbatch(g) + batch_size = len(bg) + P = sp.block_diag([bg[i].ndata['p'].detach().cpu() for i in range(batch_size)]) + PTP_In = P.T * P - sp.eye(P.shape[1]) + loss_b_2 = torch.tensor(norm(PTP_In, 'fro')**2).float().to(self.device) + + loss_b = ( loss_b_1 + self.lambda_loss * loss_b_2 ) / ( self.pos_enc_dim * batch_size * n) + + del bg, P, PTP_In, loss_b_1, loss_b_2 + + loss = loss_a + self.alpha_loss * loss_b + else: + loss = loss_a + + return loss \ No newline at end of file diff --git a/nets/OGBMOL_graph_classification/san_net.py b/nets/OGBMOL_graph_classification/san_net.py new file mode 100644 index 
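With the shapes used in the loss() above — P the n-by-k matrix of positional encodings for all n nodes in a batch of b graphs, k = pos_enc_dim, P_g the rows belonging to graph g, and L the symmetric-normalised Laplacian of the batched graph — loss_b_1 and loss_b_2 implement:

```latex
\mathcal{L} \;=\; \mathcal{L}_{\mathrm{BCE}}
\;+\; \frac{\alpha}{k \, b \, n}\left(
    \operatorname{tr}\!\left(P^{\top} L P\right)
    \;+\; \lambda \sum_{g=1}^{b} \bigl\lVert P_g^{\top} P_g - I_k \bigr\rVert_F^{2}
\right),
\qquad L \;=\; I - D^{-1/2} A D^{-1/2}
```

The trace term pulls the learned encodings toward low-frequency eigenvectors of L; the Frobenius term penalises deviation from per-graph orthonormality, which is why loss_b_2 is built from a block-diagonal P rather than the raw batch matrix. Here alpha is alpha_loss and lambda is lambda_loss.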
0000000..62f9791 --- /dev/null +++ b/nets/OGBMOL_graph_classification/san_net.py @@ -0,0 +1,141 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl + +from scipy import sparse as sp +from scipy.sparse.linalg import norm + +from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder + +""" + SAN-GT and SAN-GT-LSPE + +""" + +from layers.san_gt_layer import SAN_GT_Layer +from layers.san_gt_lspe_layer import SAN_GT_LSPE_Layer +from layers.mlp_readout_layer import MLPReadout + + +class SANNet(nn.Module): + def __init__(self, net_params): + super().__init__() + + full_graph = net_params['full_graph'] + init_gamma = net_params['init_gamma'] + + # learn gamma + self.gamma = nn.Parameter(torch.FloatTensor([init_gamma])) + + GT_layers = net_params['L'] + GT_hidden_dim = net_params['hidden_dim'] + GT_out_dim = net_params['out_dim'] + GT_n_heads = net_params['n_heads'] + + self.residual = net_params['residual'] + self.readout = net_params['readout'] + in_feat_dropout = net_params['in_feat_dropout'] + dropout = net_params['dropout'] + + self.readout = net_params['readout'] + self.layer_norm = net_params['layer_norm'] + self.batch_norm = net_params['batch_norm'] + + n_classes = net_params['n_classes'] + self.device = net_params['device'] + self.in_feat_dropout = nn.Dropout(in_feat_dropout) + self.pe_init = net_params['pe_init'] + + self.use_lapeig_loss = net_params['use_lapeig_loss'] + self.lambda_loss = net_params['lambda_loss'] + self.alpha_loss = net_params['alpha_loss'] + + self.pos_enc_dim = net_params['pos_enc_dim'] + + if self.pe_init in ['rand_walk']: + self.embedding_p = nn.Linear(self.pos_enc_dim, GT_hidden_dim) + + self.embedding_h = AtomEncoder(GT_hidden_dim) + self.embedding_e = BondEncoder(GT_hidden_dim) + + if self.pe_init == 'rand_walk': + # LSPE + self.layers = nn.ModuleList([ SAN_GT_LSPE_Layer(self.gamma, GT_hidden_dim, GT_hidden_dim, GT_n_heads, full_graph, + dropout, self.layer_norm, self.batch_norm, self.residual) for _ in range(GT_layers-1) ]) + self.layers.append(SAN_GT_LSPE_Layer(self.gamma, GT_hidden_dim, GT_out_dim, GT_n_heads, full_graph, + dropout, self.layer_norm, self.batch_norm, self.residual)) + else: + # NoPE + self.layers = nn.ModuleList([ SAN_GT_Layer(self.gamma, GT_hidden_dim, GT_hidden_dim, GT_n_heads, full_graph, + dropout, self.layer_norm, self.batch_norm, self.residual) for _ in range(GT_layers-1) ]) + self.layers.append(SAN_GT_Layer(self.gamma, GT_hidden_dim, GT_out_dim, GT_n_heads, full_graph, + dropout, self.layer_norm, self.batch_norm, self.residual)) + + self.MLP_layer = MLPReadout(GT_out_dim, n_classes) + + if self.pe_init == 'rand_walk': + self.p_out = nn.Linear(GT_out_dim, self.pos_enc_dim) + self.Whp = nn.Linear(GT_out_dim+self.pos_enc_dim, GT_out_dim) + + self.g = None # For util; To be accessed in loss() function + + def forward(self, g, h, p, e, snorm_n): + + h = self.embedding_h(h) + e = self.embedding_e(e) + + if self.pe_init in ['rand_walk']: + p = self.embedding_p(p) + + for conv in self.layers: + h, p = conv(g, h, p, e, snorm_n) + + g.ndata['h'] = h + + if self.pe_init == 'rand_walk': + p = self.p_out(p) + g.ndata['p'] = p + # Implementing p_g = p_g - torch.mean(p_g, dim=0) + means = dgl.mean_nodes(g, 'p') + batch_wise_p_means = means.repeat_interleave(g.batch_num_nodes(), 0) + p = p - batch_wise_p_means + + # Implementing p_g = p_g / torch.norm(p_g, p=2, dim=0) + g.ndata['p'] = p + g.ndata['p2'] = g.ndata['p']**2 + norms = dgl.sum_nodes(g, 'p2') + norms = torch.sqrt(norms+1e-6) + batch_wise_p_l2_norms = 
norms.repeat_interleave(g.batch_num_nodes(), 0) + p = p / batch_wise_p_l2_norms + g.ndata['p'] = p + + # Concat h and p + hp = self.Whp(torch.cat((g.ndata['h'],g.ndata['p']),dim=-1)) + g.ndata['h'] = hp + + if self.readout == "sum": + hg = dgl.sum_nodes(g, 'h') + elif self.readout == "max": + hg = dgl.max_nodes(g, 'h') + elif self.readout == "mean": + hg = dgl.mean_nodes(g, 'h') + else: + hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes + + self.g = g # For util; To be accessed in loss() function + + return self.MLP_layer(hg), g + + def loss(self, pred, labels): + + # Loss A: Task loss ------------------------------------------------------------- + loss_a = torch.nn.BCEWithLogitsLoss()(pred, labels) + + if self.use_lapeig_loss: + raise NotImplementedError + else: + loss = loss_a + + return loss \ No newline at end of file diff --git a/nets/ZINC_graph_regression/gatedgcn_net.py b/nets/ZINC_graph_regression/gatedgcn_net.py new file mode 100644 index 0000000..d3150a1 --- /dev/null +++ b/nets/ZINC_graph_regression/gatedgcn_net.py @@ -0,0 +1,171 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl + +from scipy import sparse as sp +from scipy.sparse.linalg import norm + +""" + GatedGCN and GatedGCN-LSPE + +""" +from layers.gatedgcn_layer import GatedGCNLayer +from layers.gatedgcn_lspe_layer import GatedGCNLSPELayer +from layers.mlp_readout_layer import MLPReadout + +class GatedGCNNet(nn.Module): + def __init__(self, net_params): + super().__init__() + num_atom_type = net_params['num_atom_type'] + num_bond_type = net_params['num_bond_type'] + hidden_dim = net_params['hidden_dim'] + out_dim = net_params['out_dim'] + in_feat_dropout = net_params['in_feat_dropout'] + dropout = net_params['dropout'] + self.n_layers = net_params['L'] + self.readout = net_params['readout'] + self.batch_norm = net_params['batch_norm'] + self.residual = net_params['residual'] + self.edge_feat = net_params['edge_feat'] + self.device = net_params['device'] + self.pe_init = net_params['pe_init'] + + self.use_lapeig_loss = net_params['use_lapeig_loss'] + self.lambda_loss = net_params['lambda_loss'] + self.alpha_loss = net_params['alpha_loss'] + + self.pos_enc_dim = net_params['pos_enc_dim'] + + if self.pe_init in ['rand_walk', 'lap_pe']: + self.embedding_p = nn.Linear(self.pos_enc_dim, hidden_dim) + + self.embedding_h = nn.Embedding(num_atom_type, hidden_dim) + + if self.edge_feat: + self.embedding_e = nn.Embedding(num_bond_type, hidden_dim) + else: + self.embedding_e = nn.Linear(1, hidden_dim) + + self.in_feat_dropout = nn.Dropout(in_feat_dropout) + + if self.pe_init == 'rand_walk': + # LSPE + self.layers = nn.ModuleList([ GatedGCNLSPELayer(hidden_dim, hidden_dim, dropout, + self.batch_norm, residual=self.residual) for _ in range(self.n_layers-1) ]) + self.layers.append(GatedGCNLSPELayer(hidden_dim, out_dim, dropout, self.batch_norm, residual=self.residual)) + else: + # NoPE or LapPE + self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout, + self.batch_norm, residual=self.residual, graph_norm=False) for _ in range(self.n_layers-1) ]) + self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm, residual=self.residual, graph_norm=False)) + + self.MLP_layer = MLPReadout(out_dim, 1) # 1 out dim since regression problem + + if self.pe_init == 'rand_walk': + self.p_out = nn.Linear(out_dim, self.pos_enc_dim) + self.Whp = nn.Linear(out_dim+self.pos_enc_dim, out_dim) + + self.g = None # For util; To be accessed in loss() function + + + def 
forward(self, g, h, p, e, snorm_n): + + # input embedding + h = self.embedding_h(h) + h = self.in_feat_dropout(h) + + if self.pe_init in ['rand_walk', 'lap_pe']: + p = self.embedding_p(p) + + if self.pe_init == 'lap_pe': + h = h + p + p = None + + if not self.edge_feat: # edge feature set to 1 + e = torch.ones(e.size(0),1).to(self.device) + e = self.embedding_e(e) + + + # convnets + for conv in self.layers: + h, p, e = conv(g, h, p, e, snorm_n) + + g.ndata['h'] = h + + if self.pe_init == 'rand_walk': + # Implementing p_g = p_g - torch.mean(p_g, dim=0) + p = self.p_out(p) + g.ndata['p'] = p + means = dgl.mean_nodes(g, 'p') + batch_wise_p_means = means.repeat_interleave(g.batch_num_nodes(), 0) + p = p - batch_wise_p_means + + # Implementing p_g = p_g / torch.norm(p_g, p=2, dim=0) + g.ndata['p'] = p + g.ndata['p2'] = g.ndata['p']**2 + norms = dgl.sum_nodes(g, 'p2') + norms = torch.sqrt(norms+1e-6) + batch_wise_p_l2_norms = norms.repeat_interleave(g.batch_num_nodes(), 0) + p = p / batch_wise_p_l2_norms + g.ndata['p'] = p + + # Concat h and p + hp = self.Whp(torch.cat((g.ndata['h'],g.ndata['p']),dim=-1)) + g.ndata['h'] = hp + + # readout + if self.readout == "sum": + hg = dgl.sum_nodes(g, 'h') + elif self.readout == "max": + hg = dgl.max_nodes(g, 'h') + elif self.readout == "mean": + hg = dgl.mean_nodes(g, 'h') + else: + hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes + + self.g = g # For util; To be accessed in loss() function + + return self.MLP_layer(hg), g + + def loss(self, scores, targets): + + # Loss A: Task loss ------------------------------------------------------------- + loss_a = nn.L1Loss()(scores, targets) + + if self.use_lapeig_loss: + # Loss B: Laplacian Eigenvector Loss -------------------------------------------- + g = self.g + n = g.number_of_nodes() + + # Laplacian + A = g.adjacency_matrix(scipy_fmt="csr") + N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) + L = sp.eye(n) - N * A * N + + p = g.ndata['p'] + pT = torch.transpose(p, 1, 0) + loss_b_1 = torch.trace(torch.mm(torch.mm(pT, torch.Tensor(L.todense()).to(self.device)), p)) + + # Correct batch-graph wise loss_b_2 implementation; using a block diagonal matrix + bg = dgl.unbatch(g) + batch_size = len(bg) + P = sp.block_diag([bg[i].ndata['p'].detach().cpu() for i in range(batch_size)]) + PTP_In = P.T * P - sp.eye(P.shape[1]) + loss_b_2 = torch.tensor(norm(PTP_In, 'fro')**2).float().to(self.device) + + loss_b = ( loss_b_1 + self.lambda_loss * loss_b_2 ) / ( self.pos_enc_dim * batch_size * n) + + del bg, P, PTP_In, loss_b_1, loss_b_2 + + loss = loss_a + self.alpha_loss * loss_b + else: + loss = loss_a + + return loss + + + + + \ No newline at end of file diff --git a/nets/ZINC_graph_regression/graphit_net.py b/nets/ZINC_graph_regression/graphit_net.py new file mode 100644 index 0000000..48b28f1 --- /dev/null +++ b/nets/ZINC_graph_regression/graphit_net.py @@ -0,0 +1,145 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl +import numpy as np + +from scipy import sparse as sp + +""" + GraphiT-GT and GraphiT-GT-LSPE + +""" +from layers.graphit_gt_layer import GraphiT_GT_Layer +from layers.graphit_gt_lspe_layer import GraphiT_GT_LSPE_Layer +from layers.mlp_readout_layer import MLPReadout + +class GraphiTNet(nn.Module): + def __init__(self, net_params): + super().__init__() + + num_atom_type = net_params['num_atom_type'] + num_bond_type = net_params['num_bond_type'] + + full_graph = net_params['full_graph'] + gamma = net_params['gamma'] + 
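The Laplacian assembled inside loss() above can be reproduced standalone; a small sketch using the same DGL/scipy calls as this patch (adjacency_matrix(scipy_fmt=...) matches the DGL versions targeted here; newer releases expose the scipy adjacency differently):

```python
import dgl
from scipy import sparse as sp

g = dgl.graph(([0, 1, 2], [1, 2, 0]))               # toy 3-node cycle
n = g.number_of_nodes()

A = g.adjacency_matrix(scipy_fmt="csr")              # sparse adjacency, as in loss()
deg = dgl.backend.asnumpy(g.in_degrees()).clip(1)    # clip(1) guards isolated nodes
N = sp.diags(deg ** -0.5, dtype=float)               # D^{-1/2}
L = sp.eye(n) - N * A * N                            # L = I - D^{-1/2} A D^{-1/2}
```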
self.adaptive_edge_PE = net_params['adaptive_edge_PE'] + + GT_layers = net_params['L'] + GT_hidden_dim = net_params['hidden_dim'] + GT_out_dim = net_params['out_dim'] + GT_n_heads = net_params['n_heads'] + + self.residual = net_params['residual'] + self.readout = net_params['readout'] + in_feat_dropout = net_params['in_feat_dropout'] + dropout = net_params['dropout'] + + self.readout = net_params['readout'] + self.layer_norm = net_params['layer_norm'] + self.batch_norm = net_params['batch_norm'] + + self.device = net_params['device'] + self.in_feat_dropout = nn.Dropout(in_feat_dropout) + self.pe_init = net_params['pe_init'] + + self.use_lapeig_loss = net_params['use_lapeig_loss'] + self.lambda_loss = net_params['lambda_loss'] + self.alpha_loss = net_params['alpha_loss'] + + self.pos_enc_dim = net_params['pos_enc_dim'] + + if self.pe_init in ['rand_walk']: + self.embedding_p = nn.Linear(self.pos_enc_dim, GT_hidden_dim) + + self.embedding_h = nn.Embedding(num_atom_type, GT_hidden_dim) + self.embedding_e = nn.Embedding(num_bond_type, GT_hidden_dim) + + if self.pe_init == 'rand_walk': + # LSPE + self.layers = nn.ModuleList([ GraphiT_GT_LSPE_Layer(gamma, GT_hidden_dim, GT_hidden_dim, GT_n_heads, full_graph, dropout, + self.layer_norm, self.batch_norm, self.residual, self.adaptive_edge_PE) for _ in range(GT_layers-1) ]) + self.layers.append(GraphiT_GT_LSPE_Layer(gamma, GT_hidden_dim, GT_out_dim, GT_n_heads, full_graph, dropout, + self.layer_norm, self.batch_norm, self.residual, self.adaptive_edge_PE)) + else: + # NoPE + self.layers = nn.ModuleList([ GraphiT_GT_Layer(gamma, GT_hidden_dim, GT_hidden_dim, GT_n_heads, full_graph, dropout, + self.layer_norm, self.batch_norm, self.residual, self.adaptive_edge_PE) for _ in range(GT_layers-1) ]) + self.layers.append(GraphiT_GT_Layer(gamma, GT_hidden_dim, GT_out_dim, GT_n_heads, full_graph, dropout, + self.layer_norm, self.batch_norm, self.residual, self.adaptive_edge_PE)) + + self.MLP_layer = MLPReadout(GT_out_dim, 1) # 1 out dim since regression problem + + if self.pe_init == 'rand_walk': + self.p_out = nn.Linear(GT_out_dim, self.pos_enc_dim) + self.Whp = nn.Linear(GT_out_dim+self.pos_enc_dim, GT_out_dim) + + self.g = None # For util; To be accessed in loss() function + + + def forward(self, g, h, p, e, snorm_n): + + # input embedding + h = self.embedding_h(h) + e = self.embedding_e(e) + + h = self.in_feat_dropout(h) + + if self.pe_init in ['rand_walk']: + p = self.embedding_p(p) + + # GNN + for conv in self.layers: + h, p = conv(g, h, p, e, snorm_n) + g.ndata['h'] = h + + if self.pe_init == 'rand_walk': + p = self.p_out(p) + g.ndata['p'] = p + + if self.use_lapeig_loss and self.pe_init == 'rand_walk': + # Implementing p_g = p_g - torch.mean(p_g, dim=0) + means = dgl.mean_nodes(g, 'p') + batch_wise_p_means = means.repeat_interleave(g.batch_num_nodes(), 0) + p = p - batch_wise_p_means + + # Implementing p_g = p_g / torch.norm(p_g, p=2, dim=0) + g.ndata['p'] = p + g.ndata['p2'] = g.ndata['p']**2 + norms = dgl.sum_nodes(g, 'p2') + norms = torch.sqrt(norms+1e-6) + batch_wise_p_l2_norms = norms.repeat_interleave(g.batch_num_nodes(), 0) + p = p / batch_wise_p_l2_norms + g.ndata['p'] = p + + if self.pe_init == 'rand_walk': + # Concat h and p + hp = self.Whp(torch.cat((g.ndata['h'],g.ndata['p']),dim=-1)) + g.ndata['h'] = hp + + # readout + if self.readout == "sum": + hg = dgl.sum_nodes(g, 'h') + elif self.readout == "max": + hg = dgl.max_nodes(g, 'h') + elif self.readout == "mean": + hg = dgl.mean_nodes(g, 'h') + else: + hg = dgl.mean_nodes(g, 'h') # default 
readout is mean nodes + + self.g = g # For util; To be accessed in loss() function + + return self.MLP_layer(hg), g + + def loss(self, scores, targets): + + # Loss A: Task loss ------------------------------------------------------------- + loss_a = nn.L1Loss()(scores, targets) + + if self.use_lapeig_loss: + raise NotImplementedError + else: + loss = loss_a + + return loss \ No newline at end of file diff --git a/nets/ZINC_graph_regression/load_net.py b/nets/ZINC_graph_regression/load_net.py new file mode 100644 index 0000000..03dd401 --- /dev/null +++ b/nets/ZINC_graph_regression/load_net.py @@ -0,0 +1,31 @@ +""" + Utility file to select GraphNN model as + selected by the user +""" + +from nets.ZINC_graph_regression.gatedgcn_net import GatedGCNNet +from nets.ZINC_graph_regression.pna_net import PNANet +from nets.ZINC_graph_regression.san_net import SANNet +from nets.ZINC_graph_regression.graphit_net import GraphiTNet + +def GatedGCN(net_params): + return GatedGCNNet(net_params) + +def PNA(net_params): + return PNANet(net_params) + +def SAN(net_params): + return SANNet(net_params) + +def GraphiT(net_params): + return GraphiTNet(net_params) + +def gnn_model(MODEL_NAME, net_params): + models = { + 'GatedGCN': GatedGCN, + 'PNA': PNA, + 'SAN': SAN, + 'GraphiT': GraphiT + } + + return models[MODEL_NAME](net_params) \ No newline at end of file diff --git a/nets/ZINC_graph_regression/pna_net.py b/nets/ZINC_graph_regression/pna_net.py new file mode 100644 index 0000000..8ca7f2b --- /dev/null +++ b/nets/ZINC_graph_regression/pna_net.py @@ -0,0 +1,168 @@ +import torch +import torch.nn as nn +import dgl +from scipy import sparse as sp +from scipy.sparse.linalg import norm + + +""" + PNA and PNA-LSPE + +""" + +from layers.pna_layer import PNALayer +from layers.pna_lspe_layer import PNALSPELayer +from layers.pna_utils import GRU +from layers.mlp_readout_layer import MLPReadout + +class PNANet(nn.Module): + def __init__(self, net_params): + super().__init__() + num_atom_type = net_params['num_atom_type'] + num_bond_type = net_params['num_bond_type'] + hidden_dim = net_params['hidden_dim'] + out_dim = net_params['out_dim'] + in_feat_dropout = net_params['in_feat_dropout'] + dropout = net_params['dropout'] + n_layers = net_params['L'] + self.readout = net_params['readout'] + self.graph_norm = net_params['graph_norm'] + self.batch_norm = net_params['batch_norm'] + self.residual = net_params['residual'] + self.aggregators = net_params['aggregators'] + self.scalers = net_params['scalers'] + self.avg_d = net_params['avg_d'] + self.towers = net_params['towers'] + self.divide_input_first = net_params['divide_input_first'] + self.divide_input_last = net_params['divide_input_last'] + self.edge_feat = net_params['edge_feat'] + edge_dim = net_params['edge_dim'] + pretrans_layers = net_params['pretrans_layers'] + posttrans_layers = net_params['posttrans_layers'] + self.gru_enable = net_params['gru'] + device = net_params['device'] + self.device = device + self.pe_init = net_params['pe_init'] + + self.use_lapeig_loss = net_params['use_lapeig_loss'] + self.lambda_loss = net_params['lambda_loss'] + self.alpha_loss = net_params['alpha_loss'] + + self.pos_enc_dim = net_params['pos_enc_dim'] + + if self.pe_init in ['rand_walk']: + self.embedding_p = nn.Linear(self.pos_enc_dim, hidden_dim) + + self.in_feat_dropout = nn.Dropout(in_feat_dropout) + + self.embedding_h = nn.Embedding(num_atom_type, hidden_dim) + + if self.edge_feat: + self.embedding_e = nn.Embedding(num_bond_type, edge_dim) + + + if self.pe_init == 'rand_walk': 
+ # LSPE + self.layers = nn.ModuleList([PNALSPELayer(in_dim=hidden_dim, out_dim=hidden_dim, dropout=dropout, + graph_norm=self.graph_norm, batch_norm=self.batch_norm, + residual=self.residual, aggregators=self.aggregators, scalers=self.scalers, + avg_d=self.avg_d, towers=self.towers, edge_features=self.edge_feat, + edge_dim=edge_dim, divide_input=self.divide_input_first, + pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers) for _ + in range(n_layers - 1)]) + self.layers.append(PNALSPELayer(in_dim=hidden_dim, out_dim=out_dim, dropout=dropout, + graph_norm=self.graph_norm, batch_norm=self.batch_norm, + residual=self.residual, aggregators=self.aggregators, scalers=self.scalers, + avg_d=self.avg_d, towers=self.towers, divide_input=self.divide_input_last, + edge_features=self.edge_feat, edge_dim=edge_dim, + pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers)) + else: + # NoPE + self.layers = nn.ModuleList([PNALayer(in_dim=hidden_dim, out_dim=hidden_dim, dropout=dropout, + graph_norm=self.graph_norm, batch_norm=self.batch_norm, + residual=self.residual, aggregators=self.aggregators, scalers=self.scalers, + avg_d=self.avg_d, towers=self.towers, edge_features=self.edge_feat, + edge_dim=edge_dim, divide_input=self.divide_input_first, + pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers) for _ + in range(n_layers - 1)]) + self.layers.append(PNALayer(in_dim=hidden_dim, out_dim=out_dim, dropout=dropout, + graph_norm=self.graph_norm, batch_norm=self.batch_norm, + residual=self.residual, aggregators=self.aggregators, scalers=self.scalers, + avg_d=self.avg_d, towers=self.towers, divide_input=self.divide_input_last, + edge_features=self.edge_feat, edge_dim=edge_dim, + pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers)) + + if self.gru_enable: + self.gru = GRU(hidden_dim, hidden_dim, device) + + self.MLP_layer = MLPReadout(out_dim, 1) # 1 out dim since regression problem + + if self.pe_init == 'rand_walk': + self.p_out = nn.Linear(out_dim, self.pos_enc_dim) + self.Whp = nn.Linear(out_dim+self.pos_enc_dim, out_dim) + + self.g = None # For util; To be accessed in loss() function + + def forward(self, g, h, p, e, snorm_n): + h = self.embedding_h(h) + h = self.in_feat_dropout(h) + + if self.pe_init in ['rand_walk']: + p = self.embedding_p(p) + + if self.edge_feat: + e = self.embedding_e(e) + + for i, conv in enumerate(self.layers): + h_t, p_t = conv(g, h, p, e, snorm_n) + if self.gru_enable and i != len(self.layers) - 1: + h_t = self.gru(h, h_t) + h, p = h_t, p_t + + g.ndata['h'] = h + + if self.pe_init == 'rand_walk': + # Implementing p_g = p_g - torch.mean(p_g, dim=0) + p = self.p_out(p) + g.ndata['p'] = p + means = dgl.mean_nodes(g, 'p') + batch_wise_p_means = means.repeat_interleave(g.batch_num_nodes(), 0) + p = p - batch_wise_p_means + + # Implementing p_g = p_g / torch.norm(p_g, p=2, dim=0) + g.ndata['p'] = p + g.ndata['p2'] = g.ndata['p']**2 + norms = dgl.sum_nodes(g, 'p2') + norms = torch.sqrt(norms+1e-6) + batch_wise_p_l2_norms = norms.repeat_interleave(g.batch_num_nodes(), 0) + p = p / batch_wise_p_l2_norms + g.ndata['p'] = p + + # Concat h and p + hp = self.Whp(torch.cat((g.ndata['h'],g.ndata['p']),dim=-1)) + g.ndata['h'] = hp + + if self.readout == "sum": + hg = dgl.sum_nodes(g, 'h') + elif self.readout == "max": + hg = dgl.max_nodes(g, 'h') + elif self.readout == "mean": + hg = dgl.mean_nodes(g, 'h') + else: + hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes + + self.g = g # For util; To be accessed in loss() function + + 
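When net_params['gru'] is true, the layer loop above gates each layer's output against its input using the GRU wrapper imported from layers/pna_utils.py. A minimal analogue built on torch's own cell — the wrapper's argument order and any internal reshaping are not shown in this patch, so this only illustrates the gating pattern, not the exact module:

```python
import torch
import torch.nn as nn

gru = nn.GRUCell(input_size=64, hidden_size=64)

h_prev = torch.randn(10, 64)   # node states entering the PNA layer
h_new = torch.randn(10, 64)    # states produced by the layer
h = gru(h_new, h_prev)         # gated mix of old and new state, shape preserved
```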
return self.MLP_layer(hg), g + + def loss(self, scores, targets): + + # Loss A: Task loss ------------------------------------------------------------- + loss_a = nn.L1Loss()(scores, targets) + + if self.use_lapeig_loss: + raise NotImplementedError + else: + loss = loss_a + + return loss \ No newline at end of file diff --git a/nets/ZINC_graph_regression/san_net.py b/nets/ZINC_graph_regression/san_net.py new file mode 100644 index 0000000..0951e49 --- /dev/null +++ b/nets/ZINC_graph_regression/san_net.py @@ -0,0 +1,147 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import dgl +import numpy as np + +from scipy import sparse as sp + +""" + SAN-GT and SAN-GT-LSPE + +""" +from layers.san_gt_layer import SAN_GT_Layer +from layers.san_gt_lspe_layer import SAN_GT_LSPE_Layer +from layers.mlp_readout_layer import MLPReadout + +class SANNet(nn.Module): + def __init__(self, net_params): + super().__init__() + + num_atom_type = net_params['num_atom_type'] + num_bond_type = net_params['num_bond_type'] + + full_graph = net_params['full_graph'] + init_gamma = net_params['init_gamma'] + + # learn gamma + self.gamma = nn.Parameter(torch.FloatTensor([init_gamma])) + + GT_layers = net_params['L'] + GT_hidden_dim = net_params['hidden_dim'] + GT_out_dim = net_params['out_dim'] + GT_n_heads = net_params['n_heads'] + + self.residual = net_params['residual'] + self.readout = net_params['readout'] + in_feat_dropout = net_params['in_feat_dropout'] + dropout = net_params['dropout'] + + self.readout = net_params['readout'] + self.layer_norm = net_params['layer_norm'] + self.batch_norm = net_params['batch_norm'] + + self.device = net_params['device'] + self.in_feat_dropout = nn.Dropout(in_feat_dropout) + self.pe_init = net_params['pe_init'] + + self.use_lapeig_loss = net_params['use_lapeig_loss'] + self.lambda_loss = net_params['lambda_loss'] + self.alpha_loss = net_params['alpha_loss'] + + self.pos_enc_dim = net_params['pos_enc_dim'] + + if self.pe_init in ['rand_walk']: + self.embedding_p = nn.Linear(self.pos_enc_dim, GT_hidden_dim) + + self.embedding_h = nn.Embedding(num_atom_type, GT_hidden_dim) + self.embedding_e = nn.Embedding(num_bond_type, GT_hidden_dim) + + if self.pe_init == 'rand_walk': + # LSPE + self.layers = nn.ModuleList([ SAN_GT_LSPE_Layer(self.gamma, GT_hidden_dim, GT_hidden_dim, GT_n_heads, full_graph, + dropout, self.layer_norm, self.batch_norm, self.residual) for _ in range(GT_layers-1) ]) + self.layers.append(SAN_GT_LSPE_Layer(self.gamma, GT_hidden_dim, GT_out_dim, GT_n_heads, full_graph, + dropout, self.layer_norm, self.batch_norm, self.residual)) + else: + # NoPE + self.layers = nn.ModuleList([ SAN_GT_Layer(self.gamma, GT_hidden_dim, GT_hidden_dim, GT_n_heads, full_graph, + dropout, self.layer_norm, self.batch_norm, self.residual) for _ in range(GT_layers-1) ]) + self.layers.append(SAN_GT_Layer(self.gamma, GT_hidden_dim, GT_out_dim, GT_n_heads, full_graph, + dropout, self.layer_norm, self.batch_norm, self.residual)) + + self.MLP_layer = MLPReadout(GT_out_dim, 1) # 1 out dim since regression problem + + if self.pe_init == 'rand_walk': + self.p_out = nn.Linear(GT_out_dim, self.pos_enc_dim) + self.Whp = nn.Linear(GT_out_dim+self.pos_enc_dim, GT_out_dim) + + self.g = None # For util; To be accessed in loss() function + + + def forward(self, g, h, p, e, snorm_n): + + # input embedding + h = self.embedding_h(h) + e = self.embedding_e(e) + + h = self.in_feat_dropout(h) + + if self.pe_init in ['rand_walk']: + p = self.embedding_p(p) + + # GNN + for conv in self.layers: + h, 
p = conv(g, h, p, e, snorm_n) + g.ndata['h'] = h + + if self.pe_init == 'rand_walk': + p = self.p_out(p) + g.ndata['p'] = p + + if self.use_lapeig_loss and self.pe_init == 'rand_walk': + # Implementing p_g = p_g - torch.mean(p_g, dim=0) + means = dgl.mean_nodes(g, 'p') + batch_wise_p_means = means.repeat_interleave(g.batch_num_nodes(), 0) + p = p - batch_wise_p_means + + # Implementing p_g = p_g / torch.norm(p_g, p=2, dim=0) + g.ndata['p'] = p + g.ndata['p2'] = g.ndata['p']**2 + norms = dgl.sum_nodes(g, 'p2') + norms = torch.sqrt(norms+1e-6) + batch_wise_p_l2_norms = norms.repeat_interleave(g.batch_num_nodes(), 0) + p = p / batch_wise_p_l2_norms + g.ndata['p'] = p + + if self.pe_init == 'rand_walk': + # Concat h and p + hp = self.Whp(torch.cat((g.ndata['h'],g.ndata['p']),dim=-1)) + g.ndata['h'] = hp + + # readout + if self.readout == "sum": + hg = dgl.sum_nodes(g, 'h') + elif self.readout == "max": + hg = dgl.max_nodes(g, 'h') + elif self.readout == "mean": + hg = dgl.mean_nodes(g, 'h') + else: + hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes + + self.g = g # For util; To be accessed in loss() function + + return self.MLP_layer(hg), g + + def loss(self, scores, targets): + + # Loss A: Task loss ------------------------------------------------------------- + loss_a = nn.L1Loss()(scores, targets) + + if self.use_lapeig_loss: + raise NotImplementedError + else: + loss = loss_a + + return loss \ No newline at end of file diff --git a/scripts/OGBMOL/script_MOLPCBA_all.sh b/scripts/OGBMOL/script_MOLPCBA_all.sh new file mode 100644 index 0000000..d9231f2 --- /dev/null +++ b/scripts/OGBMOL/script_MOLPCBA_all.sh @@ -0,0 +1,65 @@ +#!/bin/bash + + +############ +# Usage +############ + +# bash script_MOLPCBA_all.sh + + +#################################### +# MOLPCBA - 4 SEED RUNS OF EACH EXPTS +#################################### + +seed0=41 +seed1=95 +seed2=12 +seed3=35 +code=main_OGBMOL_graph_classification.py +dataset=OGBG-MOLPCBA +tmux new -s gnn_lspe_PCBA -d +tmux send-keys "source ~/.bashrc" C-m +tmux send-keys "source activate gnn_lspe" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GatedGCN_MOLPCBA_NoPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GatedGCN_MOLPCBA_NoPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GatedGCN_MOLPCBA_NoPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GatedGCN_MOLPCBA_NoPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GatedGCN_MOLPCBA_LapPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GatedGCN_MOLPCBA_LapPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GatedGCN_MOLPCBA_LapPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GatedGCN_MOLPCBA_LapPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GatedGCN_MOLPCBA_LSPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GatedGCN_MOLPCBA_LSPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GatedGCN_MOLPCBA_LSPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GatedGCN_MOLPCBA_LSPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 
'configs/PNA_MOLPCBA_NoPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/PNA_MOLPCBA_NoPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/PNA_MOLPCBA_NoPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/PNA_MOLPCBA_NoPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/PNA_MOLPCBA_LSPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/PNA_MOLPCBA_LSPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/PNA_MOLPCBA_LSPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/PNA_MOLPCBA_LSPE.json' & +wait" C-m +tmux send-keys "tmux kill-session -t gnn_lspe_PCBA" C-m + + + + + + + + + + + diff --git a/scripts/OGBMOL/script_MOLTOX21_all.sh b/scripts/OGBMOL/script_MOLTOX21_all.sh new file mode 100644 index 0000000..4aa698e --- /dev/null +++ b/scripts/OGBMOL/script_MOLTOX21_all.sh @@ -0,0 +1,95 @@ +#!/bin/bash + + +############ +# Usage +############ + +# bash script_MOLTOX21_all.sh + + +#################################### +# MOLTOX21 - 4 SEED RUNS OF EACH EXPTS +#################################### + +seed0=41 +seed1=95 +seed2=12 +seed3=35 +code=main_OGBMOL_graph_classification.py +dataset=OGBG-MOLTOX21 +tmux new -s gnn_lspe_TOX21 -d +tmux send-keys "source ~/.bashrc" C-m +tmux send-keys "source activate gnn_lspe" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GatedGCN_MOLTOX21_NoPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GatedGCN_MOLTOX21_NoPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GatedGCN_MOLTOX21_NoPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GatedGCN_MOLTOX21_NoPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GatedGCN_MOLTOX21_LapPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GatedGCN_MOLTOX21_LapPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GatedGCN_MOLTOX21_LapPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GatedGCN_MOLTOX21_LapPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GatedGCN_MOLTOX21_LSPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GatedGCN_MOLTOX21_LSPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GatedGCN_MOLTOX21_LSPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GatedGCN_MOLTOX21_LSPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/PNA_MOLTOX21_NoPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/PNA_MOLTOX21_NoPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/PNA_MOLTOX21_NoPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/PNA_MOLTOX21_NoPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/PNA_MOLTOX21_LSPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/PNA_MOLTOX21_LSPE.json' & +python $code 
--dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/PNA_MOLTOX21_LSPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/PNA_MOLTOX21_LSPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/PNA_MOLTOX21_LSPE_withLapEigLoss.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/PNA_MOLTOX21_LSPE_withLapEigLoss.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/PNA_MOLTOX21_LSPE_withLapEigLoss.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/PNA_MOLTOX21_LSPE_withLapEigLoss.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SAN_MOLTOX21_NoPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SAN_MOLTOX21_NoPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SAN_MOLTOX21_NoPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SAN_MOLTOX21_NoPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SAN_MOLTOX21_LSPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SAN_MOLTOX21_LSPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SAN_MOLTOX21_LSPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SAN_MOLTOX21_LSPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GraphiT_MOLTOX21_NoPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GraphiT_MOLTOX21_NoPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GraphiT_MOLTOX21_NoPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GraphiT_MOLTOX21_NoPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GraphiT_MOLTOX21_LSPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GraphiT_MOLTOX21_LSPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GraphiT_MOLTOX21_LSPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GraphiT_MOLTOX21_LSPE.json' & +wait" C-m +tmux send-keys "tmux kill-session -t gnn_lspe_TOX21" C-m + + + + + + + + + + + diff --git a/scripts/StatisticalResults/generate_statistics_OGBMOL.ipynb b/scripts/StatisticalResults/generate_statistics_OGBMOL.ipynb new file mode 100644 index 0000000..714efbf --- /dev/null +++ b/scripts/StatisticalResults/generate_statistics_OGBMOL.ipynb @@ -0,0 +1,176 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "f_dir = \"../../out/PNA_MOLTOX21_NoPE/results/\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "file = []\n", + "dataset = []\n", + "model = []\n", + "layer = []\n", + "params = []\n", + "acc_test = []\n", + "acc_train = []\n", + "convergence = []\n", + "total_time = []\n", + "epoch_time = []\n", + "\n", + "for filename in os.listdir(f_dir):\n", + "\n", + " if filename[-4:] 
== \".txt\":\n", + " file.append( filename )\n", + " \n", + " with open(os.path.join(f_dir, filename), \"r\") as f:\n", + " lines = f.readlines()\n", + "\n", + " for line in lines:\n", + " # print('h1c',line)\n", + "\n", + " if line[:9] == \"Dataset: \":\n", + " dataset.append( line[9:-2] )\n", + "\n", + " if line[:7] == \"Model: \":\n", + " model.append( line[7:-1] )\n", + "\n", + " if line[:17] == \"net_params={'L': \":\n", + " layer.append( line[17:18] )\n", + " \n", + " if line[:56] == \"net_params={'full_graph': True, 'init_gamma': 0.1, 'L': \":\n", + " layer.append( line[56:58] )\n", + " \n", + " if line[:37] == \"net_params={'full_graph': True, 'L': \":\n", + " layer.append( line[37:39] )\n", + " \n", + " if line[:18] == \"Total Parameters: \":\n", + " params.append( line[18:-1] )\n", + "\n", + " if line[:10] == \"TEST AUC: \":\n", + " acc_test.append( float(line[10:-1]) )\n", + " \n", + " if line[:11] == \"TRAIN AUC: \":\n", + " acc_train.append( float(line[11:-1]) )\n", + " \n", + " if line[:35] == \" Convergence Time (Epochs): \":\n", + " convergence.append( float(line[35:-1]) )\n", + "\n", + " if line[:18] == \"Total Time Taken: \":\n", + " total_time.append( float(line[18:-4]) )\n", + "\n", + " if line[:24] == 'Average Time Per Epoch: ':\n", + " epoch_time.append( float(line[24:-2]) )\n", + " \n", + " \n", + " \n", + " \n", + "# print('file',file)\n", + "# print('dataset',dataset)\n", + "# print('model',model)\n", + "# print('layer',layer)\n", + "# print('params',params)\n", + "# print('acc_test',acc_test)\n", + "# print('acc_train',acc_train)\n", + "# print('convergence',convergence)\n", + "# print('total_time',total_time)\n", + "# print('epoch_time',epoch_time)\n", + "\n", + "\n", + "\n", + "\n", + "list_datasets = ['ogbg-moltox21']\n", + "#print('list_datasets',list_datasets)\n", + "\n", + "list_gnns = ['GatedGCN', 'PNA', 'SAN', 'GraphiT']\n", + "#print('list_gnns',list_gnns)\n", + "\n", + "\n", + " \n", + "for data in list_datasets:\n", + " #print(data)\n", + "\n", + " for gnn in list_gnns:\n", + " #print('gnn:',gnn)\n", + "\n", + " acc_test_one_gnn = []\n", + " acc_train_one_gnn = []\n", + " convergence_one_gnn = []\n", + " total_time_one_gnn = []\n", + " epoch_time_one_gnn = []\n", + " nb_seeds = 0\n", + "\n", + " for i in range(len(file)):\n", + " #print(params[i])\n", + " \n", + " if data==dataset[i] and gnn==model[i]:\n", + " params_one_gnn = params[i]\n", + " acc_test_one_gnn.append(acc_test[i])\n", + " acc_train_one_gnn.append(acc_train[i])\n", + " convergence_one_gnn.append(convergence[i])\n", + " total_time_one_gnn.append(total_time[i])\n", + " epoch_time_one_gnn.append(epoch_time[i])\n", + " L = layer[i]\n", + " nb_seeds = nb_seeds + 1\n", + "\n", + " #print(params_one_gnn)\n", + " if len(acc_test_one_gnn)>0:\n", + " print(acc_test_one_gnn)\n", + " latex_str = f\"{data} & {nb_seeds} & {gnn} & {L} & {params_one_gnn} & {np.mean(acc_test_one_gnn):.3f}$\\pm${np.std(acc_test_one_gnn):.3f} & {np.mean(acc_train_one_gnn):.3f}$\\pm${np.std(acc_train_one_gnn):.3f} & {np.mean(convergence_one_gnn):.2f} & {np.mean(epoch_time_one_gnn):.2f}s/{np.mean(total_time_one_gnn):.2f}hr\"\n", + " print(\"\\nDataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\\n{}\".format(latex_str,nb_seeds))\n", + "\n", + " \n", + "\n", + "print(\"\\n\")\n", + "\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": 
"python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/scripts/StatisticalResults/generate_statistics_ZINC.ipynb b/scripts/StatisticalResults/generate_statistics_ZINC.ipynb new file mode 100644 index 0000000..01ff269 --- /dev/null +++ b/scripts/StatisticalResults/generate_statistics_ZINC.ipynb @@ -0,0 +1,174 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "f_dir = \"../../out/GraphiT_ZINC_LSPE_noLapEigLoss/results/\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "file = []\n", + "dataset = []\n", + "model = []\n", + "layer = []\n", + "params = []\n", + "acc_test = []\n", + "acc_train = []\n", + "convergence = []\n", + "total_time = []\n", + "epoch_time = []\n", + "\n", + "for filename in os.listdir(f_dir):\n", + "\n", + " if filename[-4:] == \".txt\":\n", + " file.append( filename )\n", + " \n", + " with open(os.path.join(f_dir, filename), \"r\") as f:\n", + " lines = f.readlines()\n", + "\n", + " for line in lines:\n", + " #print('h1c',line)\n", + "\n", + " if line[:9] == \"Dataset: \":\n", + " dataset.append( line[9:-2] )\n", + "\n", + " if line[:7] == \"Model: \":\n", + " model.append( line[7:-1] )\n", + "\n", + " if line[:17] == \"net_params={'L': \":\n", + " layer.append( line[17:18] )\n", + " \n", + " if line[:56] == \"net_params={'full_graph': True, 'init_gamma': 0.1, 'L': \":\n", + " layer.append( line[56:58])\n", + " \n", + " if line[:37] == \"net_params={'full_graph': True, 'L': \":\n", + " layer.append( line[37:39])\n", + " \n", + " if line[:18] == \"Total Parameters: \":\n", + " params.append( line[18:-1] )\n", + "\n", + " if line[:10] == \"TEST MAE: \":\n", + " acc_test.append( float(line[10:-1]) )\n", + " \n", + " if line[:11] == \"TRAIN MAE: \":\n", + " acc_train.append( float(line[11:-1]) )\n", + " \n", + " if line[4:31] == \"Convergence Time (Epochs): \":\n", + " convergence.append( float(line[31:-1]) )\n", + "\n", + " if line[:18] == \"Total Time Taken: \":\n", + " total_time.append( float(line[18:-4]) )\n", + "\n", + " if line[:24] == 'Average Time Per Epoch: ':\n", + " epoch_time.append( float(line[24:-2]) )\n", + " \n", + " \n", + " \n", + " \n", + "# print('file',file)\n", + "# print('dataset',dataset)\n", + "# print('model',model)\n", + "# print('layer',layer)\n", + "# print('params',params)\n", + "# print('acc_test',acc_test)\n", + "# print('acc_train',acc_train)\n", + "# print('convergence',convergence)\n", + "# print('total_time',total_time)\n", + "# print('epoch_time',epoch_time)\n", + "\n", + "\n", + "\n", + "\n", + "list_datasets = ['ZINC']\n", + "#print('list_datasets',list_datasets)\n", + "\n", + "list_gnns = ['GatedGCN', 'PNA', 'SAN', 'GraphiT']\n", + "#print('list_gnns',list_gnns)\n", + "\n", + "\n", + "for data in list_datasets:\n", + " #print(data)\n", + "\n", + " for gnn in list_gnns:\n", + " #print('gnn:',gnn)\n", + "\n", + " acc_test_one_gnn = []\n", + " acc_train_one_gnn = []\n", + " convergence_one_gnn = []\n", + " total_time_one_gnn = []\n", + " epoch_time_one_gnn = 
[]\n", + " nb_seeds = 0\n", + "\n", + " for i in range(len(file)):\n", + " #print(params[i])\n", + " if data==dataset[i] and gnn==model[i]:\n", + " params_one_gnn = params[i]\n", + " acc_test_one_gnn.append(acc_test[i])\n", + " acc_train_one_gnn.append(acc_train[i])\n", + " convergence_one_gnn.append(convergence[i])\n", + " total_time_one_gnn.append(total_time[i])\n", + " epoch_time_one_gnn.append(epoch_time[i])\n", + " L = layer[i]\n", + " nb_seeds = nb_seeds + 1\n", + "\n", + " #print(params_one_gnn)\n", + " if len(acc_test_one_gnn)>0:\n", + " print(acc_test_one_gnn)\n", + " latex_str = f\"{data} & {nb_seeds} & {gnn} & {L} & {params_one_gnn} & {np.mean(acc_test_one_gnn):.3f}$\\pm${np.std(acc_test_one_gnn):.3f} & {np.mean(acc_train_one_gnn):.3f}$\\pm${np.std(acc_train_one_gnn):.3f} & {np.mean(convergence_one_gnn):.2f} & {np.mean(epoch_time_one_gnn):.2f}s/{np.mean(total_time_one_gnn):.2f}hr\"\n", + " print(\"\\nDataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\\n{}\".format(latex_str,nb_seeds))\n", + "\n", + " \n", + "\n", + "print(\"\\n\")\n", + "\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/scripts/TensorBoard/script_tensorboard.sh b/scripts/TensorBoard/script_tensorboard.sh new file mode 100644 index 0000000..81ddd33 --- /dev/null +++ b/scripts/TensorBoard/script_tensorboard.sh @@ -0,0 +1,21 @@ +#!/bin/bash + + +# bash script_tensorboard.sh + + + + + +tmux new -s tensorboard -d +tmux send-keys "source activate gnn_lspe" C-m +tmux send-keys "tensorboard --logdir out/ --port 6006" C-m + + + + + + + + + diff --git a/scripts/ZINC/script_ZINC_all.sh b/scripts/ZINC/script_ZINC_all.sh new file mode 100644 index 0000000..30cb7bc --- /dev/null +++ b/scripts/ZINC/script_ZINC_all.sh @@ -0,0 +1,97 @@ +#!/bin/bash + + +############ +# Usage +############ + +# bash script_ZINC_all.sh + + +#################################### +# ZINC - 4 SEED RUNS OF EACH EXPTS +#################################### + +seed0=41 +seed1=95 +seed2=12 +seed3=35 +code=main_ZINC_graph_regression.py +dataset=ZINC +tmux new -s gnn_lspe_ZINC -d +tmux send-keys "source ~/.bashrc" C-m +tmux send-keys "source activate gnn_lspe" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GatedGCN_ZINC_NoPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GatedGCN_ZINC_NoPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GatedGCN_ZINC_NoPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GatedGCN_ZINC_NoPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GatedGCN_ZINC_LapPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GatedGCN_ZINC_LapPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GatedGCN_ZINC_LapPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GatedGCN_ZINC_LapPE.json' & +wait" 
C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GatedGCN_ZINC_LSPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GatedGCN_ZINC_LSPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GatedGCN_ZINC_LSPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GatedGCN_ZINC_LSPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GatedGCN_ZINC_LSPE_withLapEigLoss.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GatedGCN_ZINC_LSPE_withLapEigLoss.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GatedGCN_ZINC_LSPE_withLapEigLoss.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GatedGCN_ZINC_LSPE_withLapEigLoss.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/PNA_ZINC_NoPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/PNA_ZINC_NoPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/PNA_ZINC_NoPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/PNA_ZINC_NoPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/PNA_ZINC_LSPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/PNA_ZINC_LSPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/PNA_ZINC_LSPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/PNA_ZINC_LSPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SAN_ZINC_NoPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SAN_ZINC_NoPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SAN_ZINC_NoPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SAN_ZINC_NoPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SAN_ZINC_LSPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SAN_ZINC_LSPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SAN_ZINC_LSPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SAN_ZINC_LSPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GraphiT_ZINC_NoPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GraphiT_ZINC_NoPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GraphiT_ZINC_NoPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GraphiT_ZINC_NoPE.json' & +wait" C-m +tmux send-keys " +python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/GraphiT_ZINC_LSPE.json' & +python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/GraphiT_ZINC_LSPE.json' & +python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/GraphiT_ZINC_LSPE.json' & +python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/GraphiT_ZINC_LSPE.json' & +wait" C-m +tmux send-keys "tmux kill-session -t 
gnn_lspe_ZINC" C-m + + + + + + + + + + + + + diff --git a/train/metrics.py b/train/metrics.py new file mode 100644 index 0000000..b584da0 --- /dev/null +++ b/train/metrics.py @@ -0,0 +1,68 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from sklearn.metrics import confusion_matrix +from sklearn.metrics import f1_score +import numpy as np + + +def MAE(scores, targets): + MAE = F.l1_loss(scores, targets) + MAE = MAE.detach().item() + return MAE + + +def accuracy_TU(scores, targets): + scores = scores.detach().argmax(dim=1) + acc = (scores==targets).float().sum().item() + return acc + + +def accuracy_MNIST_CIFAR(scores, targets): + scores = scores.detach().argmax(dim=1) + acc = (scores==targets).float().sum().item() + return acc + +def accuracy_CITATION_GRAPH(scores, targets): + scores = scores.detach().argmax(dim=1) + acc = (scores==targets).float().sum().item() + acc = acc / len(targets) + return acc + + +def accuracy_SBM(scores, targets): + S = targets.cpu().numpy() + C = np.argmax( torch.nn.Softmax(dim=1)(scores).cpu().detach().numpy() , axis=1 ) + CM = confusion_matrix(S,C).astype(np.float32) + nb_classes = CM.shape[0] + targets = targets.cpu().detach().numpy() + nb_non_empty_classes = 0 + pr_classes = np.zeros(nb_classes) + for r in range(nb_classes): + cluster = np.where(targets==r)[0] + if cluster.shape[0] != 0: + pr_classes[r] = CM[r,r]/ float(cluster.shape[0]) + if CM[r,r]>0: + nb_non_empty_classes += 1 + else: + pr_classes[r] = 0.0 + acc = 100.* np.sum(pr_classes)/ float(nb_classes) + return acc + + +def binary_f1_score(scores, targets): + """Computes the F1 score using scikit-learn for binary class labels. + + Returns the F1 score for the positive class, i.e. labelled '1'. + """ + y_true = targets.cpu().numpy() + y_pred = scores.argmax(dim=1).cpu().numpy() + return f1_score(y_true, y_pred, average='binary') + + +def accuracy_VOC(scores, targets): + scores = scores.detach().argmax(dim=1).cpu() + targets = targets.cpu().detach().numpy() + acc = f1_score(scores, targets, average='weighted') + return acc diff --git a/train/train_OGBMOL_graph_classification.py b/train/train_OGBMOL_graph_classification.py new file mode 100644 index 0000000..ba74bef --- /dev/null +++ b/train/train_OGBMOL_graph_classification.py @@ -0,0 +1,131 @@ +""" + Utility functions for training one epoch + and evaluating one epoch +""" +import numpy as np +import torch +import torch.nn as nn +import math +from tqdm import tqdm + +import dgl + + +def train_epoch_sparse(model, optimizer, device, data_loader, epoch, evaluator): + model.train() + + epoch_loss = 0 + nb_data = 0 + + y_true = [] + y_pred = [] + + for iter, (batch_graphs, batch_labels, batch_snorm_n) in enumerate(data_loader): + optimizer.zero_grad() + + batch_graphs = batch_graphs.to(device) + batch_x = batch_graphs.ndata['feat'].to(device) + batch_e = batch_graphs.edata['feat'].to(device) + batch_labels = batch_labels.to(device) + batch_snorm_n = batch_snorm_n.to(device) + + try: + batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device) + except KeyError: + batch_pos_enc = None + + if model.pe_init == 'lap_pe': + sign_flip = torch.rand(batch_pos_enc.size(1)).to(device) + sign_flip[sign_flip>=0.5] = 1.0; sign_flip[sign_flip<0.5] = -1.0 + batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0) + + batch_pred, __ = model.forward(batch_graphs, batch_x, batch_pos_enc, batch_e, batch_snorm_n) + del __ + + # ignore nan labels (unlabeled) when computing training loss + is_labeled = batch_labels == batch_labels + loss = 
diff --git a/train/train_OGBMOL_graph_classification.py b/train/train_OGBMOL_graph_classification.py
new file mode 100644
index 0000000..ba74bef
--- /dev/null
+++ b/train/train_OGBMOL_graph_classification.py
@@ -0,0 +1,131 @@
+"""
+    Utility functions for training one epoch
+    and evaluating one epoch
+"""
+import numpy as np
+import torch
+import torch.nn as nn
+import math
+from tqdm import tqdm
+
+import dgl
+
+
+def train_epoch_sparse(model, optimizer, device, data_loader, epoch, evaluator):
+    model.train()
+
+    epoch_loss = 0
+    nb_data = 0
+
+    y_true = []
+    y_pred = []
+
+    for iter, (batch_graphs, batch_labels, batch_snorm_n) in enumerate(data_loader):
+        optimizer.zero_grad()
+
+        batch_graphs = batch_graphs.to(device)
+        batch_x = batch_graphs.ndata['feat'].to(device)
+        batch_e = batch_graphs.edata['feat'].to(device)
+        batch_labels = batch_labels.to(device)
+        batch_snorm_n = batch_snorm_n.to(device)
+
+        try:
+            batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
+        except KeyError:
+            batch_pos_enc = None
+
+        if model.pe_init == 'lap_pe':
+            # Laplacian eigenvectors are defined only up to sign, so randomly
+            # flip the sign of each eigenvector (column) as data augmentation
+            sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
+            sign_flip[sign_flip>=0.5] = 1.0; sign_flip[sign_flip<0.5] = -1.0
+            batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
+
+        batch_pred, __ = model.forward(batch_graphs, batch_x, batch_pos_enc, batch_e, batch_snorm_n)
+        del __
+
+        # ignore nan labels (unlabeled) when computing training loss;
+        # NaN != NaN, so this mask is False exactly at the unlabeled entries
+        is_labeled = batch_labels == batch_labels
+        loss = model.loss(batch_pred.to(torch.float32)[is_labeled], batch_labels.to(torch.float32)[is_labeled])
+
+        loss.backward()
+        optimizer.step()
+
+        y_true.append(batch_labels.view(batch_pred.shape).detach().cpu())
+        y_pred.append(batch_pred.detach().cpu())
+
+        epoch_loss += loss.detach().item()
+        nb_data += batch_labels.size(0)
+
+    epoch_loss /= (iter + 1)
+
+    y_true = torch.cat(y_true, dim=0).numpy()
+    y_pred = torch.cat(y_pred, dim=0).numpy()
+
+    # compute performance metric using OGB evaluator
+    input_dict = {"y_true": y_true, "y_pred": y_pred}
+    perf = evaluator.eval(input_dict)
+
+    # the number of tasks identifies the dataset and hence the metric
+    if batch_labels.size(1) == 128:   # MOLPCBA
+        return_perf = perf['ap']
+    elif batch_labels.size(1) == 12:  # MOLTOX21
+        return_perf = perf['rocauc']
+    else:
+        raise ValueError("Unexpected number of tasks: " + str(batch_labels.size(1)))
+
+    return epoch_loss, return_perf, optimizer
+
+
+def evaluate_network_sparse(model, device, data_loader, epoch, evaluator):
+    model.eval()
+
+    epoch_loss = 0
+    nb_data = 0
+
+    y_true = []
+    y_pred = []
+
+    out_graphs_for_lapeig_viz = []
+
+    with torch.no_grad():
+        for iter, (batch_graphs, batch_labels, batch_snorm_n) in enumerate(data_loader):
+            batch_graphs = batch_graphs.to(device)
+            batch_x = batch_graphs.ndata['feat'].to(device)
+            batch_e = batch_graphs.edata['feat'].to(device)
+            batch_labels = batch_labels.to(device)
+            batch_snorm_n = batch_snorm_n.to(device)
+
+            try:
+                batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
+            except KeyError:
+                batch_pos_enc = None
+
+            batch_pred, batch_g = model.forward(batch_graphs, batch_x, batch_pos_enc, batch_e, batch_snorm_n)
+
+            # ignore nan labels (unlabeled) when computing loss
+            is_labeled = batch_labels == batch_labels
+            loss = model.loss(batch_pred.to(torch.float32)[is_labeled], batch_labels.to(torch.float32)[is_labeled])
+
+            y_true.append(batch_labels.view(batch_pred.shape).detach().cpu())
+            y_pred.append(batch_pred.detach().cpu())
+
+            epoch_loss += loss.detach().item()
+            nb_data += batch_labels.size(0)
+
+            if batch_g is not None:
+                out_graphs_for_lapeig_viz += dgl.unbatch(batch_g)
+            else:
+                out_graphs_for_lapeig_viz = None
+
+    epoch_loss /= (iter + 1)
+
+    y_true = torch.cat(y_true, dim=0).numpy()
+    y_pred = torch.cat(y_pred, dim=0).numpy()
+
+    # compute performance metric using OGB evaluator
+    input_dict = {"y_true": y_true, "y_pred": y_pred}
+    perf = evaluator.eval(input_dict)
+
+    if batch_labels.size(1) == 128:   # MOLPCBA
+        return_perf = perf['ap']
+    elif batch_labels.size(1) == 12:  # MOLTOX21
+        return_perf = perf['rocauc']
+    else:
+        raise ValueError("Unexpected number of tasks: " + str(batch_labels.size(1)))
+
+    return epoch_loss, return_perf, out_graphs_for_lapeig_viz
\ No newline at end of file
diff --git a/train/train_ZINC_graph_regression.py b/train/train_ZINC_graph_regression.py
new file mode 100644
index 0000000..40c522c
--- /dev/null
+++ b/train/train_ZINC_graph_regression.py
@@ -0,0 +1,84 @@
+"""
+    Utility functions for training one epoch
+    and evaluating one epoch
+"""
+import torch
+import torch.nn as nn
+import math
+
+import dgl
+
+from train.metrics import MAE
+
+
+def train_epoch_sparse(model, optimizer, device, data_loader, epoch):
+    model.train()
+    epoch_loss = 0
+    epoch_train_mae = 0
+    nb_data = 0
+    gpu_mem = 0
+    for iter, (batch_graphs, batch_targets, batch_snorm_n) in enumerate(data_loader):
+        batch_graphs = batch_graphs.to(device)
+        batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
+        batch_e = batch_graphs.edata['feat'].to(device)
+        batch_targets = batch_targets.to(device)
+        batch_snorm_n = batch_snorm_n.to(device)
+        optimizer.zero_grad()
+
+        try:
+            batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
+        except KeyError:
+            batch_pos_enc = None
+
+        if model.pe_init == 'lap_pe':
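+            # Laplacian eigenvectors are defined only up to sign; randomly
+            # flipping each eigenvector's sign per batch is the standard
+            # LapPE augmentation (same idiom as in the OGB training loop above)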
+            sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
+            sign_flip[sign_flip>=0.5] = 1.0; sign_flip[sign_flip<0.5] = -1.0
+            batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
+
+        batch_scores, __ = model.forward(batch_graphs, batch_x, batch_pos_enc, batch_e, batch_snorm_n)
+        del __
+
+        loss = model.loss(batch_scores, batch_targets)
+        loss.backward()
+        optimizer.step()
+        epoch_loss += loss.detach().item()
+        epoch_train_mae += MAE(batch_scores, batch_targets)
+        nb_data += batch_targets.size(0)
+    epoch_loss /= (iter + 1)
+    epoch_train_mae /= (iter + 1)
+
+    return epoch_loss, epoch_train_mae, optimizer
+
+
+def evaluate_network_sparse(model, device, data_loader, epoch):
+    model.eval()
+    epoch_test_loss = 0
+    epoch_test_mae = 0
+    nb_data = 0
+    out_graphs_for_lapeig_viz = []
+    with torch.no_grad():
+        for iter, (batch_graphs, batch_targets, batch_snorm_n) in enumerate(data_loader):
+            batch_graphs = batch_graphs.to(device)
+            batch_x = batch_graphs.ndata['feat'].to(device)
+            batch_e = batch_graphs.edata['feat'].to(device)
+            batch_targets = batch_targets.to(device)
+            batch_snorm_n = batch_snorm_n.to(device)
+
+            try:
+                batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
+            except KeyError:
+                batch_pos_enc = None
+
+            batch_scores, batch_g = model.forward(batch_graphs, batch_x, batch_pos_enc, batch_e, batch_snorm_n)
+
+            loss = model.loss(batch_scores, batch_targets)
+            epoch_test_loss += loss.detach().item()
+            epoch_test_mae += MAE(batch_scores, batch_targets)
+            nb_data += batch_targets.size(0)
+
+            out_graphs_for_lapeig_viz += dgl.unbatch(batch_g)
+        epoch_test_loss /= (iter + 1)
+        epoch_test_mae /= (iter + 1)
+
+    return epoch_test_loss, epoch_test_mae, out_graphs_for_lapeig_viz
+
diff --git a/utils/cleaner_main.py b/utils/cleaner_main.py
new file mode 100644
index 0000000..af383c8
--- /dev/null
+++ b/utils/cleaner_main.py
@@ -0,0 +1,102 @@
+
+# Clean the main.py file after conversion from notebook.
+# Any notebook code is removed from the main.py file.
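+#
+# Example usage (hypothetical filename): cleaner_main('main_ZINC_graph_regression')
+# converts main_ZINC_graph_regression.ipynb to main_ZINC_graph_regression.py and
+# then strips the notebook-only code paths handled below.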
+ + +import subprocess + + +def cleaner_main(filename): + + # file names + file_notebook = filename + '.ipynb' + file_python = filename + '.py' + + + # convert notebook to python file + print('Convert ' + file_notebook + ' to ' + file_python) + subprocess.check_output('jupyter nbconvert --to script ' + str(file_notebook) , shell=True) + + print('Clean ' + file_python) + + # open file + with open(file_python, "r") as f_in: + lines_in = f_in.readlines() + + # remove cell indices + lines_in = [ line for i,line in enumerate(lines_in) if '# In[' not in line ] + + # remove comments + lines_in = [ line for i,line in enumerate(lines_in) if line[0]!='#' ] + + # remove "in_ipynb()" function + idx_start_fnc = next((i for i, x in enumerate(lines_in) if 'def in_ipynb' in x), None) + if idx_start_fnc!=None: + idx_end_fnc = idx_start_fnc + next((i for i, x in enumerate(lines_in[idx_start_fnc+1:]) if x[:4] not in ['\n',' ']), None) + lines_in = [ line for i,line in enumerate(lines_in) if i not in range(idx_start_fnc,idx_end_fnc+1) ] + list_elements_to_remove = ['in_ipynb()', 'print(notebook_mode)'] + for elem in list_elements_to_remove: + lines_in = [ line for i,line in enumerate(lines_in) if elem not in line ] + + # unindent "if notebook_mode==False" block + idx_start_fnc = next((i for i, x in enumerate(lines_in) if 'if notebook_mode==False' in x), None) + if idx_start_fnc!=None: + idx_end_fnc = idx_start_fnc + next((i for i, x in enumerate(lines_in[idx_start_fnc+1:]) if x[:8] not in ['\n',' ']), None) + for i in range(idx_start_fnc,idx_end_fnc+1): + lines_in[i] = lines_in[i][4:] + lines_in.pop(idx_start_fnc) + list_elements_to_remove = ['# notebook mode', '# terminal mode'] + for elem in list_elements_to_remove: + lines_in = [ line for i,line in enumerate(lines_in) if elem not in line ] + + # remove remaining "if notebook_mode==True" blocks - single indent + run = True + while run: + idx_start_fnc = next((i for i, x in enumerate(lines_in) if x[:16]=='if notebook_mode'), None) + if idx_start_fnc!=None: + idx_end_fnc = idx_start_fnc + next((i for i, x in enumerate(lines_in[idx_start_fnc+1:]) if x[:4] not in ['\n',' ']), None) + lines_in = [ line for i,line in enumerate(lines_in) if i not in range(idx_start_fnc,idx_end_fnc+1) ] + else: + run = False + + # remove "if notebook_mode==True" block - double indents + idx_start_fnc = next((i for i, x in enumerate(lines_in) if x[:20]==' if notebook_mode'), None) + if idx_start_fnc!=None: + idx_end_fnc = idx_start_fnc + next((i for i, x in enumerate(lines_in[idx_start_fnc+1:]) if x[:8] not in ['\n',' ']), None) + lines_in = [ line for i,line in enumerate(lines_in) if i not in range(idx_start_fnc,idx_end_fnc+1) ] + + # prepare main() for terminal mode + idx = next((i for i, x in enumerate(lines_in) if 'def main' in x), None) + if idx!=None: lines_in[idx] = 'def main():' + idx = next((i for i, x in enumerate(lines_in) if x[:5]=='else:'), None) + if idx!=None: lines_in.pop(idx) + idx = next((i for i, x in enumerate(lines_in) if x[:10]==' main()'), None) + if idx!=None: lines_in[idx] = 'main()' + + # remove notebook variables + idx = next((i for i, x in enumerate(lines_in) if 'use_gpu = True' in x), None) + if idx!=None: lines_in.pop(idx) + idx = next((i for i, x in enumerate(lines_in) if 'gpu_id = -1' in x), None) + if idx!=None: lines_in.pop(idx) + idx = next((i for i, x in enumerate(lines_in) if 'device = None' in x), None) + if idx!=None: lines_in.pop(idx) + run = True + while run: + idx = next((i for i, x in enumerate(lines_in) if x[:10]=='MODEL_NAME'), None) + if 
idx!=None: + lines_in.pop(idx) + else: + run = False + + # save clean file + lines_out = str() + for line in lines_in: lines_out += line + with open(file_python, 'w') as f_out: + f_out.write(lines_out) + + print('Done. ') + + + + + diff --git a/utils/plot_util.py b/utils/plot_util.py new file mode 100644 index 0000000..0c20d65 --- /dev/null +++ b/utils/plot_util.py @@ -0,0 +1,46 @@ +""" + Util function to plot graph with eigenvectors + x-axis: first dim + y-axis: second dim +""" + +import networkx as nx + +def plot_graph_eigvec(plt, g_id, g_dgl, feature_key, actual_eigvecs=False, predicted_eigvecs=False): + + if actual_eigvecs: + plt.set_xlabel('first eigenvec') + plt.set_ylabel('second eigenvec') + else: + plt.set_xlabel('first predicted pe') + plt.set_ylabel('second predicted pe') + + g_dgl = g_dgl.cpu() + g_dgl.ndata['feats'] = g_dgl.ndata[feature_key][:,:2] + g_nx = g_dgl.to_networkx(node_attrs=['feats']) + + labels = {} + for idx, node in enumerate(g_nx.nodes()): + labels[node] = str(idx) + + num_nodes = g_dgl.num_nodes() + num_edges = g_dgl.num_edges() + + edge_list = [] + srcs, dsts = g_dgl.edges() + for edge_i in range(num_edges): + edge_list.append((srcs[edge_i].item(), dsts[edge_i].item())) + + # fig, ax = plt.subplots() + # first 2-dim of eigenvecs are x,y coordinates, and the 3rd dim of eigenvec is plotted as node intensity + # intensities = g_dgl.ndata['feats'][:,2] + nx.draw_networkx_nodes(g_nx, g_dgl.ndata['feats'][:,:2].numpy(), node_color='r', node_size=180, label=list(range(g_dgl.number_of_nodes()))) + nx.draw_networkx_edges(g_nx, g_dgl.ndata['feats'][:,:2].numpy(), edge_list, alpha=0.3) + nx.draw_networkx_labels(g_nx, g_dgl.ndata['feats'][:,:2].numpy(), labels, font_size=16) + plt.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True) + + title = "Graph ID: " + str(g_id) + + title += " | Actual eigvecs" if actual_eigvecs else " | Predicted PEs" + plt.title.set_text(title) + \ No newline at end of file diff --git a/utils/visualize_RWPE_studies.ipynb b/utils/visualize_RWPE_studies.ipynb new file mode 100644 index 0000000..63e30a6 --- /dev/null +++ b/utils/visualize_RWPE_studies.ipynb @@ -0,0 +1,197 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "b941618a", + "metadata": {}, + "source": [ + "## ZINC Visualization with Init PE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "58a892b7", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "os.chdir('../') # go to root folder of the project\n", + "print(os.getcwd())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ab99754", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import networkx as nx\n", + "from itertools import count\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "from itertools import count" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2adb4608", + "metadata": {}, + "outputs": [], + "source": [ + "from data.data import LoadData\n", + "zinc_d = LoadData('ZINC')\n", + "\n", + "pos_enc_dim = 24\n", + "zinc_d._init_positional_encodings(pos_enc_dim, 'rand_walk')\n", + "zinc_d._add_eig_vecs(36)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6b12e503", + "metadata": {}, + "outputs": [], + "source": [ + "num_nodes = []\n", + "num_unique_RWPEs = []\n", + "num_unique_LapPEs = []\n", + "\n", + "for g_ in zinc_d.val:\n", + " num_nodes.append(g_[0].number_of_nodes())\n", + " num_unique_RWPEs.append(len(torch.unique(g_[0].ndata['pos_enc'], 
dim=0)))\n", + "for g_ in zinc_d.val:\n", + " num_unique_LapPEs.append(len(torch.unique(g_[0].ndata['eigvec'], dim=0)))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "edcf8c63", + "metadata": {}, + "outputs": [], + "source": [ + "def plot_init_PE_comparison(num_nodes, num_unique_PEs, PE='RWPE'):\n", + " fig = plt.figure(dpi=100, figsize=(7, 6))\n", + " ax = plt.axes()\n", + " plt.xlabel(\"Number of nodes\", fontsize=10)\n", + " if PE == 'RWPE':\n", + " plt.title(\"ZINC val (1K): Comparison of no. of nodes v/s no. of unique RWPEs\", fontsize=15)\n", + " plt.ylabel(\"Number of unique RWPEs\", fontsize=10)\n", + " elif PE == 'LapPE':\n", + " plt.title(\"ZINC val (1K): Comparison of no. of nodes v/s no. of unique LapPEs\", fontsize=15)\n", + " plt.ylabel(\"Number of unique LapPEs\", fontsize=10)\n", + " x = np.array(num_nodes)\n", + " y1 = np.array(num_unique_PEs)\n", + " #plt.xticks(x)\n", + " plt.hist2d(x, y1, (50, 50), cmap=plt.cm.Reds)# plt.cm.jet)\n", + " plt.colorbar()\n", + " plt.xlim([10, 35])\n", + " plt.ylim([10, 35])\n", + " #plt.scatter(x, y1, marker=\"o\", color=\"green\", linewidth=0.5)\n", + " x = np.linspace(9,40,100)\n", + " y = x\n", + " plt.plot(x, y, '-r', )\n", + "\n", + " #fig.savefig('out_ZINC_PE_viz/ZINC_valset_'+PE+'.pdf', bbox_inches='tight') \n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6f1de5dc", + "metadata": {}, + "outputs": [], + "source": [ + "plot_init_PE_comparison(num_nodes, num_unique_RWPEs, 'RWPE')\n", + "plot_init_PE_comparison(num_nodes, num_unique_LapPEs, 'LapPE')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "607a71d7", + "metadata": {}, + "outputs": [], + "source": [ + "graph_ids = [212,672] # not equal\n", + "graph_ids = [91,967] # equal" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "960aa77f", + "metadata": {}, + "outputs": [], + "source": [ + "for idx in graph_ids:\n", + " fig = plt.figure(dpi=100, figsize=(8, 6))\n", + " g_zinc_trial = zinc_d.val[idx][0]\n", + " g = g_zinc_trial.to_networkx(node_attrs=['pos_enc'])\n", + " #groups = set(nx.get_node_attributes(g,'pos_enc').values())\n", + " groups = torch.unique(g_zinc_trial.ndata['pos_enc'],dim=0)\n", + " mapping = dict(zip(groups,count()))\n", + " nodes = g.nodes()\n", + " colors = []\n", + " for n in nodes:\n", + " for key in mapping.keys():\n", + " if torch.equal(key, g.nodes[n]['pos_enc']):\n", + " color = mapping[key]\n", + " colors.append(color)\n", + " \n", + " pos = nx.spring_layout(g)\n", + " ec = nx.draw_networkx_edges(g, pos, alpha=0.2)\n", + " nc = nx.draw_networkx_nodes(g, pos, nodelist=nodes, node_color=colors, \n", + " node_size=100, cmap=plt.cm.jet)\n", + " plt.colorbar(nc)\n", + " #plt.xlabel(\"ZINC Val id: \" +str(idx), fontsize=16)\n", + " plt.title(\"nodes: \"+ str(num_nodes[idx]) + \" | unique RWPEs: \"+str(num_unique_RWPEs[idx]), fontsize=16)\n", + " #fig.savefig('out_ZINC_PE_viz/ZINC_valset_graph_'+str(idx)+'.pdf', bbox_inches='tight') \n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "59af93f9", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0e552577", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + 
"mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}